author     Linus Torvalds <torvalds@linux-foundation.org>   2011-01-07 15:47:02 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-01-07 15:47:02 -0500
commit     da40d036fd716f0efb2917076220814b1e927ae1
tree       567893573a48e2954d82421e77606034d3b32f84 /drivers
parent     aa58abc20fa85328a9f048e2626c0893691ff284
parent     c32e061fa19893ce4acf95d97d5613a161f0f1b7
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (147 commits)
[SCSI] arcmsr: fix write to device check
[SCSI] lpfc: lower stack use in lpfc_fc_frame_check
[SCSI] eliminate an unnecessary local variable from scsi_remove_target()
[SCSI] libiscsi: use bh locking instead of irq with session lock
[SCSI] libiscsi: do not take host lock in queuecommand
[SCSI] be2iscsi: fix null ptr when accessing task hdr
[SCSI] be2iscsi: fix gfp use in alloc_pdu
[SCSI] libiscsi: add more informative failure message during iscsi scsi eh
[SCSI] gdth: Add missing call to gdth_ioctl_free
[SCSI] bfa: remove unused definitions and misc cleanups
[SCSI] bfa: remove inactive functions
[SCSI] bfa: replace bfa_assert with WARN_ON
[SCSI] qla2xxx: Use sg_next to fetch next sg element while walking sg list.
[SCSI] qla2xxx: Fix to avoid recursive lock failure during BSG timeout.
[SCSI] qla2xxx: Remove code to not reset ISP82xx on failure.
[SCSI] qla2xxx: Display mailbox register 4 during 8012 AEN for ISP82XX parts.
[SCSI] qla2xxx: Don't perform a BIG_HAMMER if Get-ID (0x20) mailbox command fails on CNAs.
[SCSI] qla2xxx: Remove redundant module parameter permission bits
[SCSI] qla2xxx: Add sysfs node for displaying board temperature.
[SCSI] qla2xxx: Code cleanup to remove unwanted comments and code.
...
Diffstat (limited to 'drivers')
154 files changed, 11071 insertions, 7305 deletions
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 044fb22718d2..51c666fb67a4 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -45,8 +45,8 @@ static char *init_device;
45 | module_param_named(device, init_device, charp, 0400); | 45 | module_param_named(device, init_device, charp, 0400); |
46 | MODULE_PARM_DESC(device, "specify initial device"); | 46 | MODULE_PARM_DESC(device, "specify initial device"); |
47 | 47 | ||
48 | static struct kmem_cache *zfcp_cache_hw_align(const char *name, | 48 | static struct kmem_cache * __init zfcp_cache_hw_align(const char *name, |
49 | unsigned long size) | 49 | unsigned long size) |
50 | { | 50 | { |
51 | return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); | 51 | return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL); |
52 | } | 52 | } |
@@ -311,8 +311,7 @@ int zfcp_status_read_refill(struct zfcp_adapter *adapter)
311 | if (zfcp_fsf_status_read(adapter->qdio)) { | 311 | if (zfcp_fsf_status_read(adapter->qdio)) { |
312 | if (atomic_read(&adapter->stat_miss) >= | 312 | if (atomic_read(&adapter->stat_miss) >= |
313 | adapter->stat_read_buf_num) { | 313 | adapter->stat_read_buf_num) { |
314 | zfcp_erp_adapter_reopen(adapter, 0, "axsref1", | 314 | zfcp_erp_adapter_reopen(adapter, 0, "axsref1"); |
315 | NULL); | ||
316 | return 1; | 315 | return 1; |
317 | } | 316 | } |
318 | break; | 317 | break; |
@@ -459,7 +458,7 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
459 | sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); | 458 | sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); |
460 | 459 | ||
461 | zfcp_erp_thread_kill(adapter); | 460 | zfcp_erp_thread_kill(adapter); |
462 | zfcp_dbf_adapter_unregister(adapter->dbf); | 461 | zfcp_dbf_adapter_unregister(adapter); |
463 | zfcp_qdio_destroy(adapter->qdio); | 462 | zfcp_qdio_destroy(adapter->qdio); |
464 | 463 | ||
465 | zfcp_ccw_adapter_put(adapter); /* final put to release */ | 464 | zfcp_ccw_adapter_put(adapter); /* final put to release */ |
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0833c2b51e39..4f7852dd30c7 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -48,7 +48,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
48 | 48 | ||
49 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); | 49 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); |
50 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 50 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
51 | "ccresu2", NULL); | 51 | "ccresu2"); |
52 | zfcp_erp_wait(adapter); | 52 | zfcp_erp_wait(adapter); |
53 | flush_work(&adapter->scan_work); | 53 | flush_work(&adapter->scan_work); |
54 | 54 | ||
@@ -182,7 +182,7 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
182 | if (!adapter) | 182 | if (!adapter) |
183 | return 0; | 183 | return 0; |
184 | 184 | ||
185 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); | 185 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1"); |
186 | zfcp_erp_wait(adapter); | 186 | zfcp_erp_wait(adapter); |
187 | 187 | ||
188 | zfcp_ccw_adapter_put(adapter); | 188 | zfcp_ccw_adapter_put(adapter); |
@@ -207,24 +207,24 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
207 | switch (event) { | 207 | switch (event) { |
208 | case CIO_GONE: | 208 | case CIO_GONE: |
209 | dev_warn(&cdev->dev, "The FCP device has been detached\n"); | 209 | dev_warn(&cdev->dev, "The FCP device has been detached\n"); |
210 | zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); | 210 | zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); |
211 | break; | 211 | break; |
212 | case CIO_NO_PATH: | 212 | case CIO_NO_PATH: |
213 | dev_warn(&cdev->dev, | 213 | dev_warn(&cdev->dev, |
214 | "The CHPID for the FCP device is offline\n"); | 214 | "The CHPID for the FCP device is offline\n"); |
215 | zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); | 215 | zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); |
216 | break; | 216 | break; |
217 | case CIO_OPER: | 217 | case CIO_OPER: |
218 | dev_info(&cdev->dev, "The FCP device is operational again\n"); | 218 | dev_info(&cdev->dev, "The FCP device is operational again\n"); |
219 | zfcp_erp_set_adapter_status(adapter, | 219 | zfcp_erp_set_adapter_status(adapter, |
220 | ZFCP_STATUS_COMMON_RUNNING); | 220 | ZFCP_STATUS_COMMON_RUNNING); |
221 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 221 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
222 | "ccnoti4", NULL); | 222 | "ccnoti4"); |
223 | break; | 223 | break; |
224 | case CIO_BOXED: | 224 | case CIO_BOXED: |
225 | dev_warn(&cdev->dev, "The FCP device did not respond within " | 225 | dev_warn(&cdev->dev, "The FCP device did not respond within " |
226 | "the specified time\n"); | 226 | "the specified time\n"); |
227 | zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); | 227 | zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5"); |
228 | break; | 228 | break; |
229 | } | 229 | } |
230 | 230 | ||
@@ -243,7 +243,7 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
243 | if (!adapter) | 243 | if (!adapter) |
244 | return; | 244 | return; |
245 | 245 | ||
246 | zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL); | 246 | zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1"); |
247 | zfcp_erp_wait(adapter); | 247 | zfcp_erp_wait(adapter); |
248 | zfcp_erp_thread_kill(adapter); | 248 | zfcp_erp_thread_kill(adapter); |
249 | 249 | ||
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index d692e229ecba..46342fee394d 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -288,7 +288,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
288 | (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) | 288 | (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) |
289 | zfcp_erp_port_reopen(port, | 289 | zfcp_erp_port_reopen(port, |
290 | ZFCP_STATUS_COMMON_ERP_FAILED, | 290 | ZFCP_STATUS_COMMON_ERP_FAILED, |
291 | "cfaac_1", NULL); | 291 | "cfaac_1"); |
292 | } | 292 | } |
293 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 293 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
294 | 294 | ||
@@ -299,7 +299,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
299 | (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) | 299 | (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) |
300 | zfcp_erp_lun_reopen(sdev, | 300 | zfcp_erp_lun_reopen(sdev, |
301 | ZFCP_STATUS_COMMON_ERP_FAILED, | 301 | ZFCP_STATUS_COMMON_ERP_FAILED, |
302 | "cfaac_2", NULL); | 302 | "cfaac_2"); |
303 | } | 303 | } |
304 | } | 304 | } |
305 | 305 | ||
@@ -426,7 +426,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
426 | zfcp_scsi_dev_lun(sdev), | 426 | zfcp_scsi_dev_lun(sdev), |
427 | (unsigned long long)zfcp_sdev->port->wwpn); | 427 | (unsigned long long)zfcp_sdev->port->wwpn); |
428 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); | 428 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); |
429 | zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL); | 429 | zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6"); |
430 | return -EACCES; | 430 | return -EACCES; |
431 | } | 431 | } |
432 | 432 | ||
@@ -437,7 +437,7 @@ int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
437 | zfcp_scsi_dev_lun(sdev), | 437 | zfcp_scsi_dev_lun(sdev), |
438 | (unsigned long long)zfcp_sdev->port->wwpn); | 438 | (unsigned long long)zfcp_sdev->port->wwpn); |
439 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); | 439 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED); |
440 | zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL); | 440 | zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8"); |
441 | return -EACCES; | 441 | return -EACCES; |
442 | } | 442 | } |
443 | 443 | ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 2cdd6b28ff7f..96d1462e0bf5 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
3 | * | 3 | * |
4 | * Debug traces for zfcp. | 4 | * Debug traces for zfcp. |
5 | * | 5 | * |
6 | * Copyright IBM Corporation 2002, 2009 | 6 | * Copyright IBM Corporation 2002, 2010 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | 9 | #define KMSG_COMPONENT "zfcp" |
@@ -22,980 +22,392 @@ module_param(dbfsize, uint, 0400);
22 | MODULE_PARM_DESC(dbfsize, | 22 | MODULE_PARM_DESC(dbfsize, |
23 | "number of pages for each debug feature area (default 4)"); | 23 | "number of pages for each debug feature area (default 4)"); |
24 | 24 | ||
25 | static void zfcp_dbf_hexdump(debug_info_t *dbf, void *to, int to_len, | 25 | static inline unsigned int zfcp_dbf_plen(unsigned int offset) |
26 | int level, char *from, int from_len) | ||
27 | { | 26 | { |
28 | int offset; | 27 | return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC; |
29 | struct zfcp_dbf_dump *dump = to; | ||
30 | int room = to_len - sizeof(*dump); | ||
31 | |||
32 | for (offset = 0; offset < from_len; offset += dump->size) { | ||
33 | memset(to, 0, to_len); | ||
34 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); | ||
35 | dump->total_size = from_len; | ||
36 | dump->offset = offset; | ||
37 | dump->size = min(from_len - offset, room); | ||
38 | memcpy(dump->data, from + offset, dump->size); | ||
39 | debug_event(dbf, level, dump, dump->size + sizeof(*dump)); | ||
40 | } | ||
41 | } | 28 | } |
42 | 29 | ||
43 | static void zfcp_dbf_tag(char **p, const char *label, const char *tag) | 30 | static inline |
31 | void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, | ||
32 | u64 req_id) | ||
44 | { | 33 | { |
45 | int i; | 34 | struct zfcp_dbf_pay *pl = &dbf->pay_buf; |
46 | 35 | u16 offset = 0, rec_length; | |
47 | *p += sprintf(*p, "%-24s", label); | ||
48 | for (i = 0; i < ZFCP_DBF_TAG_SIZE; i++) | ||
49 | *p += sprintf(*p, "%c", tag[i]); | ||
50 | *p += sprintf(*p, "\n"); | ||
51 | } | ||
52 | 36 | ||
53 | static void zfcp_dbf_outs(char **buf, const char *s1, const char *s2) | 37 | spin_lock(&dbf->pay_lock); |
54 | { | 38 | memset(pl, 0, sizeof(*pl)); |
55 | *buf += sprintf(*buf, "%-24s%s\n", s1, s2); | 39 | pl->fsf_req_id = req_id; |
56 | } | 40 | memcpy(pl->area, area, ZFCP_DBF_TAG_LEN); |
57 | 41 | ||
58 | static void zfcp_dbf_out(char **buf, const char *s, const char *format, ...) | 42 | while (offset < length) { |
59 | { | 43 | rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC, |
60 | va_list arg; | 44 | (u16) (length - offset)); |
45 | memcpy(pl->data, data + offset, rec_length); | ||
46 | debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length)); | ||
61 | 47 | ||
62 | *buf += sprintf(*buf, "%-24s", s); | 48 | offset += rec_length; |
63 | va_start(arg, format); | 49 | pl->counter++; |
64 | *buf += vsprintf(*buf, format, arg); | ||
65 | va_end(arg); | ||
66 | *buf += sprintf(*buf, "\n"); | ||
67 | } | ||
68 | |||
69 | static void zfcp_dbf_outd(char **p, const char *label, char *buffer, | ||
70 | int buflen, int offset, int total_size) | ||
71 | { | ||
72 | if (!offset) | ||
73 | *p += sprintf(*p, "%-24s ", label); | ||
74 | while (buflen--) { | ||
75 | if (offset > 0) { | ||
76 | if ((offset % 32) == 0) | ||
77 | *p += sprintf(*p, "\n%-24c ", ' '); | ||
78 | else if ((offset % 4) == 0) | ||
79 | *p += sprintf(*p, " "); | ||
80 | } | ||
81 | *p += sprintf(*p, "%02x", *buffer++); | ||
82 | if (++offset == total_size) { | ||
83 | *p += sprintf(*p, "\n"); | ||
84 | break; | ||
85 | } | ||
86 | } | 50 | } |
87 | if (!total_size) | ||
88 | *p += sprintf(*p, "\n"); | ||
89 | } | ||
90 | 51 | ||
91 | static int zfcp_dbf_view_header(debug_info_t *id, struct debug_view *view, | 52 | spin_unlock(&dbf->pay_lock); |
92 | int area, debug_entry_t *entry, char *out_buf) | ||
93 | { | ||
94 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)DEBUG_DATA(entry); | ||
95 | struct timespec t; | ||
96 | char *p = out_buf; | ||
97 | |||
98 | if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) { | ||
99 | stck_to_timespec(entry->id.stck, &t); | ||
100 | zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu", | ||
101 | t.tv_sec, t.tv_nsec); | ||
102 | zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid); | ||
103 | } else { | ||
104 | zfcp_dbf_outd(&p, "", dump->data, dump->size, dump->offset, | ||
105 | dump->total_size); | ||
106 | if ((dump->offset + dump->size) == dump->total_size) | ||
107 | p += sprintf(p, "\n"); | ||
108 | } | ||
109 | return p - out_buf; | ||
110 | } | 53 | } |
111 | 54 | ||
112 | void _zfcp_dbf_hba_fsf_response(const char *tag2, int level, | 55 | /** |
113 | struct zfcp_fsf_req *fsf_req, | 56 | * zfcp_dbf_hba_fsf_res - trace event for fsf responses |
114 | struct zfcp_dbf *dbf) | 57 | * @tag: tag indicating which kind of unsolicited status has been received |
58 | * @req: request for which a response was received | ||
59 | */ | ||
60 | void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) | ||
115 | { | 61 | { |
116 | struct fsf_qtcb *qtcb = fsf_req->qtcb; | 62 | struct zfcp_dbf *dbf = req->adapter->dbf; |
117 | union fsf_prot_status_qual *prot_status_qual = | 63 | struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; |
118 | &qtcb->prefix.prot_status_qual; | 64 | struct fsf_qtcb_header *q_head = &req->qtcb->header; |
119 | union fsf_status_qual *fsf_status_qual = &qtcb->header.fsf_status_qual; | 65 | struct zfcp_dbf_hba *rec = &dbf->hba_buf; |
120 | struct scsi_cmnd *scsi_cmnd; | ||
121 | struct zfcp_port *port; | ||
122 | struct zfcp_unit *unit; | ||
123 | struct zfcp_send_els *send_els; | ||
124 | struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; | ||
125 | struct zfcp_dbf_hba_record_response *response = &rec->u.response; | ||
126 | unsigned long flags; | 66 | unsigned long flags; |
127 | 67 | ||
128 | spin_lock_irqsave(&dbf->hba_lock, flags); | 68 | spin_lock_irqsave(&dbf->hba_lock, flags); |
129 | memset(rec, 0, sizeof(*rec)); | 69 | memset(rec, 0, sizeof(*rec)); |
130 | strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE); | ||
131 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); | ||
132 | |||
133 | response->fsf_command = fsf_req->fsf_command; | ||
134 | response->fsf_reqid = fsf_req->req_id; | ||
135 | response->fsf_seqno = fsf_req->seq_no; | ||
136 | response->fsf_issued = fsf_req->issued; | ||
137 | response->fsf_prot_status = qtcb->prefix.prot_status; | ||
138 | response->fsf_status = qtcb->header.fsf_status; | ||
139 | memcpy(response->fsf_prot_status_qual, | ||
140 | prot_status_qual, FSF_PROT_STATUS_QUAL_SIZE); | ||
141 | memcpy(response->fsf_status_qual, | ||
142 | fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE); | ||
143 | response->fsf_req_status = fsf_req->status; | ||
144 | response->sbal_first = fsf_req->qdio_req.sbal_first; | ||
145 | response->sbal_last = fsf_req->qdio_req.sbal_last; | ||
146 | response->sbal_response = fsf_req->qdio_req.sbal_response; | ||
147 | response->pool = fsf_req->pool != NULL; | ||
148 | response->erp_action = (unsigned long)fsf_req->erp_action; | ||
149 | |||
150 | switch (fsf_req->fsf_command) { | ||
151 | case FSF_QTCB_FCP_CMND: | ||
152 | if (fsf_req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) | ||
153 | break; | ||
154 | scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; | ||
155 | if (scsi_cmnd) { | ||
156 | response->u.fcp.cmnd = (unsigned long)scsi_cmnd; | ||
157 | response->u.fcp.data_dir = | ||
158 | qtcb->bottom.io.data_direction; | ||
159 | } | ||
160 | break; | ||
161 | |||
162 | case FSF_QTCB_OPEN_PORT_WITH_DID: | ||
163 | case FSF_QTCB_CLOSE_PORT: | ||
164 | case FSF_QTCB_CLOSE_PHYSICAL_PORT: | ||
165 | port = (struct zfcp_port *)fsf_req->data; | ||
166 | response->u.port.wwpn = port->wwpn; | ||
167 | response->u.port.d_id = port->d_id; | ||
168 | response->u.port.port_handle = qtcb->header.port_handle; | ||
169 | break; | ||
170 | |||
171 | case FSF_QTCB_OPEN_LUN: | ||
172 | case FSF_QTCB_CLOSE_LUN: | ||
173 | unit = (struct zfcp_unit *)fsf_req->data; | ||
174 | port = unit->port; | ||
175 | response->u.unit.wwpn = port->wwpn; | ||
176 | response->u.unit.fcp_lun = unit->fcp_lun; | ||
177 | response->u.unit.port_handle = qtcb->header.port_handle; | ||
178 | response->u.unit.lun_handle = qtcb->header.lun_handle; | ||
179 | break; | ||
180 | |||
181 | case FSF_QTCB_SEND_ELS: | ||
182 | send_els = (struct zfcp_send_els *)fsf_req->data; | ||
183 | response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id); | ||
184 | break; | ||
185 | |||
186 | case FSF_QTCB_ABORT_FCP_CMND: | ||
187 | case FSF_QTCB_SEND_GENERIC: | ||
188 | case FSF_QTCB_EXCHANGE_CONFIG_DATA: | ||
189 | case FSF_QTCB_EXCHANGE_PORT_DATA: | ||
190 | case FSF_QTCB_DOWNLOAD_CONTROL_FILE: | ||
191 | case FSF_QTCB_UPLOAD_CONTROL_FILE: | ||
192 | break; | ||
193 | } | ||
194 | |||
195 | debug_event(dbf->hba, level, rec, sizeof(*rec)); | ||
196 | 70 | ||
197 | /* have fcp channel microcode fixed to use as little as possible */ | 71 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
198 | if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) { | 72 | rec->id = ZFCP_DBF_HBA_RES; |
199 | /* adjust length skipping trailing zeros */ | 73 | rec->fsf_req_id = req->req_id; |
200 | char *buf = (char *)qtcb + qtcb->header.log_start; | 74 | rec->fsf_req_status = req->status; |
201 | int len = qtcb->header.log_length; | 75 | rec->fsf_cmd = req->fsf_command; |
202 | for (; len && !buf[len - 1]; len--); | 76 | rec->fsf_seq_no = req->seq_no; |
203 | zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf, | 77 | rec->u.res.req_issued = req->issued; |
204 | len); | 78 | rec->u.res.prot_status = q_pref->prot_status; |
79 | rec->u.res.fsf_status = q_head->fsf_status; | ||
80 | |||
81 | memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, | ||
82 | FSF_PROT_STATUS_QUAL_SIZE); | ||
83 | memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual, | ||
84 | FSF_STATUS_QUALIFIER_SIZE); | ||
85 | |||
86 | if (req->fsf_command != FSF_QTCB_FCP_CMND) { | ||
87 | rec->pl_len = q_head->log_length; | ||
88 | zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start, | ||
89 | rec->pl_len, "fsf_res", req->req_id); | ||
205 | } | 90 | } |
206 | 91 | ||
207 | spin_unlock_irqrestore(&dbf->hba_lock, flags); | 92 | debug_event(dbf->hba, 1, rec, sizeof(*rec)); |
208 | } | ||
209 | |||
210 | void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf, | ||
211 | struct fsf_status_read_buffer *status_buffer) | ||
212 | { | ||
213 | struct zfcp_dbf_hba_record *rec = &dbf->hba_buf; | ||
214 | unsigned long flags; | ||
215 | |||
216 | spin_lock_irqsave(&dbf->hba_lock, flags); | ||
217 | memset(rec, 0, sizeof(*rec)); | ||
218 | strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE); | ||
219 | strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE); | ||
220 | |||
221 | rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss); | ||
222 | if (status_buffer != NULL) { | ||
223 | rec->u.status.status_type = status_buffer->status_type; | ||
224 | rec->u.status.status_subtype = status_buffer->status_subtype; | ||
225 | memcpy(&rec->u.status.queue_designator, | ||
226 | &status_buffer->queue_designator, | ||
227 | sizeof(struct fsf_queue_designator)); | ||
228 | |||
229 | switch (status_buffer->status_type) { | ||
230 | case FSF_STATUS_READ_SENSE_DATA_AVAIL: | ||
231 | rec->u.status.payload_size = | ||
232 | ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL; | ||
233 | break; | ||
234 | |||
235 | case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: | ||
236 | rec->u.status.payload_size = | ||
237 | ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD; | ||
238 | break; | ||
239 | |||
240 | case FSF_STATUS_READ_LINK_DOWN: | ||
241 | switch (status_buffer->status_subtype) { | ||
242 | case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: | ||
243 | case FSF_STATUS_READ_SUB_FDISC_FAILED: | ||
244 | rec->u.status.payload_size = | ||
245 | sizeof(struct fsf_link_down_info); | ||
246 | } | ||
247 | break; | ||
248 | |||
249 | case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: | ||
250 | rec->u.status.payload_size = | ||
251 | ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT; | ||
252 | break; | ||
253 | } | ||
254 | memcpy(&rec->u.status.payload, | ||
255 | &status_buffer->payload, rec->u.status.payload_size); | ||
256 | } | ||
257 | |||
258 | debug_event(dbf->hba, level, rec, sizeof(*rec)); | ||
259 | spin_unlock_irqrestore(&dbf->hba_lock, flags); | 93 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
260 | } | 94 | } |
261 | 95 | ||
262 | /** | 96 | /** |
263 | * zfcp_dbf_hba_qdio - trace event for QDIO related failure | 97 | * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer |
264 | * @qdio: qdio structure affected by this QDIO related event | 98 | * @tag: tag indicating which kind of unsolicited status has been received |
265 | * @qdio_error: as passed by qdio module | 99 | * @req: request providing the unsolicited status |
266 | * @sbal_index: first buffer with error condition, as passed by qdio module | ||
267 | * @sbal_count: number of buffers affected, as passed by qdio module | ||
268 | */ | 100 | */ |
269 | void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error, | 101 | void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req) |
270 | int sbal_index, int sbal_count) | ||
271 | { | 102 | { |
272 | struct zfcp_dbf_hba_record *r = &dbf->hba_buf; | 103 | struct zfcp_dbf *dbf = req->adapter->dbf; |
104 | struct fsf_status_read_buffer *srb = req->data; | ||
105 | struct zfcp_dbf_hba *rec = &dbf->hba_buf; | ||
273 | unsigned long flags; | 106 | unsigned long flags; |
274 | 107 | ||
275 | spin_lock_irqsave(&dbf->hba_lock, flags); | 108 | spin_lock_irqsave(&dbf->hba_lock, flags); |
276 | memset(r, 0, sizeof(*r)); | 109 | memset(rec, 0, sizeof(*rec)); |
277 | strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE); | 110 | |
278 | r->u.qdio.qdio_error = qdio_error; | 111 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
279 | r->u.qdio.sbal_index = sbal_index; | 112 | rec->id = ZFCP_DBF_HBA_USS; |
280 | r->u.qdio.sbal_count = sbal_count; | 113 | rec->fsf_req_id = req->req_id; |
281 | debug_event(dbf->hba, 0, r, sizeof(*r)); | 114 | rec->fsf_req_status = req->status; |
115 | rec->fsf_cmd = req->fsf_command; | ||
116 | |||
117 | if (!srb) | ||
118 | goto log; | ||
119 | |||
120 | rec->u.uss.status_type = srb->status_type; | ||
121 | rec->u.uss.status_subtype = srb->status_subtype; | ||
122 | rec->u.uss.d_id = ntoh24(srb->d_id); | ||
123 | rec->u.uss.lun = srb->fcp_lun; | ||
124 | memcpy(&rec->u.uss.queue_designator, &srb->queue_designator, | ||
125 | sizeof(rec->u.uss.queue_designator)); | ||
126 | |||
127 | /* status read buffer payload length */ | ||
128 | rec->pl_len = (!srb->length) ? 0 : srb->length - | ||
129 | offsetof(struct fsf_status_read_buffer, payload); | ||
130 | |||
131 | if (rec->pl_len) | ||
132 | zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len, | ||
133 | "fsf_uss", req->req_id); | ||
134 | log: | ||
135 | debug_event(dbf->hba, 2, rec, sizeof(*rec)); | ||
282 | spin_unlock_irqrestore(&dbf->hba_lock, flags); | 136 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
283 | } | 137 | } |
284 | 138 | ||
285 | /** | 139 | /** |
286 | * zfcp_dbf_hba_berr - trace event for bit error threshold | 140 | * zfcp_dbf_hba_bit_err - trace event for bit error conditions |
287 | * @dbf: dbf structure affected by this QDIO related event | 141 | * @tag: tag indicating which kind of unsolicited status has been received |
288 | * @req: fsf request | 142 | * @req: request which caused the bit_error condition |
289 | */ | 143 | */ |
290 | void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req) | 144 | void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) |
291 | { | 145 | { |
292 | struct zfcp_dbf_hba_record *r = &dbf->hba_buf; | 146 | struct zfcp_dbf *dbf = req->adapter->dbf; |
147 | struct zfcp_dbf_hba *rec = &dbf->hba_buf; | ||
293 | struct fsf_status_read_buffer *sr_buf = req->data; | 148 | struct fsf_status_read_buffer *sr_buf = req->data; |
294 | struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error; | ||
295 | unsigned long flags; | 149 | unsigned long flags; |
296 | 150 | ||
297 | spin_lock_irqsave(&dbf->hba_lock, flags); | 151 | spin_lock_irqsave(&dbf->hba_lock, flags); |
298 | memset(r, 0, sizeof(*r)); | 152 | memset(rec, 0, sizeof(*rec)); |
299 | strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE); | ||
300 | memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload)); | ||
301 | debug_event(dbf->hba, 0, r, sizeof(*r)); | ||
302 | spin_unlock_irqrestore(&dbf->hba_lock, flags); | ||
303 | } | ||
304 | static void zfcp_dbf_hba_view_response(char **p, | ||
305 | struct zfcp_dbf_hba_record_response *r) | ||
306 | { | ||
307 | struct timespec t; | ||
308 | |||
309 | zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command); | ||
310 | zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); | ||
311 | zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno); | ||
312 | stck_to_timespec(r->fsf_issued, &t); | ||
313 | zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); | ||
314 | zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status); | ||
315 | zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status); | ||
316 | zfcp_dbf_outd(p, "fsf_prot_status_qual", r->fsf_prot_status_qual, | ||
317 | FSF_PROT_STATUS_QUAL_SIZE, 0, FSF_PROT_STATUS_QUAL_SIZE); | ||
318 | zfcp_dbf_outd(p, "fsf_status_qual", r->fsf_status_qual, | ||
319 | FSF_STATUS_QUALIFIER_SIZE, 0, FSF_STATUS_QUALIFIER_SIZE); | ||
320 | zfcp_dbf_out(p, "fsf_req_status", "0x%08x", r->fsf_req_status); | ||
321 | zfcp_dbf_out(p, "sbal_first", "0x%02x", r->sbal_first); | ||
322 | zfcp_dbf_out(p, "sbal_last", "0x%02x", r->sbal_last); | ||
323 | zfcp_dbf_out(p, "sbal_response", "0x%02x", r->sbal_response); | ||
324 | zfcp_dbf_out(p, "pool", "0x%02x", r->pool); | ||
325 | |||
326 | switch (r->fsf_command) { | ||
327 | case FSF_QTCB_FCP_CMND: | ||
328 | if (r->fsf_req_status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT) | ||
329 | break; | ||
330 | zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir); | ||
331 | zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd); | ||
332 | *p += sprintf(*p, "\n"); | ||
333 | break; | ||
334 | |||
335 | case FSF_QTCB_OPEN_PORT_WITH_DID: | ||
336 | case FSF_QTCB_CLOSE_PORT: | ||
337 | case FSF_QTCB_CLOSE_PHYSICAL_PORT: | ||
338 | zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.port.wwpn); | ||
339 | zfcp_dbf_out(p, "d_id", "0x%06x", r->u.port.d_id); | ||
340 | zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.port.port_handle); | ||
341 | break; | ||
342 | |||
343 | case FSF_QTCB_OPEN_LUN: | ||
344 | case FSF_QTCB_CLOSE_LUN: | ||
345 | zfcp_dbf_out(p, "wwpn", "0x%016Lx", r->u.unit.wwpn); | ||
346 | zfcp_dbf_out(p, "fcp_lun", "0x%016Lx", r->u.unit.fcp_lun); | ||
347 | zfcp_dbf_out(p, "port_handle", "0x%08x", r->u.unit.port_handle); | ||
348 | zfcp_dbf_out(p, "lun_handle", "0x%08x", r->u.unit.lun_handle); | ||
349 | break; | ||
350 | |||
351 | case FSF_QTCB_SEND_ELS: | ||
352 | zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id); | ||
353 | break; | ||
354 | |||
355 | case FSF_QTCB_ABORT_FCP_CMND: | ||
356 | case FSF_QTCB_SEND_GENERIC: | ||
357 | case FSF_QTCB_EXCHANGE_CONFIG_DATA: | ||
358 | case FSF_QTCB_EXCHANGE_PORT_DATA: | ||
359 | case FSF_QTCB_DOWNLOAD_CONTROL_FILE: | ||
360 | case FSF_QTCB_UPLOAD_CONTROL_FILE: | ||
361 | break; | ||
362 | } | ||
363 | } | ||
364 | |||
365 | static void zfcp_dbf_hba_view_status(char **p, | ||
366 | struct zfcp_dbf_hba_record_status *r) | ||
367 | { | ||
368 | zfcp_dbf_out(p, "failed", "0x%02x", r->failed); | ||
369 | zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type); | ||
370 | zfcp_dbf_out(p, "status_subtype", "0x%08x", r->status_subtype); | ||
371 | zfcp_dbf_outd(p, "queue_designator", (char *)&r->queue_designator, | ||
372 | sizeof(struct fsf_queue_designator), 0, | ||
373 | sizeof(struct fsf_queue_designator)); | ||
374 | zfcp_dbf_outd(p, "payload", (char *)&r->payload, r->payload_size, 0, | ||
375 | r->payload_size); | ||
376 | } | ||
377 | |||
378 | static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r) | ||
379 | { | ||
380 | zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error); | ||
381 | zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index); | ||
382 | zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count); | ||
383 | } | ||
384 | 153 | ||
385 | static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r) | 154 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
386 | { | 155 | rec->id = ZFCP_DBF_HBA_BIT; |
387 | zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count); | 156 | rec->fsf_req_id = req->req_id; |
388 | zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count); | 157 | rec->fsf_req_status = req->status; |
389 | zfcp_dbf_out(p, "loss_of_sig_err", "%d", r->loss_of_signal_error_count); | 158 | rec->fsf_cmd = req->fsf_command; |
390 | zfcp_dbf_out(p, "prim_seq_err", "%d", | 159 | memcpy(&rec->u.be, &sr_buf->payload.bit_error, |
391 | r->primitive_sequence_error_count); | 160 | sizeof(struct fsf_bit_error_payload)); |
392 | zfcp_dbf_out(p, "inval_trans_word_err", "%d", | ||
393 | r->invalid_transmission_word_error_count); | ||
394 | zfcp_dbf_out(p, "CRC_errors", "%d", r->crc_error_count); | ||
395 | zfcp_dbf_out(p, "prim_seq_event_to", "%d", | ||
396 | r->primitive_sequence_event_timeout_count); | ||
397 | zfcp_dbf_out(p, "elast_buf_overrun_err", "%d", | ||
398 | r->elastic_buffer_overrun_error_count); | ||
399 | zfcp_dbf_out(p, "adv_rec_buf2buf_cred", "%d", | ||
400 | r->advertised_receive_b2b_credit); | ||
401 | zfcp_dbf_out(p, "curr_rec_buf2buf_cred", "%d", | ||
402 | r->current_receive_b2b_credit); | ||
403 | zfcp_dbf_out(p, "adv_trans_buf2buf_cred", "%d", | ||
404 | r->advertised_transmit_b2b_credit); | ||
405 | zfcp_dbf_out(p, "curr_trans_buf2buf_cred", "%d", | ||
406 | r->current_transmit_b2b_credit); | ||
407 | } | ||
408 | 161 | ||
409 | static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view, | 162 | debug_event(dbf->hba, 1, rec, sizeof(*rec)); |
410 | char *out_buf, const char *in_buf) | 163 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
411 | { | ||
412 | struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf; | ||
413 | char *p = out_buf; | ||
414 | |||
415 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | ||
416 | return 0; | ||
417 | |||
418 | zfcp_dbf_tag(&p, "tag", r->tag); | ||
419 | if (isalpha(r->tag2[0])) | ||
420 | zfcp_dbf_tag(&p, "tag2", r->tag2); | ||
421 | |||
422 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0) | ||
423 | zfcp_dbf_hba_view_response(&p, &r->u.response); | ||
424 | else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0) | ||
425 | zfcp_dbf_hba_view_status(&p, &r->u.status); | ||
426 | else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0) | ||
427 | zfcp_dbf_hba_view_qdio(&p, &r->u.qdio); | ||
428 | else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0) | ||
429 | zfcp_dbf_hba_view_berr(&p, &r->u.berr); | ||
430 | |||
431 | if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0) | ||
432 | p += sprintf(p, "\n"); | ||
433 | return p - out_buf; | ||
434 | } | 164 | } |
435 | 165 | ||
436 | static struct debug_view zfcp_dbf_hba_view = { | 166 | static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, |
437 | .name = "structured", | 167 | struct zfcp_adapter *adapter, |
438 | .header_proc = zfcp_dbf_view_header, | 168 | struct zfcp_port *port, |
439 | .format_proc = zfcp_dbf_hba_view_format, | 169 | struct scsi_device *sdev) |
440 | }; | ||
441 | |||
442 | static const char *zfcp_dbf_rec_tags[] = { | ||
443 | [ZFCP_REC_DBF_ID_THREAD] = "thread", | ||
444 | [ZFCP_REC_DBF_ID_TARGET] = "target", | ||
445 | [ZFCP_REC_DBF_ID_TRIGGER] = "trigger", | ||
446 | [ZFCP_REC_DBF_ID_ACTION] = "action", | ||
447 | }; | ||
448 | |||
449 | static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view, | ||
450 | char *buf, const char *_rec) | ||
451 | { | 170 | { |
452 | struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec; | 171 | rec->adapter_status = atomic_read(&adapter->status); |
453 | char *p = buf; | 172 | if (port) { |
454 | char hint[ZFCP_DBF_ID_SIZE + 1]; | 173 | rec->port_status = atomic_read(&port->status); |
455 | 174 | rec->wwpn = port->wwpn; | |
456 | memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE); | 175 | rec->d_id = port->d_id; |
457 | hint[ZFCP_DBF_ID_SIZE] = 0; | 176 | } |
458 | zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]); | 177 | if (sdev) { |
459 | zfcp_dbf_outs(&p, "hint", hint); | 178 | rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); |
460 | switch (r->id) { | 179 | rec->lun = zfcp_scsi_dev_lun(sdev); |
461 | case ZFCP_REC_DBF_ID_THREAD: | ||
462 | zfcp_dbf_out(&p, "total", "%d", r->u.thread.total); | ||
463 | zfcp_dbf_out(&p, "ready", "%d", r->u.thread.ready); | ||
464 | zfcp_dbf_out(&p, "running", "%d", r->u.thread.running); | ||
465 | break; | ||
466 | case ZFCP_REC_DBF_ID_TARGET: | ||
467 | zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.target.ref); | ||
468 | zfcp_dbf_out(&p, "status", "0x%08x", r->u.target.status); | ||
469 | zfcp_dbf_out(&p, "erp_count", "%d", r->u.target.erp_count); | ||
470 | zfcp_dbf_out(&p, "d_id", "0x%06x", r->u.target.d_id); | ||
471 | zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.target.wwpn); | ||
472 | zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.target.fcp_lun); | ||
473 | break; | ||
474 | case ZFCP_REC_DBF_ID_TRIGGER: | ||
475 | zfcp_dbf_out(&p, "reference", "0x%016Lx", r->u.trigger.ref); | ||
476 | zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.trigger.action); | ||
477 | zfcp_dbf_out(&p, "requested", "%d", r->u.trigger.want); | ||
478 | zfcp_dbf_out(&p, "executed", "%d", r->u.trigger.need); | ||
479 | zfcp_dbf_out(&p, "wwpn", "0x%016Lx", r->u.trigger.wwpn); | ||
480 | zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun); | ||
481 | zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as); | ||
482 | zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps); | ||
483 | zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls); | ||
484 | break; | ||
485 | case ZFCP_REC_DBF_ID_ACTION: | ||
486 | zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action); | ||
487 | zfcp_dbf_out(&p, "fsf_req", "0x%016Lx", r->u.action.fsf_req); | ||
488 | zfcp_dbf_out(&p, "status", "0x%08Lx", r->u.action.status); | ||
489 | zfcp_dbf_out(&p, "step", "0x%08Lx", r->u.action.step); | ||
490 | break; | ||
491 | } | 180 | } |
492 | p += sprintf(p, "\n"); | ||
493 | return p - buf; | ||
494 | } | 181 | } |
495 | 182 | ||
496 | static struct debug_view zfcp_dbf_rec_view = { | ||
497 | .name = "structured", | ||
498 | .header_proc = zfcp_dbf_view_header, | ||
499 | .format_proc = zfcp_dbf_rec_view_format, | ||
500 | }; | ||
501 | |||
502 | /** | 183 | /** |
503 | * zfcp_dbf_rec_thread - trace event related to recovery thread operation | 184 | * zfcp_dbf_rec_trig - trace event related to triggered recovery |
504 | * @id2: identifier for event | 185 | * @tag: identifier for event |
505 | * @dbf: reference to dbf structure | 186 | * @adapter: adapter on which the erp_action should run |
506 | * This function assumes that the caller is holding erp_lock. | 187 | * @port: remote port involved in the erp_action |
188 | * @sdev: scsi device involved in the erp_action | ||
189 | * @want: wanted erp_action | ||
190 | * @need: required erp_action | ||
191 | * | ||
192 | * The adapter->erp_lock has to be held. | ||
507 | */ | 193 | */ |
508 | void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf) | 194 | void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, |
195 | struct zfcp_port *port, struct scsi_device *sdev, | ||
196 | u8 want, u8 need) | ||
509 | { | 197 | { |
510 | struct zfcp_adapter *adapter = dbf->adapter; | 198 | struct zfcp_dbf *dbf = adapter->dbf; |
511 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | 199 | struct zfcp_dbf_rec *rec = &dbf->rec_buf; |
512 | unsigned long flags = 0; | ||
513 | struct list_head *entry; | 200 | struct list_head *entry; |
514 | unsigned ready = 0, running = 0, total; | ||
515 | |||
516 | list_for_each(entry, &adapter->erp_ready_head) | ||
517 | ready++; | ||
518 | list_for_each(entry, &adapter->erp_running_head) | ||
519 | running++; | ||
520 | total = adapter->erp_total_count; | ||
521 | |||
522 | spin_lock_irqsave(&dbf->rec_lock, flags); | ||
523 | memset(r, 0, sizeof(*r)); | ||
524 | r->id = ZFCP_REC_DBF_ID_THREAD; | ||
525 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | ||
526 | r->u.thread.total = total; | ||
527 | r->u.thread.ready = ready; | ||
528 | r->u.thread.running = running; | ||
529 | debug_event(dbf->rec, 6, r, sizeof(*r)); | ||
530 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | ||
531 | } | ||
532 | |||
533 | /** | ||
534 | * zfcp_dbf_rec_thread - trace event related to recovery thread operation | ||
535 | * @id2: identifier for event | ||
536 | * @adapter: adapter | ||
537 | * This function assumes that the caller does not hold erp_lock. | ||
538 | */ | ||
539 | void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf) | ||
540 | { | ||
541 | struct zfcp_adapter *adapter = dbf->adapter; | ||
542 | unsigned long flags; | ||
543 | |||
544 | read_lock_irqsave(&adapter->erp_lock, flags); | ||
545 | zfcp_dbf_rec_thread(id2, dbf); | ||
546 | read_unlock_irqrestore(&adapter->erp_lock, flags); | ||
547 | } | ||
548 | |||
549 | static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf, | ||
550 | atomic_t *status, atomic_t *erp_count, u64 wwpn, | ||
551 | u32 d_id, u64 fcp_lun) | ||
552 | { | ||
553 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | ||
554 | unsigned long flags; | 201 | unsigned long flags; |
555 | 202 | ||
556 | spin_lock_irqsave(&dbf->rec_lock, flags); | 203 | spin_lock_irqsave(&dbf->rec_lock, flags); |
557 | memset(r, 0, sizeof(*r)); | 204 | memset(rec, 0, sizeof(*rec)); |
558 | r->id = ZFCP_REC_DBF_ID_TARGET; | ||
559 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | ||
560 | r->u.target.ref = (unsigned long)ref; | ||
561 | r->u.target.status = atomic_read(status); | ||
562 | r->u.target.wwpn = wwpn; | ||
563 | r->u.target.d_id = d_id; | ||
564 | r->u.target.fcp_lun = fcp_lun; | ||
565 | r->u.target.erp_count = atomic_read(erp_count); | ||
566 | debug_event(dbf->rec, 3, r, sizeof(*r)); | ||
567 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | ||
568 | } | ||
569 | |||
570 | /** | ||
571 | * zfcp_dbf_rec_adapter - trace event for adapter state change | ||
572 | * @id: identifier for trigger of state change | ||
573 | * @ref: additional reference (e.g. request) | ||
574 | * @dbf: reference to dbf structure | ||
575 | */ | ||
576 | void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf) | ||
577 | { | ||
578 | struct zfcp_adapter *adapter = dbf->adapter; | ||
579 | |||
580 | zfcp_dbf_rec_target(id, ref, dbf, &adapter->status, | ||
581 | &adapter->erp_counter, 0, 0, | ||
582 | ZFCP_DBF_INVALID_LUN); | ||
583 | } | ||
584 | |||
585 | /** | ||
586 | * zfcp_dbf_rec_port - trace event for port state change | ||
587 | * @id: identifier for trigger of state change | ||
588 | * @ref: additional reference (e.g. request) | ||
589 | * @port: port | ||
590 | */ | ||
591 | void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port) | ||
592 | { | ||
593 | struct zfcp_dbf *dbf = port->adapter->dbf; | ||
594 | 205 | ||
595 | zfcp_dbf_rec_target(id, ref, dbf, &port->status, | 206 | rec->id = ZFCP_DBF_REC_TRIG; |
596 | &port->erp_counter, port->wwpn, port->d_id, | 207 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
597 | ZFCP_DBF_INVALID_LUN); | 208 | zfcp_dbf_set_common(rec, adapter, port, sdev); |
598 | } | ||
599 | 209 | ||
600 | /** | 210 | list_for_each(entry, &adapter->erp_ready_head) |
601 | * zfcp_dbf_rec_lun - trace event for LUN state change | 211 | rec->u.trig.ready++; |
602 | * @id: identifier for trigger of state change | ||
603 | * @ref: additional reference (e.g. request) | ||
604 | * @sdev: SCSI device | ||
605 | */ | ||
606 | void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev) | ||
607 | { | ||
608 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | ||
609 | struct zfcp_port *port = zfcp_sdev->port; | ||
610 | struct zfcp_dbf *dbf = port->adapter->dbf; | ||
611 | 212 | ||
612 | zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status, | 213 | list_for_each(entry, &adapter->erp_running_head) |
613 | &zfcp_sdev->erp_counter, port->wwpn, port->d_id, | 214 | rec->u.trig.running++; |
614 | zfcp_scsi_dev_lun(sdev)); | ||
615 | } | ||
616 | 215 | ||
617 | /** | 216 | rec->u.trig.want = want; |
618 | * zfcp_dbf_rec_trigger - trace event for triggered error recovery | 217 | rec->u.trig.need = need; |
619 | * @id2: identifier for error recovery trigger | ||
620 | * @ref: additional reference (e.g. request) | ||
621 | * @want: originally requested error recovery action | ||
622 | * @need: error recovery action actually initiated | ||
623 | * @action: address of error recovery action struct | ||
624 | * @adapter: adapter | ||
625 | * @port: port | ||
626 | * @sdev: SCSI device | ||
627 | */ | ||
628 | void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action, | ||
629 | struct zfcp_adapter *adapter, struct zfcp_port *port, | ||
630 | struct scsi_device *sdev) | ||
631 | { | ||
632 | struct zfcp_dbf *dbf = adapter->dbf; | ||
633 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | ||
634 | unsigned long flags; | ||
635 | 218 | ||
636 | spin_lock_irqsave(&dbf->rec_lock, flags); | 219 | debug_event(dbf->rec, 1, rec, sizeof(*rec)); |
637 | memset(r, 0, sizeof(*r)); | ||
638 | r->id = ZFCP_REC_DBF_ID_TRIGGER; | ||
639 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | ||
640 | r->u.trigger.ref = (unsigned long)ref; | ||
641 | r->u.trigger.want = want; | ||
642 | r->u.trigger.need = need; | ||
643 | r->u.trigger.action = (unsigned long)action; | ||
644 | r->u.trigger.as = atomic_read(&adapter->status); | ||
645 | if (port) { | ||
646 | r->u.trigger.ps = atomic_read(&port->status); | ||
647 | r->u.trigger.wwpn = port->wwpn; | ||
648 | } | ||
649 | if (sdev) | ||
650 | r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status); | ||
651 | r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) : | ||
652 | ZFCP_DBF_INVALID_LUN; | ||
653 | debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r)); | ||
654 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | 220 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
655 | } | 221 | } |
656 | 222 | ||
223 | |||
657 | /** | 224 | /** |
658 | * zfcp_dbf_rec_action - trace event showing progress of recovery action | 225 | * zfcp_dbf_rec_run - trace event related to running recovery |
659 | * @id2: identifier | 226 | * @tag: identifier for event |
660 | * @erp_action: error recovery action struct pointer | 227 | * @erp: erp_action running |
661 | */ | 228 | */ |
662 | void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action) | 229 | void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) |
663 | { | 230 | { |
664 | struct zfcp_dbf *dbf = erp_action->adapter->dbf; | 231 | struct zfcp_dbf *dbf = erp->adapter->dbf; |
665 | struct zfcp_dbf_rec_record *r = &dbf->rec_buf; | 232 | struct zfcp_dbf_rec *rec = &dbf->rec_buf; |
666 | unsigned long flags; | 233 | unsigned long flags; |
667 | 234 | ||
668 | spin_lock_irqsave(&dbf->rec_lock, flags); | 235 | spin_lock_irqsave(&dbf->rec_lock, flags); |
669 | memset(r, 0, sizeof(*r)); | 236 | memset(rec, 0, sizeof(*rec)); |
670 | r->id = ZFCP_REC_DBF_ID_ACTION; | ||
671 | memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE); | ||
672 | r->u.action.action = (unsigned long)erp_action; | ||
673 | r->u.action.status = erp_action->status; | ||
674 | r->u.action.step = erp_action->step; | ||
675 | r->u.action.fsf_req = erp_action->fsf_req_id; | ||
676 | debug_event(dbf->rec, 5, r, sizeof(*r)); | ||
677 | spin_unlock_irqrestore(&dbf->rec_lock, flags); | ||
678 | } | ||
679 | 237 | ||
680 | /** | 238 | rec->id = ZFCP_DBF_REC_RUN; |
681 | * zfcp_dbf_san_ct_request - trace event for issued CT request | 239 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
682 | * @fsf_req: request containing issued CT data | 240 | zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev); |
683 | * @d_id: destination id where ct request is sent to | ||
684 | */ | ||
685 | void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id) | ||
686 | { | ||
687 | struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; | ||
688 | struct zfcp_adapter *adapter = fsf_req->adapter; | ||
689 | struct zfcp_dbf *dbf = adapter->dbf; | ||
690 | struct fc_ct_hdr *hdr = sg_virt(ct->req); | ||
691 | struct zfcp_dbf_san_record *r = &dbf->san_buf; | ||
692 | struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req; | ||
693 | int level = 3; | ||
694 | unsigned long flags; | ||
695 | 241 | ||
696 | spin_lock_irqsave(&dbf->san_lock, flags); | 242 | rec->u.run.fsf_req_id = erp->fsf_req_id; |
697 | memset(r, 0, sizeof(*r)); | 243 | rec->u.run.rec_status = erp->status; |
698 | strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); | 244 | rec->u.run.rec_step = erp->step; |
699 | r->fsf_reqid = fsf_req->req_id; | 245 | rec->u.run.rec_action = erp->action; |
700 | r->fsf_seqno = fsf_req->seq_no; | ||
701 | oct->d_id = d_id; | ||
702 | oct->cmd_req_code = hdr->ct_cmd; | ||
703 | oct->revision = hdr->ct_rev; | ||
704 | oct->gs_type = hdr->ct_fs_type; | ||
705 | oct->gs_subtype = hdr->ct_fs_subtype; | ||
706 | oct->options = hdr->ct_options; | ||
707 | oct->max_res_size = hdr->ct_mr_size; | ||
708 | oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr), | ||
709 | ZFCP_DBF_SAN_MAX_PAYLOAD); | ||
710 | debug_event(dbf->san, level, r, sizeof(*r)); | ||
711 | zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, | ||
712 | (void *)hdr + sizeof(struct fc_ct_hdr), oct->len); | ||
713 | spin_unlock_irqrestore(&dbf->san_lock, flags); | ||
714 | } | ||
715 | 246 | ||
716 | /** | 247 | if (erp->sdev) |
717 | * zfcp_dbf_san_ct_response - trace event for completion of CT request | 248 | rec->u.run.rec_count = |
718 | * @fsf_req: request containing CT response | 249 | atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter); |
719 | */ | 250 | else if (erp->port) |
720 | void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req) | 251 | rec->u.run.rec_count = atomic_read(&erp->port->erp_counter); |
721 | { | 252 | else |
722 | struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data; | 253 | rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); |
723 | struct zfcp_adapter *adapter = fsf_req->adapter; | ||
724 | struct fc_ct_hdr *hdr = sg_virt(ct->resp); | ||
725 | struct zfcp_dbf *dbf = adapter->dbf; | ||
726 | struct zfcp_dbf_san_record *r = &dbf->san_buf; | ||
727 | struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp; | ||
728 | int level = 3; | ||
729 | unsigned long flags; | ||
730 | 254 | ||
731 | spin_lock_irqsave(&dbf->san_lock, flags); | 255 | debug_event(dbf->rec, 1, rec, sizeof(*rec)); |
732 | memset(r, 0, sizeof(*r)); | 256 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
733 | strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); | ||
734 | r->fsf_reqid = fsf_req->req_id; | ||
735 | r->fsf_seqno = fsf_req->seq_no; | ||
736 | rct->cmd_rsp_code = hdr->ct_cmd; | ||
737 | rct->revision = hdr->ct_rev; | ||
738 | rct->reason_code = hdr->ct_reason; | ||
739 | rct->expl = hdr->ct_explan; | ||
740 | rct->vendor_unique = hdr->ct_vendor; | ||
741 | rct->max_res_size = hdr->ct_mr_size; | ||
742 | rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr), | ||
743 | ZFCP_DBF_SAN_MAX_PAYLOAD); | ||
744 | debug_event(dbf->san, level, r, sizeof(*r)); | ||
745 | zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level, | ||
746 | (void *)hdr + sizeof(struct fc_ct_hdr), rct->len); | ||
747 | spin_unlock_irqrestore(&dbf->san_lock, flags); | ||
748 | } | 257 | } |
749 | 258 | ||
750 | static void zfcp_dbf_san_els(const char *tag, int level, | 259 | static inline |
751 | struct zfcp_fsf_req *fsf_req, u32 d_id, | 260 | void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, |
752 | void *buffer, int buflen) | 261 | u64 req_id, u32 d_id) |
753 | { | 262 | { |
754 | struct zfcp_adapter *adapter = fsf_req->adapter; | 263 | struct zfcp_dbf_san *rec = &dbf->san_buf; |
755 | struct zfcp_dbf *dbf = adapter->dbf; | 264 | u16 rec_len; |
756 | struct zfcp_dbf_san_record *rec = &dbf->san_buf; | ||
757 | unsigned long flags; | 265 | unsigned long flags; |
758 | 266 | ||
759 | spin_lock_irqsave(&dbf->san_lock, flags); | 267 | spin_lock_irqsave(&dbf->san_lock, flags); |
760 | memset(rec, 0, sizeof(*rec)); | 268 | memset(rec, 0, sizeof(*rec)); |
761 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | 269 | |
762 | rec->fsf_reqid = fsf_req->req_id; | 270 | rec->id = id; |
763 | rec->fsf_seqno = fsf_req->seq_no; | 271 | rec->fsf_req_id = req_id; |
764 | rec->u.els.d_id = d_id; | 272 | rec->d_id = d_id; |
765 | debug_event(dbf->san, level, rec, sizeof(*rec)); | 273 | rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); |
766 | zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level, | 274 | memcpy(rec->payload, data, rec_len); |
767 | buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD)); | 275 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
276 | |||
277 | debug_event(dbf->san, 1, rec, sizeof(*rec)); | ||
768 | spin_unlock_irqrestore(&dbf->san_lock, flags); | 278 | spin_unlock_irqrestore(&dbf->san_lock, flags); |
769 | } | 279 | } |
770 | 280 | ||
771 | /** | 281 | /** |
772 | * zfcp_dbf_san_els_request - trace event for issued ELS | 282 | * zfcp_dbf_san_req - trace event for issued SAN request |
773 | * @fsf_req: request containing issued ELS | 283 | * @tag: indentifier for event |
284 | * @fsf_req: request containing issued CT data | ||
285 | * d_id: destination ID | ||
774 | */ | 286 | */ |
775 | void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req) | 287 | void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) |
776 | { | 288 | { |
777 | struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; | 289 | struct zfcp_dbf *dbf = fsf->adapter->dbf; |
778 | u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); | 290 | struct zfcp_fsf_ct_els *ct_els = fsf->data; |
291 | u16 length; | ||
779 | 292 | ||
780 | zfcp_dbf_san_els("oels", 2, fsf_req, d_id, | 293 | length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); |
781 | sg_virt(els->req), els->req->length); | 294 | zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, |
295 | fsf->req_id, d_id); | ||
782 | } | 296 | } |
783 | 297 | ||
784 | /** | 298 | /** |
785 | * zfcp_dbf_san_els_response - trace event for completed ELS | 299 | * zfcp_dbf_san_res - trace event for received SAN request |
786 | * @fsf_req: request containing ELS response | 300 | * @tag: indentifier for event |
301 | * @fsf_req: request containing issued CT data | ||
787 | */ | 302 | */ |
788 | void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req) | 303 | void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) |
789 | { | 304 | { |
790 | struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data; | 305 | struct zfcp_dbf *dbf = fsf->adapter->dbf; |
791 | u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id); | 306 | struct zfcp_fsf_ct_els *ct_els = fsf->data; |
307 | u16 length; | ||
792 | 308 | ||
793 | zfcp_dbf_san_els("rels", 2, fsf_req, d_id, | 309 | length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); |
794 | sg_virt(els->resp), els->resp->length); | 310 | zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, |
311 | fsf->req_id, 0); | ||
795 | } | 312 | } |
796 | 313 | ||
797 | /** | 314 | /** |
798 | * zfcp_dbf_san_incoming_els - trace event for incomig ELS | 315 | * zfcp_dbf_san_in_els - trace event for incoming ELS |
799 | * @fsf_req: request containing unsolicited status buffer with incoming ELS | 316 | * @tag: indentifier for event |
317 | * @fsf_req: request containing issued CT data | ||
800 | */ | 318 | */ |
801 | void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req) | 319 | void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) |
802 | { | 320 | { |
803 | struct fsf_status_read_buffer *buf = | 321 | struct zfcp_dbf *dbf = fsf->adapter->dbf; |
804 | (struct fsf_status_read_buffer *)fsf_req->data; | 322 | struct fsf_status_read_buffer *srb = |
805 | int length = (int)buf->length - | 323 | (struct fsf_status_read_buffer *) fsf->data; |
806 | (int)((void *)&buf->payload - (void *)buf); | 324 | u16 length; |
807 | 325 | ||
808 | zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id), | 326 | length = (u16)(srb->length - |
809 | (void *)buf->payload.data, length); | 327 | offsetof(struct fsf_status_read_buffer, payload)); |
810 | } | 328 | zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, |
811 | 329 | fsf->req_id, ntoh24(srb->d_id)); | |
812 | static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view, | ||
813 | char *out_buf, const char *in_buf) | ||
814 | { | ||
815 | struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf; | ||
816 | char *p = out_buf; | ||
817 | |||
818 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | ||
819 | return 0; | ||
820 | |||
821 | zfcp_dbf_tag(&p, "tag", r->tag); | ||
822 | zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); | ||
823 | zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); | ||
824 | |||
825 | if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) { | ||
826 | struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req; | ||
827 | zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id); | ||
828 | zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code); | ||
829 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); | ||
830 | zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type); | ||
831 | zfcp_dbf_out(&p, "gs_subtype", "0x%02x", ct->gs_subtype); | ||
832 | zfcp_dbf_out(&p, "options", "0x%02x", ct->options); | ||
833 | zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); | ||
834 | } else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) { | ||
835 | struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp; | ||
836 | zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code); | ||
837 | zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision); | ||
838 | zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code); | ||
839 | zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl); | ||
840 | zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique); | ||
841 | zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size); | ||
842 | } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 || | ||
843 | strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 || | ||
844 | strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) { | ||
845 | struct zfcp_dbf_san_record_els *els = &r->u.els; | ||
846 | zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id); | ||
847 | } | ||
848 | return p - out_buf; | ||
849 | } | 330 | } |
850 | 331 | ||
851 | static struct debug_view zfcp_dbf_san_view = { | 332 | /** |
852 | .name = "structured", | 333 | * zfcp_dbf_scsi - trace event for scsi commands |
853 | .header_proc = zfcp_dbf_view_header, | 334 | * @tag: identifier for event |
854 | .format_proc = zfcp_dbf_san_view_format, | 335 | * @sc: pointer to struct scsi_cmnd |
855 | }; | 336 | * @fsf: pointer to struct zfcp_fsf_req |
856 | 337 | */ | |
857 | void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level, | 338 | void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) |
858 | struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd, | ||
859 | struct zfcp_fsf_req *fsf_req, unsigned long old_req_id) | ||
860 | { | 339 | { |
861 | struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf; | 340 | struct zfcp_adapter *adapter = |
862 | struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec; | 341 | (struct zfcp_adapter *) sc->device->host->hostdata[0]; |
863 | unsigned long flags; | 342 | struct zfcp_dbf *dbf = adapter->dbf; |
343 | struct zfcp_dbf_scsi *rec = &dbf->scsi_buf; | ||
864 | struct fcp_resp_with_ext *fcp_rsp; | 344 | struct fcp_resp_with_ext *fcp_rsp; |
865 | struct fcp_resp_rsp_info *fcp_rsp_info = NULL; | 345 | struct fcp_resp_rsp_info *fcp_rsp_info; |
866 | char *fcp_sns_info = NULL; | 346 | unsigned long flags; |
867 | int offset = 0, buflen = 0; | ||
868 | 347 | ||
869 | spin_lock_irqsave(&dbf->scsi_lock, flags); | 348 | spin_lock_irqsave(&dbf->scsi_lock, flags); |
870 | do { | 349 | memset(rec, 0, sizeof(*rec)); |
871 | memset(rec, 0, sizeof(*rec)); | ||
872 | if (offset == 0) { | ||
873 | strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); | ||
874 | strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE); | ||
875 | if (scsi_cmnd != NULL) { | ||
876 | if (scsi_cmnd->device) { | ||
877 | rec->scsi_id = scsi_cmnd->device->id; | ||
878 | rec->scsi_lun = scsi_cmnd->device->lun; | ||
879 | } | ||
880 | rec->scsi_result = scsi_cmnd->result; | ||
881 | rec->scsi_cmnd = (unsigned long)scsi_cmnd; | ||
882 | memcpy(rec->scsi_opcode, scsi_cmnd->cmnd, | ||
883 | min((int)scsi_cmnd->cmd_len, | ||
884 | ZFCP_DBF_SCSI_OPCODE)); | ||
885 | rec->scsi_retries = scsi_cmnd->retries; | ||
886 | rec->scsi_allowed = scsi_cmnd->allowed; | ||
887 | } | ||
888 | if (fsf_req != NULL) { | ||
889 | fcp_rsp = (struct fcp_resp_with_ext *) | ||
890 | &(fsf_req->qtcb->bottom.io.fcp_rsp); | ||
891 | fcp_rsp_info = (struct fcp_resp_rsp_info *) | ||
892 | &fcp_rsp[1]; | ||
893 | fcp_sns_info = (char *) &fcp_rsp[1]; | ||
894 | if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) | ||
895 | fcp_sns_info += fcp_rsp->ext.fr_sns_len; | ||
896 | |||
897 | rec->rsp_validity = fcp_rsp->resp.fr_flags; | ||
898 | rec->rsp_scsi_status = fcp_rsp->resp.fr_status; | ||
899 | rec->rsp_resid = fcp_rsp->ext.fr_resid; | ||
900 | if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) | ||
901 | rec->rsp_code = fcp_rsp_info->rsp_code; | ||
902 | if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { | ||
903 | buflen = min(fcp_rsp->ext.fr_sns_len, | ||
904 | (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO); | ||
905 | rec->sns_info_len = buflen; | ||
906 | memcpy(rec->sns_info, fcp_sns_info, | ||
907 | min(buflen, | ||
908 | ZFCP_DBF_SCSI_FCP_SNS_INFO)); | ||
909 | offset += min(buflen, | ||
910 | ZFCP_DBF_SCSI_FCP_SNS_INFO); | ||
911 | } | ||
912 | |||
913 | rec->fsf_reqid = fsf_req->req_id; | ||
914 | rec->fsf_seqno = fsf_req->seq_no; | ||
915 | rec->fsf_issued = fsf_req->issued; | ||
916 | } | ||
917 | rec->old_fsf_reqid = old_req_id; | ||
918 | } else { | ||
919 | strncpy(dump->tag, "dump", ZFCP_DBF_TAG_SIZE); | ||
920 | dump->total_size = buflen; | ||
921 | dump->offset = offset; | ||
922 | dump->size = min(buflen - offset, | ||
923 | (int)sizeof(struct | ||
924 | zfcp_dbf_scsi_record) - | ||
925 | (int)sizeof(struct zfcp_dbf_dump)); | ||
926 | memcpy(dump->data, fcp_sns_info + offset, dump->size); | ||
927 | offset += dump->size; | ||
928 | } | ||
929 | debug_event(dbf->scsi, level, rec, sizeof(*rec)); | ||
930 | } while (offset < buflen); | ||
931 | spin_unlock_irqrestore(&dbf->scsi_lock, flags); | ||
932 | } | ||
933 | 350 | ||
934 | static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view, | 351 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
935 | char *out_buf, const char *in_buf) | 352 | rec->id = ZFCP_DBF_SCSI_CMND; |
936 | { | 353 | rec->scsi_result = sc->result; |
937 | struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf; | 354 | rec->scsi_retries = sc->retries; |
938 | struct timespec t; | 355 | rec->scsi_allowed = sc->allowed; |
939 | char *p = out_buf; | 356 | rec->scsi_id = sc->device->id; |
940 | 357 | rec->scsi_lun = sc->device->lun; | |
941 | if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0) | 358 | rec->host_scribble = (unsigned long)sc->host_scribble; |
942 | return 0; | 359 | |
943 | 360 | memcpy(rec->scsi_opcode, sc->cmnd, | |
944 | zfcp_dbf_tag(&p, "tag", r->tag); | 361 | min((int)sc->cmd_len, ZFCP_DBF_SCSI_OPCODE)); |
945 | zfcp_dbf_tag(&p, "tag2", r->tag2); | 362 | |
946 | zfcp_dbf_out(&p, "scsi_id", "0x%08x", r->scsi_id); | 363 | if (fsf) { |
947 | zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun); | 364 | rec->fsf_req_id = fsf->req_id; |
948 | zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result); | 365 | fcp_rsp = (struct fcp_resp_with_ext *) |
949 | zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd); | 366 | &(fsf->qtcb->bottom.io.fcp_rsp); |
950 | zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE, | 367 | memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT); |
951 | 0, ZFCP_DBF_SCSI_OPCODE); | 368 | if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) { |
952 | zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries); | 369 | fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; |
953 | zfcp_dbf_out(&p, "scsi_allowed", "0x%02x", r->scsi_allowed); | 370 | rec->fcp_rsp_info = fcp_rsp_info->rsp_code; |
954 | if (strncmp(r->tag, "abrt", ZFCP_DBF_TAG_SIZE) == 0) | 371 | } |
955 | zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid); | 372 | if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) { |
956 | zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid); | 373 | rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE, |
957 | zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno); | 374 | (u16)ZFCP_DBF_PAY_MAX_REC); |
958 | stck_to_timespec(r->fsf_issued, &t); | 375 | zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len, |
959 | zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec); | 376 | "fcp_sns", fsf->req_id); |
960 | 377 | } | |
961 | if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) { | ||
962 | zfcp_dbf_out(&p, "fcp_rsp_validity", "0x%02x", r->rsp_validity); | ||
963 | zfcp_dbf_out(&p, "fcp_rsp_scsi_status", "0x%02x", | ||
964 | r->rsp_scsi_status); | ||
965 | zfcp_dbf_out(&p, "fcp_rsp_resid", "0x%08x", r->rsp_resid); | ||
966 | zfcp_dbf_out(&p, "fcp_rsp_code", "0x%08x", r->rsp_code); | ||
967 | zfcp_dbf_out(&p, "fcp_sns_info_len", "0x%08x", r->sns_info_len); | ||
968 | zfcp_dbf_outd(&p, "fcp_sns_info", r->sns_info, | ||
969 | min((int)r->sns_info_len, | ||
970 | ZFCP_DBF_SCSI_FCP_SNS_INFO), 0, | ||
971 | r->sns_info_len); | ||
972 | } | 378 | } |
973 | p += sprintf(p, "\n"); | ||
974 | return p - out_buf; | ||
975 | } | ||
976 | 379 | ||
977 | static struct debug_view zfcp_dbf_scsi_view = { | 380 | debug_event(dbf->scsi, 1, rec, sizeof(*rec)); |
978 | .name = "structured", | 381 | spin_unlock_irqrestore(&dbf->scsi_lock, flags); |
979 | .header_proc = zfcp_dbf_view_header, | 382 | } |
980 | .format_proc = zfcp_dbf_scsi_view_format, | ||
981 | }; | ||
982 | 383 | ||
983 | static debug_info_t *zfcp_dbf_reg(const char *name, int level, | 384 | static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size) |
984 | struct debug_view *view, int size) | ||
985 | { | 385 | { |
986 | struct debug_info *d; | 386 | struct debug_info *d; |
987 | 387 | ||
988 | d = debug_register(name, dbfsize, level, size); | 388 | d = debug_register(name, size, 1, rec_size); |
989 | if (!d) | 389 | if (!d) |
990 | return NULL; | 390 | return NULL; |
991 | 391 | ||
992 | debug_register_view(d, &debug_hex_ascii_view); | 392 | debug_register_view(d, &debug_hex_ascii_view); |
993 | debug_register_view(d, view); | 393 | debug_set_level(d, 3); |
994 | debug_set_level(d, level); | ||
995 | 394 | ||
996 | return d; | 395 | return d; |
997 | } | 396 | } |
998 | 397 | ||
398 | static void zfcp_dbf_unregister(struct zfcp_dbf *dbf) | ||
399 | { | ||
400 | if (!dbf) | ||
401 | return; | ||
402 | |||
403 | debug_unregister(dbf->scsi); | ||
404 | debug_unregister(dbf->san); | ||
405 | debug_unregister(dbf->hba); | ||
406 | debug_unregister(dbf->pay); | ||
407 | debug_unregister(dbf->rec); | ||
408 | kfree(dbf); | ||
409 | } | ||
410 | |||
999 | /** | 411 | /** |
1000 | * zfcp_adapter_debug_register - registers debug feature for an adapter | 412 | * zfcp_dbf_adapter_register - registers debug feature for an adapter |
1001 | * @adapter: pointer to adapter for which debug features should be registered | 413 | * @adapter: pointer to adapter for which debug features should be registered |
@@ -1003,69 +415,66 @@ static debug_info_t *zfcp_dbf_reg(const char *name, int level, | |||
1003 | */ | 415 | */ |
1004 | int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) | 416 | int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter) |
1005 | { | 417 | { |
1006 | char dbf_name[DEBUG_MAX_NAME_LEN]; | 418 | char name[DEBUG_MAX_NAME_LEN]; |
1007 | struct zfcp_dbf *dbf; | 419 | struct zfcp_dbf *dbf; |
1008 | 420 | ||
1009 | dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); | 421 | dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL); |
1010 | if (!dbf) | 422 | if (!dbf) |
1011 | return -ENOMEM; | 423 | return -ENOMEM; |
1012 | 424 | ||
1013 | dbf->adapter = adapter; | 425 | spin_lock_init(&dbf->pay_lock); |
1014 | |||
1015 | spin_lock_init(&dbf->hba_lock); | 426 | spin_lock_init(&dbf->hba_lock); |
1016 | spin_lock_init(&dbf->san_lock); | 427 | spin_lock_init(&dbf->san_lock); |
1017 | spin_lock_init(&dbf->scsi_lock); | 428 | spin_lock_init(&dbf->scsi_lock); |
1018 | spin_lock_init(&dbf->rec_lock); | 429 | spin_lock_init(&dbf->rec_lock); |
1019 | 430 | ||
1020 | /* debug feature area which records recovery activity */ | 431 | /* debug feature area which records recovery activity */ |
1021 | sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); | 432 | sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev)); |
1022 | dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view, | 433 | dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec)); |
1023 | sizeof(struct zfcp_dbf_rec_record)); | ||
1024 | if (!dbf->rec) | 434 | if (!dbf->rec) |
1025 | goto err_out; | 435 | goto err_out; |
1026 | 436 | ||
1027 | /* debug feature area which records HBA (FSF and QDIO) conditions */ | 437 | /* debug feature area which records HBA (FSF and QDIO) conditions */ |
1028 | sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); | 438 | sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev)); |
1029 | dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view, | 439 | dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba)); |
1030 | sizeof(struct zfcp_dbf_hba_record)); | ||
1031 | if (!dbf->hba) | 440 | if (!dbf->hba) |
1032 | goto err_out; | 441 | goto err_out; |
1033 | 442 | ||
443 | /* debug feature area which records payload info */ | ||
444 | sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev)); | ||
445 | dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay)); | ||
446 | if (!dbf->pay) | ||
447 | goto err_out; | ||
448 | |||
1034 | /* debug feature area which records SAN command failures and recovery */ | 449 | /* debug feature area which records SAN command failures and recovery */ |
1035 | sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); | 450 | sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev)); |
1036 | dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view, | 451 | dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san)); |
1037 | sizeof(struct zfcp_dbf_san_record)); | ||
1038 | if (!dbf->san) | 452 | if (!dbf->san) |
1039 | goto err_out; | 453 | goto err_out; |
1040 | 454 | ||
1041 | /* debug feature area which records SCSI command failures and recovery */ | 455 | /* debug feature area which records SCSI command failures and recovery */ |
1042 | sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); | 456 | sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev)); |
1043 | dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view, | 457 | dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi)); |
1044 | sizeof(struct zfcp_dbf_scsi_record)); | ||
1045 | if (!dbf->scsi) | 458 | if (!dbf->scsi) |
1046 | goto err_out; | 459 | goto err_out; |
1047 | 460 | ||
1048 | adapter->dbf = dbf; | 461 | adapter->dbf = dbf; |
1049 | return 0; | ||
1050 | 462 | ||
463 | return 0; | ||
1051 | err_out: | 464 | err_out: |
1052 | zfcp_dbf_adapter_unregister(dbf); | 465 | zfcp_dbf_unregister(dbf); |
1053 | return -ENOMEM; | 466 | return -ENOMEM; |
1054 | } | 467 | } |
1055 | 468 | ||
1056 | /** | 469 | /** |
1057 | * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter | 470 | * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter |
1058 | * @dbf: pointer to dbf for which debug features should be unregistered | 471 | * @adapter: pointer to adapter for which debug features should be unregistered |
1059 | */ | 472 | */ |
1060 | void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf) | 473 | void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter) |
1061 | { | 474 | { |
1062 | if (!dbf) | 475 | struct zfcp_dbf *dbf = adapter->dbf; |
1063 | return; | 476 | |
1064 | debug_unregister(dbf->scsi); | 477 | adapter->dbf = NULL; |
1065 | debug_unregister(dbf->san); | 478 | zfcp_dbf_unregister(dbf); |
1066 | debug_unregister(dbf->hba); | ||
1067 | debug_unregister(dbf->rec); | ||
1068 | dbf->adapter->dbf = NULL; | ||
1069 | kfree(dbf); | ||
1070 | } | 479 | } |
1071 | 480 | ||
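The reworked zfcp_dbf_adapter_register() above registers five trace areas and routes every failure through one err_out label, reusing zfcp_dbf_unregister(), which tolerates a partially initialized object. A stand-alone sketch of that register-or-roll-back pattern, with plain allocations standing in for debug_register(); none of the names below are zfcp symbols:

	#include <errno.h>
	#include <stdlib.h>

	/* placeholder for the five debug feature areas of struct zfcp_dbf */
	struct demo_dbf {
		void *rec, *hba, *pay, *san, *scsi;
	};

	/* frees whatever was set up; safe to call with a half-initialized object */
	static void demo_unregister(struct demo_dbf *dbf)
	{
		if (!dbf)
			return;
		free(dbf->scsi);
		free(dbf->san);
		free(dbf->pay);
		free(dbf->hba);
		free(dbf->rec);
		free(dbf);
	}

	static int demo_register(struct demo_dbf **out)
	{
		struct demo_dbf *dbf = calloc(1, sizeof(*dbf));

		if (!dbf)
			return -ENOMEM;
		if (!(dbf->rec = malloc(64)))
			goto err_out;
		if (!(dbf->hba = malloc(64)))
			goto err_out;
		if (!(dbf->pay = malloc(64)))
			goto err_out;
		if (!(dbf->san = malloc(64)))
			goto err_out;
		if (!(dbf->scsi = malloc(64)))
			goto err_out;
		*out = dbf;
		return 0;
	err_out:
		demo_unregister(dbf);	/* unwinds only the areas set up so far */
		return -ENOMEM;
	}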
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h index 04081b1b62b4..714f087eb7a9 100644 --- a/drivers/s390/scsi/zfcp_dbf.h +++ b/drivers/s390/scsi/zfcp_dbf.h | |||
@@ -1,22 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * This file is part of the zfcp device driver for | 2 | * zfcp device driver |
3 | * FCP adapters for IBM System z9 and zSeries. | 3 | * debug feature declarations |
4 | * | 4 | * |
5 | * Copyright IBM Corp. 2008, 2009 | 5 | * Copyright IBM Corp. 2008, 2010 |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
20 | */ | 6 | */ |
21 | 7 | ||
22 | #ifndef ZFCP_DBF_H | 8 | #ifndef ZFCP_DBF_H |
@@ -27,322 +13,350 @@ | |||
27 | #include "zfcp_fsf.h" | 13 | #include "zfcp_fsf.h" |
28 | #include "zfcp_def.h" | 14 | #include "zfcp_def.h" |
29 | 15 | ||
30 | #define ZFCP_DBF_TAG_SIZE 4 | 16 | #define ZFCP_DBF_TAG_LEN 7 |
31 | #define ZFCP_DBF_ID_SIZE 7 | ||
32 | 17 | ||
33 | #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull | 18 | #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull |
34 | 19 | ||
35 | struct zfcp_dbf_dump { | 20 | /** |
36 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 21 | * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action |
37 | u32 total_size; /* size of total dump data */ | 22 | * @ready: number of ready recovery actions |
38 | u32 offset; /* how much data has being already dumped */ | 23 | * @running: number of running recovery actions |
39 | u32 size; /* how much data comes with this record */ | 24 | * @want: wanted recovery action |
40 | u8 data[]; /* dump data */ | 25 | * @need: needed recovery action |
41 | } __attribute__ ((packed)); | 26 | */ |
42 | 27 | struct zfcp_dbf_rec_trigger { | |
43 | struct zfcp_dbf_rec_record_thread { | ||
44 | u32 total; | ||
45 | u32 ready; | 28 | u32 ready; |
46 | u32 running; | 29 | u32 running; |
47 | }; | ||
48 | |||
49 | struct zfcp_dbf_rec_record_target { | ||
50 | u64 ref; | ||
51 | u32 status; | ||
52 | u32 d_id; | ||
53 | u64 wwpn; | ||
54 | u64 fcp_lun; | ||
55 | u32 erp_count; | ||
56 | }; | ||
57 | |||
58 | struct zfcp_dbf_rec_record_trigger { | ||
59 | u8 want; | 30 | u8 want; |
60 | u8 need; | 31 | u8 need; |
61 | u32 as; | 32 | } __packed; |
62 | u32 ps; | ||
63 | u32 ls; | ||
64 | u64 ref; | ||
65 | u64 action; | ||
66 | u64 wwpn; | ||
67 | u64 fcp_lun; | ||
68 | }; | ||
69 | 33 | ||
70 | struct zfcp_dbf_rec_record_action { | 34 | /** |
71 | u32 status; | 35 | * struct zfcp_dbf_rec_running - trace record for running recovery |
72 | u32 step; | 36 | * @fsf_req_id: request id for fsf requests |
73 | u64 action; | 37 | * @rec_status: status of the fsf request |
74 | u64 fsf_req; | 38 | * @rec_step: current step of the recovery action |
39 | * @rec_count: recovery counter | ||
40 | */ | ||
41 | struct zfcp_dbf_rec_running { | ||
42 | u64 fsf_req_id; | ||
43 | u32 rec_status; | ||
44 | u16 rec_step; | ||
45 | u8 rec_action; | ||
46 | u8 rec_count; | ||
47 | } __packed; | ||
48 | |||
49 | /** | ||
50 | * enum zfcp_dbf_rec_id - recovery trace record id | ||
51 | * @ZFCP_DBF_REC_TRIG: triggered recovery identifier | ||
52 | * @ZFCP_DBF_REC_RUN: running recovery identifier | ||
53 | */ | ||
54 | enum zfcp_dbf_rec_id { | ||
55 | ZFCP_DBF_REC_TRIG = 1, | ||
56 | ZFCP_DBF_REC_RUN = 2, | ||
75 | }; | 57 | }; |
76 | 58 | ||
77 | struct zfcp_dbf_rec_record { | 59 | /** |
60 | * struct zfcp_dbf_rec - trace record for error recovery actions | ||
61 | * @id: unique number of recovery record type | ||
62 | * @tag: identifier string specifying the location of initiation | ||
63 | * @lun: logical unit number | ||
64 | * @wwpn: world wide port number | ||
65 | * @d_id: destination ID | ||
66 | * @adapter_status: current status of the adapter | ||
67 | * @port_status: current status of the port | ||
68 | * @lun_status: current status of the lun | ||
69 | * @u.trig: structure zfcp_dbf_rec_trigger | ||
70 | * @u.run: structure zfcp_dbf_rec_running | ||
71 | */ | ||
72 | struct zfcp_dbf_rec { | ||
78 | u8 id; | 73 | u8 id; |
79 | char id2[7]; | 74 | char tag[ZFCP_DBF_TAG_LEN]; |
75 | u64 lun; | ||
76 | u64 wwpn; | ||
77 | u32 d_id; | ||
78 | u32 adapter_status; | ||
79 | u32 port_status; | ||
80 | u32 lun_status; | ||
80 | union { | 81 | union { |
81 | struct zfcp_dbf_rec_record_action action; | 82 | struct zfcp_dbf_rec_trigger trig; |
82 | struct zfcp_dbf_rec_record_thread thread; | 83 | struct zfcp_dbf_rec_running run; |
83 | struct zfcp_dbf_rec_record_target target; | ||
84 | struct zfcp_dbf_rec_record_trigger trigger; | ||
85 | } u; | 84 | } u; |
86 | }; | 85 | } __packed; |
87 | 86 | ||
88 | enum { | 87 | /** |
89 | ZFCP_REC_DBF_ID_ACTION, | 88 | * enum zfcp_dbf_san_id - SAN trace record identifier |
90 | ZFCP_REC_DBF_ID_THREAD, | 89 | * @ZFCP_DBF_SAN_REQ: request trace record id |
91 | ZFCP_REC_DBF_ID_TARGET, | 90 | * @ZFCP_DBF_SAN_RES: response trace record id |
92 | ZFCP_REC_DBF_ID_TRIGGER, | 91 | * @ZFCP_DBF_SAN_ELS: extended link service record id |
92 | */ | ||
93 | enum zfcp_dbf_san_id { | ||
94 | ZFCP_DBF_SAN_REQ = 1, | ||
95 | ZFCP_DBF_SAN_RES = 2, | ||
96 | ZFCP_DBF_SAN_ELS = 3, | ||
93 | }; | 97 | }; |
94 | 98 | ||
95 | struct zfcp_dbf_hba_record_response { | 99 | /** struct zfcp_dbf_san - trace record for SAN requests and responses |
96 | u32 fsf_command; | 100 | * @id: unique number of SAN record type |
97 | u64 fsf_reqid; | 101 | * @tag: identifier string specifying the location of initiation |
98 | u32 fsf_seqno; | 102 | * @fsf_req_id: request id for fsf requests |
99 | u64 fsf_issued; | 103 | * @payload: unformatted information related to request/response |
100 | u32 fsf_prot_status; | 104 | * @d_id: destination id |
105 | */ | ||
106 | struct zfcp_dbf_san { | ||
107 | u8 id; | ||
108 | char tag[ZFCP_DBF_TAG_LEN]; | ||
109 | u64 fsf_req_id; | ||
110 | u32 d_id; | ||
111 | #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32) | ||
112 | char payload[ZFCP_DBF_SAN_MAX_PAYLOAD]; | ||
113 | } __packed; | ||
114 | |||
115 | /** | ||
116 | * struct zfcp_dbf_hba_res - trace record for hba responses | ||
117 | * @req_issued: timestamp when request was issued | ||
118 | * @prot_status: protocol status | ||
119 | * @prot_status_qual: protocol status qualifier | ||
120 | * @fsf_status: fsf status | ||
121 | * @fsf_status_qual: fsf status qualifier | ||
122 | */ | ||
123 | struct zfcp_dbf_hba_res { | ||
124 | u64 req_issued; | ||
125 | u32 prot_status; | ||
126 | u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; | ||
101 | u32 fsf_status; | 127 | u32 fsf_status; |
102 | u8 fsf_prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; | 128 | u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; |
103 | u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; | 129 | } __packed; |
104 | u32 fsf_req_status; | ||
105 | u8 sbal_first; | ||
106 | u8 sbal_last; | ||
107 | u8 sbal_response; | ||
108 | u8 pool; | ||
109 | u64 erp_action; | ||
110 | union { | ||
111 | struct { | ||
112 | u64 cmnd; | ||
113 | u32 data_dir; | ||
114 | } fcp; | ||
115 | struct { | ||
116 | u64 wwpn; | ||
117 | u32 d_id; | ||
118 | u32 port_handle; | ||
119 | } port; | ||
120 | struct { | ||
121 | u64 wwpn; | ||
122 | u64 fcp_lun; | ||
123 | u32 port_handle; | ||
124 | u32 lun_handle; | ||
125 | } unit; | ||
126 | struct { | ||
127 | u32 d_id; | ||
128 | } els; | ||
129 | } u; | ||
130 | } __attribute__ ((packed)); | ||
131 | 130 | ||
132 | struct zfcp_dbf_hba_record_status { | 131 | /** |
133 | u8 failed; | 132 | * struct zfcp_dbf_hba_uss - trace record for unsolicited status |
133 | * @status_type: type of unsolicited status | ||
134 | * @status_subtype: subtype of unsolicited status | ||
135 | * @d_id: destination ID | ||
136 | * @lun: logical unit number | ||
137 | * @queue_designator: queue designator | ||
138 | */ | ||
139 | struct zfcp_dbf_hba_uss { | ||
134 | u32 status_type; | 140 | u32 status_type; |
135 | u32 status_subtype; | 141 | u32 status_subtype; |
136 | struct fsf_queue_designator | ||
137 | queue_designator; | ||
138 | u32 payload_size; | ||
139 | #define ZFCP_DBF_UNSOL_PAYLOAD 80 | ||
140 | #define ZFCP_DBF_UNSOL_PAYLOAD_SENSE_DATA_AVAIL 32 | ||
141 | #define ZFCP_DBF_UNSOL_PAYLOAD_BIT_ERROR_THRESHOLD 56 | ||
142 | #define ZFCP_DBF_UNSOL_PAYLOAD_FEATURE_UPDATE_ALERT 2 * sizeof(u32) | ||
143 | u8 payload[ZFCP_DBF_UNSOL_PAYLOAD]; | ||
144 | } __attribute__ ((packed)); | ||
145 | |||
146 | struct zfcp_dbf_hba_record_qdio { | ||
147 | u32 qdio_error; | ||
148 | u8 sbal_index; | ||
149 | u8 sbal_count; | ||
150 | } __attribute__ ((packed)); | ||
151 | |||
152 | struct zfcp_dbf_hba_record { | ||
153 | u8 tag[ZFCP_DBF_TAG_SIZE]; | ||
154 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | ||
155 | union { | ||
156 | struct zfcp_dbf_hba_record_response response; | ||
157 | struct zfcp_dbf_hba_record_status status; | ||
158 | struct zfcp_dbf_hba_record_qdio qdio; | ||
159 | struct fsf_bit_error_payload berr; | ||
160 | } u; | ||
161 | } __attribute__ ((packed)); | ||
162 | |||
163 | struct zfcp_dbf_san_record_ct_request { | ||
164 | u16 cmd_req_code; | ||
165 | u8 revision; | ||
166 | u8 gs_type; | ||
167 | u8 gs_subtype; | ||
168 | u8 options; | ||
169 | u16 max_res_size; | ||
170 | u32 len; | ||
171 | u32 d_id; | ||
172 | } __attribute__ ((packed)); | ||
173 | |||
174 | struct zfcp_dbf_san_record_ct_response { | ||
175 | u16 cmd_rsp_code; | ||
176 | u8 revision; | ||
177 | u8 reason_code; | ||
178 | u8 expl; | ||
179 | u8 vendor_unique; | ||
180 | u16 max_res_size; | ||
181 | u32 len; | ||
182 | } __attribute__ ((packed)); | ||
183 | |||
184 | struct zfcp_dbf_san_record_els { | ||
185 | u32 d_id; | 142 | u32 d_id; |
186 | } __attribute__ ((packed)); | 143 | u64 lun; |
144 | u64 queue_designator; | ||
145 | } __packed; | ||
187 | 146 | ||
188 | struct zfcp_dbf_san_record { | 147 | /** |
189 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 148 | * enum zfcp_dbf_hba_id - HBA trace record identifier |
190 | u64 fsf_reqid; | 149 | * @ZFCP_DBF_HBA_RES: response trace record |
191 | u32 fsf_seqno; | 150 | * @ZFCP_DBF_HBA_USS: unsolicited status trace record |
151 | * @ZFCP_DBF_HBA_BIT: bit error trace record | ||
152 | */ | ||
153 | enum zfcp_dbf_hba_id { | ||
154 | ZFCP_DBF_HBA_RES = 1, | ||
155 | ZFCP_DBF_HBA_USS = 2, | ||
156 | ZFCP_DBF_HBA_BIT = 3, | ||
157 | }; | ||
158 | |||
159 | /** | ||
160 | * struct zfcp_dbf_hba - common trace record for HBA records | ||
161 | * @id: unique number of HBA record type | ||
162 | * @tag: identifier string specifying the location of initiation | ||
163 | * @fsf_req_id: request id for fsf requests | ||
164 | * @fsf_req_status: status of fsf request | ||
165 | * @fsf_cmd: fsf command | ||
166 | * @fsf_seq_no: fsf sequence number | ||
167 | * @pl_len: length of payload stored as zfcp_dbf_pay | ||
168 | * @u: record type specific data | ||
169 | */ | ||
170 | struct zfcp_dbf_hba { | ||
171 | u8 id; | ||
172 | char tag[ZFCP_DBF_TAG_LEN]; | ||
173 | u64 fsf_req_id; | ||
174 | u32 fsf_req_status; | ||
175 | u32 fsf_cmd; | ||
176 | u32 fsf_seq_no; | ||
177 | u16 pl_len; | ||
192 | union { | 178 | union { |
193 | struct zfcp_dbf_san_record_ct_request ct_req; | 179 | struct zfcp_dbf_hba_res res; |
194 | struct zfcp_dbf_san_record_ct_response ct_resp; | 180 | struct zfcp_dbf_hba_uss uss; |
195 | struct zfcp_dbf_san_record_els els; | 181 | struct fsf_bit_error_payload be; |
196 | } u; | 182 | } u; |
197 | } __attribute__ ((packed)); | 183 | } __packed; |
198 | 184 | ||
199 | #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024 | 185 | /** |
186 | * enum zfcp_dbf_scsi_id - scsi trace record identifier | ||
187 | * @ZFCP_DBF_SCSI_CMND: scsi command trace record | ||
188 | */ | ||
189 | enum zfcp_dbf_scsi_id { | ||
190 | ZFCP_DBF_SCSI_CMND = 1, | ||
191 | }; | ||
200 | 192 | ||
201 | struct zfcp_dbf_scsi_record { | 193 | /** |
202 | u8 tag[ZFCP_DBF_TAG_SIZE]; | 194 | * struct zfcp_dbf_scsi - common trace record for SCSI records |
203 | u8 tag2[ZFCP_DBF_TAG_SIZE]; | 195 | * @id: unique number of recovery record type |
196 | * @tag: identifier string specifying the location of initiation | ||
197 | * @scsi_id: scsi device id | ||
198 | * @scsi_lun: scsi device logical unit number | ||
199 | * @scsi_result: scsi result | ||
200 | * @scsi_retries: current retry number of scsi request | ||
201 | * @scsi_allowed: allowed retries | ||
202 | * @fcp_rsp_info: FCP response info | ||
203 | * @scsi_opcode: scsi opcode | ||
204 | * @fsf_req_id: request id of fsf request | ||
205 | * @host_scribble: LLD specific data attached to SCSI request | ||
206 | * @pl_len: length of payload stored as zfcp_dbf_pay | ||
207 | * @fsf_rsp: response for fsf request | ||
208 | */ | ||
209 | struct zfcp_dbf_scsi { | ||
210 | u8 id; | ||
211 | char tag[ZFCP_DBF_TAG_LEN]; | ||
204 | u32 scsi_id; | 212 | u32 scsi_id; |
205 | u32 scsi_lun; | 213 | u32 scsi_lun; |
206 | u32 scsi_result; | 214 | u32 scsi_result; |
207 | u64 scsi_cmnd; | ||
208 | #define ZFCP_DBF_SCSI_OPCODE 16 | ||
209 | u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; | ||
210 | u8 scsi_retries; | 215 | u8 scsi_retries; |
211 | u8 scsi_allowed; | 216 | u8 scsi_allowed; |
212 | u64 fsf_reqid; | 217 | u8 fcp_rsp_info; |
213 | u32 fsf_seqno; | 218 | #define ZFCP_DBF_SCSI_OPCODE 16 |
214 | u64 fsf_issued; | 219 | u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE]; |
215 | u64 old_fsf_reqid; | 220 | u64 fsf_req_id; |
216 | u8 rsp_validity; | 221 | u64 host_scribble; |
217 | u8 rsp_scsi_status; | 222 | u16 pl_len; |
218 | u32 rsp_resid; | 223 | struct fcp_resp_with_ext fcp_rsp; |
219 | u8 rsp_code; | 224 | } __packed; |
220 | #define ZFCP_DBF_SCSI_FCP_SNS_INFO 16 | ||
221 | #define ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO 256 | ||
222 | u32 sns_info_len; | ||
223 | u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO]; | ||
224 | } __attribute__ ((packed)); | ||
225 | 225 | ||
226 | /** | ||
227 | * struct zfcp_dbf_pay - trace record for unformatted payload information | ||
228 | * @area: area this record is originated from | ||
229 | * @counter: ascending record number | ||
230 | * @fsf_req_id: request id of fsf request | ||
231 | * @data: unformatted data | ||
232 | */ | ||
233 | struct zfcp_dbf_pay { | ||
234 | u8 counter; | ||
235 | char area[ZFCP_DBF_TAG_LEN]; | ||
236 | u64 fsf_req_id; | ||
237 | #define ZFCP_DBF_PAY_MAX_REC 0x100 | ||
238 | char data[ZFCP_DBF_PAY_MAX_REC]; | ||
239 | } __packed; | ||
240 | |||
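struct zfcp_dbf_pay above caps each record's data at ZFCP_DBF_PAY_MAX_REC bytes, while the pl_len fields of the HBA and SCSI records can describe longer payloads, so zfcp_dbf_pl_write() (not part of this hunk) presumably splits long payloads into numbered chunks. A stand-alone sketch of such chunking under that assumption; none of the names below are real zfcp symbols:

	#include <string.h>

	#define DEMO_TAG_LEN 7		/* mirrors ZFCP_DBF_TAG_LEN */
	#define DEMO_PAY_MAX_REC 0x100	/* mirrors ZFCP_DBF_PAY_MAX_REC */

	struct demo_pay {
		unsigned char counter;
		char area[DEMO_TAG_LEN];
		unsigned long long fsf_req_id;
		char data[DEMO_PAY_MAX_REC];
	};

	/* emit_record() stands in for debug_event() on the pay trace area */
	static void demo_pl_write(const char *area, unsigned long long req_id,
				  const void *data, unsigned short len,
				  void (*emit_record)(const struct demo_pay *))
	{
		struct demo_pay rec;
		unsigned short off = 0;

		memset(&rec, 0, sizeof(rec));
		strncpy(rec.area, area, DEMO_TAG_LEN);	/* fixed-width tag, may drop NUL */
		rec.fsf_req_id = req_id;

		while (off < len) {
			unsigned short chunk = len - off;

			if (chunk > DEMO_PAY_MAX_REC)
				chunk = DEMO_PAY_MAX_REC;
			memcpy(rec.data, (const char *)data + off, chunk);
			emit_record(&rec);	/* one trace record per chunk */
			rec.counter++;		/* ascending chunk number */
			off += chunk;
		}
	}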
241 | /** | ||
242 | * struct zfcp_dbf - main dbf trace structure | ||
243 | * @pay: reference to payload trace area | ||
244 | * @rec: reference to recovery trace area | ||
245 | * @hba: reference to hba trace area | ||
246 | * @san: reference to san trace area | ||
247 | * @scsi: reference to scsi trace area | ||
248 | * @pay_lock: lock protecting payload trace buffer | ||
249 | * @rec_lock: lock protecting recovery trace buffer | ||
250 | * @hba_lock: lock protecting hba trace buffer | ||
251 | * @san_lock: lock protecting san trace buffer | ||
252 | * @scsi_lock: lock protecting scsi trace buffer | ||
253 | * @pay_buf: pre-allocated buffer for payload | ||
254 | * @rec_buf: pre-allocated buffer for recovery | ||
255 | * @hba_buf: pre-allocated buffer for hba | ||
256 | * @san_buf: pre-allocated buffer for san | ||
257 | * @scsi_buf: pre-allocated buffer for scsi | ||
258 | */ | ||
226 | struct zfcp_dbf { | 259 | struct zfcp_dbf { |
260 | debug_info_t *pay; | ||
227 | debug_info_t *rec; | 261 | debug_info_t *rec; |
228 | debug_info_t *hba; | 262 | debug_info_t *hba; |
229 | debug_info_t *san; | 263 | debug_info_t *san; |
230 | debug_info_t *scsi; | 264 | debug_info_t *scsi; |
265 | spinlock_t pay_lock; | ||
231 | spinlock_t rec_lock; | 266 | spinlock_t rec_lock; |
232 | spinlock_t hba_lock; | 267 | spinlock_t hba_lock; |
233 | spinlock_t san_lock; | 268 | spinlock_t san_lock; |
234 | spinlock_t scsi_lock; | 269 | spinlock_t scsi_lock; |
235 | struct zfcp_dbf_rec_record rec_buf; | 270 | struct zfcp_dbf_pay pay_buf; |
236 | struct zfcp_dbf_hba_record hba_buf; | 271 | struct zfcp_dbf_rec rec_buf; |
237 | struct zfcp_dbf_san_record san_buf; | 272 | struct zfcp_dbf_hba hba_buf; |
238 | struct zfcp_dbf_scsi_record scsi_buf; | 273 | struct zfcp_dbf_san san_buf; |
239 | struct zfcp_adapter *adapter; | 274 | struct zfcp_dbf_scsi scsi_buf; |
240 | }; | 275 | }; |
241 | 276 | ||
242 | static inline | 277 | static inline |
243 | void zfcp_dbf_hba_fsf_resp(const char *tag2, int level, | 278 | void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) |
244 | struct zfcp_fsf_req *req, struct zfcp_dbf *dbf) | ||
245 | { | 279 | { |
246 | if (level <= dbf->hba->level) | 280 | if (level <= req->adapter->dbf->hba->level) |
247 | _zfcp_dbf_hba_fsf_response(tag2, level, req, dbf); | 281 | zfcp_dbf_hba_fsf_res(tag, req); |
248 | } | 282 | } |
249 | 283 | ||
250 | /** | 284 | /** |
251 | * zfcp_dbf_hba_fsf_response - trace event for request completion | 285 | * zfcp_dbf_hba_fsf_response - trace event for request completion |
252 | * @fsf_req: request that has been completed | 286 | * @req: request that has been completed |
253 | */ | 287 | */ |
254 | static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) | 288 | static inline |
289 | void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) | ||
255 | { | 290 | { |
256 | struct zfcp_dbf *dbf = req->adapter->dbf; | ||
257 | struct fsf_qtcb *qtcb = req->qtcb; | 291 | struct fsf_qtcb *qtcb = req->qtcb; |
258 | 292 | ||
259 | if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && | 293 | if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) && |
260 | (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { | 294 | (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) { |
261 | zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf); | 295 | zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); |
262 | 296 | ||
263 | } else if (qtcb->header.fsf_status != FSF_GOOD) { | 297 | } else if (qtcb->header.fsf_status != FSF_GOOD) { |
264 | zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf); | 298 | zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); |
265 | 299 | ||
266 | } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || | 300 | } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || |
267 | (req->fsf_command == FSF_QTCB_OPEN_LUN)) { | 301 | (req->fsf_command == FSF_QTCB_OPEN_LUN)) { |
268 | zfcp_dbf_hba_fsf_resp("open", 4, req, dbf); | 302 | zfcp_dbf_hba_fsf_resp("fs_open", 4, req); |
269 | 303 | ||
270 | } else if (qtcb->header.log_length) { | 304 | } else if (qtcb->header.log_length) { |
271 | zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf); | 305 | zfcp_dbf_hba_fsf_resp("fs_qtcb", 5, req); |
272 | 306 | ||
273 | } else { | 307 | } else { |
274 | zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf); | 308 | zfcp_dbf_hba_fsf_resp("fs_norm", 6, req); |
275 | } | 309 | } |
276 | } | ||
277 | |||
278 | /** | ||
279 | * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer | ||
280 | * @tag: tag indicating which kind of unsolicited status has been received | ||
281 | * @dbf: reference to dbf structure | ||
282 | * @status_buffer: buffer containing payload of unsolicited status | ||
283 | */ | ||
284 | static inline | ||
285 | void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf, | ||
286 | struct fsf_status_read_buffer *buf) | ||
287 | { | ||
288 | int level = 2; | ||
289 | |||
290 | if (level <= dbf->hba->level) | ||
291 | _zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf); | ||
292 | } | 310 | } |
293 | 311 | ||
294 | static inline | 312 | static inline |
295 | void zfcp_dbf_scsi(const char *tag, const char *tag2, int level, | 313 | void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd, |
296 | struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, | 314 | struct zfcp_fsf_req *req) |
297 | struct zfcp_fsf_req *req, unsigned long old_id) | ||
298 | { | 315 | { |
299 | if (level <= dbf->scsi->level) | 316 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) |
300 | _zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id); | 317 | scmd->device->host->hostdata[0]; |
318 | |||
319 | if (level <= adapter->dbf->scsi->level) | ||
320 | zfcp_dbf_scsi(tag, scmd, req); | ||
301 | } | 321 | } |
302 | 322 | ||
303 | /** | 323 | /** |
304 | * zfcp_dbf_scsi_result - trace event for SCSI command completion | 324 | * zfcp_dbf_scsi_result - trace event for SCSI command completion |
305 | * @dbf: adapter dbf trace | ||
306 | * @scmd: SCSI command pointer | 325 | * @scmd: SCSI command pointer |
307 | * @req: FSF request used to issue SCSI command | 326 | * @req: FSF request used to issue SCSI command |
308 | */ | 327 | */ |
309 | static inline | 328 | static inline |
310 | void zfcp_dbf_scsi_result(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd, | 329 | void zfcp_dbf_scsi_result(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req) |
311 | struct zfcp_fsf_req *req) | ||
312 | { | 330 | { |
313 | if (scmd->result != 0) | 331 | if (scmd->result != 0) |
314 | zfcp_dbf_scsi("rslt", "erro", 3, dbf, scmd, req, 0); | 332 | _zfcp_dbf_scsi("rsl_err", 3, scmd, req); |
315 | else if (scmd->retries > 0) | 333 | else if (scmd->retries > 0) |
316 | zfcp_dbf_scsi("rslt", "retr", 4, dbf, scmd, req, 0); | 334 | _zfcp_dbf_scsi("rsl_ret", 4, scmd, req); |
317 | else | 335 | else |
318 | zfcp_dbf_scsi("rslt", "norm", 6, dbf, scmd, req, 0); | 336 | _zfcp_dbf_scsi("rsl_nor", 6, scmd, req); |
319 | } | 337 | } |
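As a usage illustration only: the real callers of zfcp_dbf_scsi_result() live in zfcp_fsf.c and zfcp_scsi.c, outside this hunk, so the completion path below is hypothetical; it only shows that the result is traced before the command is handed back:

	#include "zfcp_dbf.h"

	/* hypothetical completion path, not part of this patch */
	static void demo_scsi_complete(struct scsi_cmnd *scmd, struct zfcp_fsf_req *req)
	{
		zfcp_dbf_scsi_result(scmd, req);	/* emits rsl_err/rsl_ret/rsl_nor */
		scmd->scsi_done(scmd);			/* hand back to the SCSI midlayer */
	}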
320 | 338 | ||
321 | /** | 339 | /** |
322 | * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command | 340 | * zfcp_dbf_scsi_fail_send - trace event for failure to send SCSI command |
323 | * @dbf: adapter dbf trace | ||
324 | * @scmd: SCSI command pointer | 341 | * @scmd: SCSI command pointer |
325 | */ | 342 | */ |
326 | static inline | 343 | static inline |
327 | void zfcp_dbf_scsi_fail_send(struct zfcp_dbf *dbf, struct scsi_cmnd *scmd) | 344 | void zfcp_dbf_scsi_fail_send(struct scsi_cmnd *scmd) |
328 | { | 345 | { |
329 | zfcp_dbf_scsi("rslt", "fail", 4, dbf, scmd, NULL, 0); | 346 | _zfcp_dbf_scsi("rsl_fai", 4, scmd, NULL); |
330 | } | 347 | } |
331 | 348 | ||
332 | /** | 349 | /** |
333 | * zfcp_dbf_scsi_abort - trace event for SCSI command abort | 350 | * zfcp_dbf_scsi_abort - trace event for SCSI command abort |
334 | * @tag: tag indicating success or failure of abort operation | 351 | * @tag: tag indicating success or failure of abort operation |
335 | * @adapter: adapter thas has been used to issue SCSI command to be aborted | ||
336 | * @scmd: SCSI command to be aborted | 352 | * @scmd: SCSI command to be aborted |
337 | * @new_req: request containing abort (might be NULL) | 353 | * @fsf_req: request containing abort (might be NULL) |
338 | * @old_id: identifier of request containg SCSI command to be aborted | ||
339 | */ | 354 | */ |
340 | static inline | 355 | static inline |
341 | void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, | 356 | void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd, |
342 | struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req, | 357 | struct zfcp_fsf_req *fsf_req) |
343 | unsigned long old_id) | ||
344 | { | 358 | { |
345 | zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id); | 359 | _zfcp_dbf_scsi(tag, 1, scmd, fsf_req); |
346 | } | 360 | } |
347 | 361 | ||
348 | /** | 362 | /** |
@@ -352,12 +366,17 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf, | |||
352 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) | 366 | * @flag: indicates type of reset (Target Reset, Logical Unit Reset) |
353 | */ | 367 | */ |
354 | static inline | 368 | static inline |
355 | void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag) | 369 | void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) |
356 | { | 370 | { |
357 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device); | 371 | char tmp_tag[ZFCP_DBF_TAG_LEN]; |
372 | |||
373 | if (flag == FCP_TMF_TGT_RESET) | ||
374 | memcpy(tmp_tag, "tr_", 3); | ||
375 | else | ||
376 | memcpy(tmp_tag, "lr_", 3); | ||
358 | 377 | ||
359 | zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1, | 378 | memcpy(&tmp_tag[3], tag, 4); |
360 | zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0); | 379 | _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); |
361 | } | 380 | } |
362 | 381 | ||
363 | #endif /* ZFCP_DBF_H */ | 382 | #endif /* ZFCP_DBF_H */ |
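One convention worth noting in the new header: ZFCP_DBF_TAG_LEN is 7, and tags are composed to exactly that width, as zfcp_dbf_scsi_devreset() above does by prefixing a 4-character caller tag with "tr_" or "lr_". A tiny stand-alone sketch of that composition; the helper name and the sample suffix are made up for illustration:

	#include <string.h>

	#define DEMO_TAG_LEN 7	/* mirrors ZFCP_DBF_TAG_LEN */

	/* builds a fixed-width 7-byte tag such as "tr_" + a 4-byte suffix;
	 * the result is not NUL-terminated, matching the fixed-width tag
	 * fields in the trace records */
	static void demo_compose_tag(char tag[DEMO_TAG_LEN],
				     const char prefix[3], const char suffix[4])
	{
		memcpy(tag, prefix, 3);
		memcpy(&tag[3], suffix, 4);
	}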
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 0bcd5806bd9a..e003e306f870 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -76,9 +76,9 @@ static void zfcp_erp_action_ready(struct zfcp_erp_action *act) | |||
76 | struct zfcp_adapter *adapter = act->adapter; | 76 | struct zfcp_adapter *adapter = act->adapter; |
77 | 77 | ||
78 | list_move(&act->list, &act->adapter->erp_ready_head); | 78 | list_move(&act->list, &act->adapter->erp_ready_head); |
79 | zfcp_dbf_rec_action("erardy1", act); | 79 | zfcp_dbf_rec_run("erardy1", act); |
80 | wake_up(&adapter->erp_ready_wq); | 80 | wake_up(&adapter->erp_ready_wq); |
81 | zfcp_dbf_rec_thread("erardy2", adapter->dbf); | 81 | zfcp_dbf_rec_run("erardy2", act); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) | 84 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act) |
@@ -236,10 +236,10 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, | |||
236 | static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, | 236 | static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, |
237 | struct zfcp_port *port, | 237 | struct zfcp_port *port, |
238 | struct scsi_device *sdev, | 238 | struct scsi_device *sdev, |
239 | char *id, void *ref, u32 act_status) | 239 | char *id, u32 act_status) |
240 | { | 240 | { |
241 | int retval = 1, need; | 241 | int retval = 1, need; |
242 | struct zfcp_erp_action *act = NULL; | 242 | struct zfcp_erp_action *act; |
243 | 243 | ||
244 | if (!adapter->erp_thread) | 244 | if (!adapter->erp_thread) |
245 | return -EIO; | 245 | return -EIO; |
@@ -255,15 +255,14 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter, | |||
255 | ++adapter->erp_total_count; | 255 | ++adapter->erp_total_count; |
256 | list_add_tail(&act->list, &adapter->erp_ready_head); | 256 | list_add_tail(&act->list, &adapter->erp_ready_head); |
257 | wake_up(&adapter->erp_ready_wq); | 257 | wake_up(&adapter->erp_ready_wq); |
258 | zfcp_dbf_rec_thread("eracte1", adapter->dbf); | ||
259 | retval = 0; | 258 | retval = 0; |
260 | out: | 259 | out: |
261 | zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev); | 260 | zfcp_dbf_rec_trig(id, adapter, port, sdev, want, need); |
262 | return retval; | 261 | return retval; |
263 | } | 262 | } |
264 | 263 | ||
265 | static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, | 264 | static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, |
266 | int clear_mask, char *id, void *ref) | 265 | int clear_mask, char *id) |
267 | { | 266 | { |
268 | zfcp_erp_adapter_block(adapter, clear_mask); | 267 | zfcp_erp_adapter_block(adapter, clear_mask); |
269 | zfcp_scsi_schedule_rports_block(adapter); | 268 | zfcp_scsi_schedule_rports_block(adapter); |
@@ -275,7 +274,7 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, | |||
275 | return -EIO; | 274 | return -EIO; |
276 | } | 275 | } |
277 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, | 276 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, |
278 | adapter, NULL, NULL, id, ref, 0); | 277 | adapter, NULL, NULL, id, 0); |
279 | } | 278 | } |
280 | 279 | ||
281 | /** | 280 | /** |
@@ -283,10 +282,8 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, | |||
283 | * @adapter: Adapter to reopen. | 282 | * @adapter: Adapter to reopen. |
284 | * @clear: Status flags to clear. | 283 | * @clear: Status flags to clear. |
285 | * @id: Id for debug trace event. | 284 | * @id: Id for debug trace event. |
286 | * @ref: Reference for debug trace event. | ||
287 | */ | 285 | */ |
288 | void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, | 286 | void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id) |
289 | char *id, void *ref) | ||
290 | { | 287 | { |
291 | unsigned long flags; | 288 | unsigned long flags; |
292 | 289 | ||
@@ -299,7 +296,7 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, | |||
299 | ZFCP_STATUS_COMMON_ERP_FAILED); | 296 | ZFCP_STATUS_COMMON_ERP_FAILED); |
300 | else | 297 | else |
301 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, | 298 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter, |
302 | NULL, NULL, id, ref, 0); | 299 | NULL, NULL, id, 0); |
303 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 300 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
304 | } | 301 | } |
305 | 302 | ||
@@ -308,13 +305,12 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, | |||
308 | * @adapter: Adapter to shut down. | 305 | * @adapter: Adapter to shut down. |
309 | * @clear: Status flags to clear. | 306 | * @clear: Status flags to clear. |
310 | * @id: Id for debug trace event. | 307 | * @id: Id for debug trace event. |
311 | * @ref: Reference for debug trace event. | ||
312 | */ | 308 | */ |
313 | void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, | 309 | void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, |
314 | char *id, void *ref) | 310 | char *id) |
315 | { | 311 | { |
316 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; | 312 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; |
317 | zfcp_erp_adapter_reopen(adapter, clear | flags, id, ref); | 313 | zfcp_erp_adapter_reopen(adapter, clear | flags, id); |
318 | } | 314 | } |
319 | 315 | ||
320 | /** | 316 | /** |
@@ -322,13 +318,11 @@ void zfcp_erp_adapter_shutdown(struct zfcp_adapter *adapter, int clear, | |||
322 | * @port: Port to shut down. | 318 | * @port: Port to shut down. |
323 | * @clear: Status flags to clear. | 319 | * @clear: Status flags to clear. |
324 | * @id: Id for debug trace event. | 320 | * @id: Id for debug trace event. |
325 | * @ref: Reference for debug trace event. | ||
326 | */ | 321 | */ |
327 | void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id, | 322 | void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id) |
328 | void *ref) | ||
329 | { | 323 | { |
330 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; | 324 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; |
331 | zfcp_erp_port_reopen(port, clear | flags, id, ref); | 325 | zfcp_erp_port_reopen(port, clear | flags, id); |
332 | } | 326 | } |
333 | 327 | ||
334 | static void zfcp_erp_port_block(struct zfcp_port *port, int clear) | 328 | static void zfcp_erp_port_block(struct zfcp_port *port, int clear) |
@@ -337,8 +331,8 @@ static void zfcp_erp_port_block(struct zfcp_port *port, int clear) | |||
337 | ZFCP_STATUS_COMMON_UNBLOCKED | clear); | 331 | ZFCP_STATUS_COMMON_UNBLOCKED | clear); |
338 | } | 332 | } |
339 | 333 | ||
340 | static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, | 334 | static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, |
341 | int clear, char *id, void *ref) | 335 | char *id) |
342 | { | 336 | { |
343 | zfcp_erp_port_block(port, clear); | 337 | zfcp_erp_port_block(port, clear); |
344 | zfcp_scsi_schedule_rport_block(port); | 338 | zfcp_scsi_schedule_rport_block(port); |
@@ -347,28 +341,26 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, | |||
347 | return; | 341 | return; |
348 | 342 | ||
349 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, | 343 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED, |
350 | port->adapter, port, NULL, id, ref, 0); | 344 | port->adapter, port, NULL, id, 0); |
351 | } | 345 | } |
352 | 346 | ||
353 | /** | 347 | /** |
354 | * zfcp_erp_port_forced_reopen - Forced close of port and open again | 348 | * zfcp_erp_port_forced_reopen - Forced close of port and open again |
355 | * @port: Port to force close and to reopen. | 349 | * @port: Port to force close and to reopen. |
350 | * @clear: Status flags to clear. | ||
356 | * @id: Id for debug trace event. | 351 | * @id: Id for debug trace event. |
357 | * @ref: Reference for debug trace event. | ||
358 | */ | 352 | */ |
359 | void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id, | 353 | void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id) |
360 | void *ref) | ||
361 | { | 354 | { |
362 | unsigned long flags; | 355 | unsigned long flags; |
363 | struct zfcp_adapter *adapter = port->adapter; | 356 | struct zfcp_adapter *adapter = port->adapter; |
364 | 357 | ||
365 | write_lock_irqsave(&adapter->erp_lock, flags); | 358 | write_lock_irqsave(&adapter->erp_lock, flags); |
366 | _zfcp_erp_port_forced_reopen(port, clear, id, ref); | 359 | _zfcp_erp_port_forced_reopen(port, clear, id); |
367 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 360 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
368 | } | 361 | } |
369 | 362 | ||
370 | static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, | 363 | static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) |
371 | void *ref) | ||
372 | { | 364 | { |
373 | zfcp_erp_port_block(port, clear); | 365 | zfcp_erp_port_block(port, clear); |
374 | zfcp_scsi_schedule_rport_block(port); | 366 | zfcp_scsi_schedule_rport_block(port); |
@@ -380,24 +372,25 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, | |||
380 | } | 372 | } |
381 | 373 | ||
382 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, | 374 | return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT, |
383 | port->adapter, port, NULL, id, ref, 0); | 375 | port->adapter, port, NULL, id, 0); |
384 | } | 376 | } |
385 | 377 | ||
386 | /** | 378 | /** |
387 | * zfcp_erp_port_reopen - trigger remote port recovery | 379 | * zfcp_erp_port_reopen - trigger remote port recovery |
388 | * @port: port to recover | 380 | * @port: port to recover |
389 | * @clear_mask: flags in port status to be cleared | 381 | * @clear: flags in port status to be cleared |
382 | * @id: Id for debug trace event. | ||
390 | * | 383 | * |
391 | * Returns 0 if recovery has been triggered, < 0 if not. | 384 | * Returns 0 if recovery has been triggered, < 0 if not. |
392 | */ | 385 | */ |
393 | int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref) | 386 | int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id) |
394 | { | 387 | { |
395 | int retval; | 388 | int retval; |
396 | unsigned long flags; | 389 | unsigned long flags; |
397 | struct zfcp_adapter *adapter = port->adapter; | 390 | struct zfcp_adapter *adapter = port->adapter; |
398 | 391 | ||
399 | write_lock_irqsave(&adapter->erp_lock, flags); | 392 | write_lock_irqsave(&adapter->erp_lock, flags); |
400 | retval = _zfcp_erp_port_reopen(port, clear, id, ref); | 393 | retval = _zfcp_erp_port_reopen(port, clear, id); |
401 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 394 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
402 | 395 | ||
403 | return retval; | 396 | return retval; |
@@ -410,7 +403,7 @@ static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask) | |||
410 | } | 403 | } |
411 | 404 | ||
412 | static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, | 405 | static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, |
413 | void *ref, u32 act_status) | 406 | u32 act_status) |
414 | { | 407 | { |
415 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | 408 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
416 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | 409 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; |
@@ -421,17 +414,18 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, | |||
421 | return; | 414 | return; |
422 | 415 | ||
423 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, | 416 | zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter, |
424 | zfcp_sdev->port, sdev, id, ref, act_status); | 417 | zfcp_sdev->port, sdev, id, act_status); |
425 | } | 418 | } |
426 | 419 | ||
427 | /** | 420 | /** |
428 | * zfcp_erp_lun_reopen - initiate reopen of a LUN | 421 | * zfcp_erp_lun_reopen - initiate reopen of a LUN |
429 | * @sdev: SCSI device / LUN to be reopened | 422 | * @sdev: SCSI device / LUN to be reopened |
430 | * @clear_mask: specifies flags in LUN status to be cleared | 423 | * @clear: specifies flags in LUN status to be cleared |
424 | * @id: Id for debug trace event. | ||
425 | * | ||
431 | * Return: 0 on success, < 0 on error | 426 | * Return: 0 on success, < 0 on error |
432 | */ | 427 | */ |
433 | void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, | 428 | void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id) |
434 | void *ref) | ||
435 | { | 429 | { |
436 | unsigned long flags; | 430 | unsigned long flags; |
437 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | 431 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
@@ -439,7 +433,7 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, | |||
439 | struct zfcp_adapter *adapter = port->adapter; | 433 | struct zfcp_adapter *adapter = port->adapter; |
440 | 434 | ||
441 | write_lock_irqsave(&adapter->erp_lock, flags); | 435 | write_lock_irqsave(&adapter->erp_lock, flags); |
442 | _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); | 436 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); |
443 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 437 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
444 | } | 438 | } |
445 | 439 | ||
@@ -448,13 +442,11 @@ void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id, | |||
448 | * @sdev: SCSI device / LUN to shut down. | 442 | * @sdev: SCSI device / LUN to shut down. |
449 | * @clear: Status flags to clear. | 443 | * @clear: Status flags to clear. |
450 | * @id: Id for debug trace event. | 444 | * @id: Id for debug trace event. |
451 | * @ref: Reference for debug trace event. | ||
452 | */ | 445 | */ |
453 | void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id, | 446 | void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id) |
454 | void *ref) | ||
455 | { | 447 | { |
456 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; | 448 | int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; |
457 | zfcp_erp_lun_reopen(sdev, clear | flags, id, ref); | 449 | zfcp_erp_lun_reopen(sdev, clear | flags, id); |
458 | } | 450 | } |
459 | 451 | ||
460 | /** | 452 | /** |
@@ -476,7 +468,7 @@ void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) | |||
476 | int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; | 468 | int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; |
477 | 469 | ||
478 | write_lock_irqsave(&adapter->erp_lock, flags); | 470 | write_lock_irqsave(&adapter->erp_lock, flags); |
479 | _zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF); | 471 | _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF); |
480 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 472 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
481 | 473 | ||
482 | zfcp_erp_wait(adapter); | 474 | zfcp_erp_wait(adapter); |
@@ -490,14 +482,14 @@ static int status_change_set(unsigned long mask, atomic_t *status) | |||
490 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) | 482 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) |
491 | { | 483 | { |
492 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) | 484 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status)) |
493 | zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf); | 485 | zfcp_dbf_rec_run("eraubl1", &adapter->erp_action); |
494 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); | 486 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); |
495 | } | 487 | } |
496 | 488 | ||
497 | static void zfcp_erp_port_unblock(struct zfcp_port *port) | 489 | static void zfcp_erp_port_unblock(struct zfcp_port *port) |
498 | { | 490 | { |
499 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) | 491 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status)) |
500 | zfcp_dbf_rec_port("erpubl1", NULL, port); | 492 | zfcp_dbf_rec_run("erpubl1", &port->erp_action); |
501 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); | 493 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status); |
502 | } | 494 | } |
503 | 495 | ||
@@ -506,14 +498,14 @@ static void zfcp_erp_lun_unblock(struct scsi_device *sdev) | |||
506 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | 498 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
507 | 499 | ||
508 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) | 500 | if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status)) |
509 | zfcp_dbf_rec_lun("erlubl1", NULL, sdev); | 501 | zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action); |
510 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); | 502 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status); |
511 | } | 503 | } |
512 | 504 | ||
513 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) | 505 | static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action) |
514 | { | 506 | { |
515 | list_move(&erp_action->list, &erp_action->adapter->erp_running_head); | 507 | list_move(&erp_action->list, &erp_action->adapter->erp_running_head); |
516 | zfcp_dbf_rec_action("erator1", erp_action); | 508 | zfcp_dbf_rec_run("erator1", erp_action); |
517 | } | 509 | } |
518 | 510 | ||
519 | static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) | 511 | static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) |
@@ -530,11 +522,11 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act) | |||
530 | if (act->status & (ZFCP_STATUS_ERP_DISMISSED | | 522 | if (act->status & (ZFCP_STATUS_ERP_DISMISSED | |
531 | ZFCP_STATUS_ERP_TIMEDOUT)) { | 523 | ZFCP_STATUS_ERP_TIMEDOUT)) { |
532 | req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | 524 | req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; |
533 | zfcp_dbf_rec_action("erscf_1", act); | 525 | zfcp_dbf_rec_run("erscf_1", act); |
534 | req->erp_action = NULL; | 526 | req->erp_action = NULL; |
535 | } | 527 | } |
536 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) | 528 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) |
537 | zfcp_dbf_rec_action("erscf_2", act); | 529 | zfcp_dbf_rec_run("erscf_2", act); |
538 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) | 530 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) |
539 | act->fsf_req_id = 0; | 531 | act->fsf_req_id = 0; |
540 | } else | 532 | } else |
@@ -585,40 +577,40 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action) | |||
585 | } | 577 | } |
586 | 578 | ||
587 | static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, | 579 | static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, |
588 | int clear, char *id, void *ref) | 580 | int clear, char *id) |
589 | { | 581 | { |
590 | struct zfcp_port *port; | 582 | struct zfcp_port *port; |
591 | 583 | ||
592 | read_lock(&adapter->port_list_lock); | 584 | read_lock(&adapter->port_list_lock); |
593 | list_for_each_entry(port, &adapter->port_list, list) | 585 | list_for_each_entry(port, &adapter->port_list, list) |
594 | _zfcp_erp_port_reopen(port, clear, id, ref); | 586 | _zfcp_erp_port_reopen(port, clear, id); |
595 | read_unlock(&adapter->port_list_lock); | 587 | read_unlock(&adapter->port_list_lock); |
596 | } | 588 | } |
597 | 589 | ||
598 | static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, | 590 | static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, |
599 | char *id, void *ref) | 591 | char *id) |
600 | { | 592 | { |
601 | struct scsi_device *sdev; | 593 | struct scsi_device *sdev; |
602 | 594 | ||
603 | shost_for_each_device(sdev, port->adapter->scsi_host) | 595 | shost_for_each_device(sdev, port->adapter->scsi_host) |
604 | if (sdev_to_zfcp(sdev)->port == port) | 596 | if (sdev_to_zfcp(sdev)->port == port) |
605 | _zfcp_erp_lun_reopen(sdev, clear, id, ref, 0); | 597 | _zfcp_erp_lun_reopen(sdev, clear, id, 0); |
606 | } | 598 | } |
607 | 599 | ||
608 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) | 600 | static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) |
609 | { | 601 | { |
610 | switch (act->action) { | 602 | switch (act->action) { |
611 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 603 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
612 | _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1", NULL); | 604 | _zfcp_erp_adapter_reopen(act->adapter, 0, "ersff_1"); |
613 | break; | 605 | break; |
614 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 606 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
615 | _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2", NULL); | 607 | _zfcp_erp_port_forced_reopen(act->port, 0, "ersff_2"); |
616 | break; | 608 | break; |
617 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 609 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
618 | _zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL); | 610 | _zfcp_erp_port_reopen(act->port, 0, "ersff_3"); |
619 | break; | 611 | break; |
620 | case ZFCP_ERP_ACTION_REOPEN_LUN: | 612 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
621 | _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0); | 613 | _zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", 0); |
622 | break; | 614 | break; |
623 | } | 615 | } |
624 | } | 616 | } |
@@ -627,13 +619,13 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act) | |||
627 | { | 619 | { |
628 | switch (act->action) { | 620 | switch (act->action) { |
629 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 621 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
630 | _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1", NULL); | 622 | _zfcp_erp_port_reopen_all(act->adapter, 0, "ersfs_1"); |
631 | break; | 623 | break; |
632 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: | 624 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
633 | _zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL); | 625 | _zfcp_erp_port_reopen(act->port, 0, "ersfs_2"); |
634 | break; | 626 | break; |
635 | case ZFCP_ERP_ACTION_REOPEN_PORT: | 627 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
636 | _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL); | 628 | _zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3"); |
637 | break; | 629 | break; |
638 | } | 630 | } |
639 | } | 631 | } |
@@ -652,17 +644,6 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter) | |||
652 | read_unlock_irqrestore(&adapter->erp_lock, flags); | 644 | read_unlock_irqrestore(&adapter->erp_lock, flags); |
653 | } | 645 | } |
654 | 646 | ||
655 | static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act) | ||
656 | { | ||
657 | struct zfcp_qdio *qdio = act->adapter->qdio; | ||
658 | |||
659 | if (zfcp_qdio_open(qdio)) | ||
660 | return ZFCP_ERP_FAILED; | ||
661 | init_waitqueue_head(&qdio->req_q_wq); | ||
662 | atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status); | ||
663 | return ZFCP_ERP_SUCCEEDED; | ||
664 | } | ||
665 | |||
666 | static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) | 647 | static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) |
667 | { | 648 | { |
668 | struct zfcp_port *port; | 649 | struct zfcp_port *port; |
@@ -670,7 +651,7 @@ static void zfcp_erp_enqueue_ptp_port(struct zfcp_adapter *adapter) | |||
670 | adapter->peer_d_id); | 651 | adapter->peer_d_id); |
671 | if (IS_ERR(port)) /* error or port already attached */ | 652 | if (IS_ERR(port)) /* error or port already attached */ |
672 | return; | 653 | return; |
673 | _zfcp_erp_port_reopen(port, 0, "ereptp1", NULL); | 654 | _zfcp_erp_port_reopen(port, 0, "ereptp1"); |
674 | } | 655 | } |
675 | 656 | ||
676 | static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) | 657 | static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) |
@@ -693,10 +674,8 @@ static int zfcp_erp_adapter_strat_fsf_xconf(struct zfcp_erp_action *erp_action) | |||
693 | return ZFCP_ERP_FAILED; | 674 | return ZFCP_ERP_FAILED; |
694 | } | 675 | } |
695 | 676 | ||
696 | zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf); | ||
697 | wait_event(adapter->erp_ready_wq, | 677 | wait_event(adapter->erp_ready_wq, |
698 | !list_empty(&adapter->erp_ready_head)); | 678 | !list_empty(&adapter->erp_ready_head)); |
699 | zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf); | ||
700 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) | 679 | if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT) |
701 | break; | 680 | break; |
702 | 681 | ||
@@ -735,10 +714,10 @@ static int zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *act) | |||
735 | if (ret) | 714 | if (ret) |
736 | return ZFCP_ERP_FAILED; | 715 | return ZFCP_ERP_FAILED; |
737 | 716 | ||
738 | zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf); | 717 | zfcp_dbf_rec_run("erasox1", act); |
739 | wait_event(adapter->erp_ready_wq, | 718 | wait_event(adapter->erp_ready_wq, |
740 | !list_empty(&adapter->erp_ready_head)); | 719 | !list_empty(&adapter->erp_ready_head)); |
741 | zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf); | 720 | zfcp_dbf_rec_run("erasox2", act); |
742 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) | 721 | if (act->status & ZFCP_STATUS_ERP_TIMEDOUT) |
743 | return ZFCP_ERP_FAILED; | 722 | return ZFCP_ERP_FAILED; |
744 | 723 | ||
@@ -788,7 +767,7 @@ static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *act) | |||
788 | { | 767 | { |
789 | struct zfcp_adapter *adapter = act->adapter; | 768 | struct zfcp_adapter *adapter = act->adapter; |
790 | 769 | ||
791 | if (zfcp_erp_adapter_strategy_open_qdio(act)) { | 770 | if (zfcp_qdio_open(adapter->qdio)) { |
792 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | | 771 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK | |
793 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, | 772 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, |
794 | &adapter->status); | 773 | &adapter->status); |
@@ -1166,7 +1145,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) | |||
1166 | if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { | 1145 | if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) { |
1167 | _zfcp_erp_adapter_reopen(adapter, | 1146 | _zfcp_erp_adapter_reopen(adapter, |
1168 | ZFCP_STATUS_COMMON_ERP_FAILED, | 1147 | ZFCP_STATUS_COMMON_ERP_FAILED, |
1169 | "ersscg1", NULL); | 1148 | "ersscg1"); |
1170 | return ZFCP_ERP_EXIT; | 1149 | return ZFCP_ERP_EXIT; |
1171 | } | 1150 | } |
1172 | break; | 1151 | break; |
@@ -1176,7 +1155,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) | |||
1176 | if (zfcp_erp_strat_change_det(&port->status, erp_status)) { | 1155 | if (zfcp_erp_strat_change_det(&port->status, erp_status)) { |
1177 | _zfcp_erp_port_reopen(port, | 1156 | _zfcp_erp_port_reopen(port, |
1178 | ZFCP_STATUS_COMMON_ERP_FAILED, | 1157 | ZFCP_STATUS_COMMON_ERP_FAILED, |
1179 | "ersscg2", NULL); | 1158 | "ersscg2"); |
1180 | return ZFCP_ERP_EXIT; | 1159 | return ZFCP_ERP_EXIT; |
1181 | } | 1160 | } |
1182 | break; | 1161 | break; |
@@ -1186,7 +1165,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret) | |||
1186 | if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) { | 1165 | if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) { |
1187 | _zfcp_erp_lun_reopen(sdev, | 1166 | _zfcp_erp_lun_reopen(sdev, |
1188 | ZFCP_STATUS_COMMON_ERP_FAILED, | 1167 | ZFCP_STATUS_COMMON_ERP_FAILED, |
1189 | "ersscg3", NULL, 0); | 1168 | "ersscg3", 0); |
1190 | return ZFCP_ERP_EXIT; | 1169 | return ZFCP_ERP_EXIT; |
1191 | } | 1170 | } |
1192 | break; | 1171 | break; |
@@ -1206,7 +1185,7 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) | |||
1206 | } | 1185 | } |
1207 | 1186 | ||
1208 | list_del(&erp_action->list); | 1187 | list_del(&erp_action->list); |
1209 | zfcp_dbf_rec_action("eractd1", erp_action); | 1188 | zfcp_dbf_rec_run("eractd1", erp_action); |
1210 | 1189 | ||
1211 | switch (erp_action->action) { | 1190 | switch (erp_action->action) { |
1212 | case ZFCP_ERP_ACTION_REOPEN_LUN: | 1191 | case ZFCP_ERP_ACTION_REOPEN_LUN: |
@@ -1313,7 +1292,7 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action) | |||
1313 | erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; | 1292 | erp_action->status |= ZFCP_STATUS_ERP_LOWMEM; |
1314 | } | 1293 | } |
1315 | if (adapter->erp_total_count == adapter->erp_low_mem_count) | 1294 | if (adapter->erp_total_count == adapter->erp_low_mem_count) |
1316 | _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1", NULL); | 1295 | _zfcp_erp_adapter_reopen(adapter, 0, "erstgy1"); |
1317 | else { | 1296 | else { |
1318 | zfcp_erp_strategy_memwait(erp_action); | 1297 | zfcp_erp_strategy_memwait(erp_action); |
1319 | retval = ZFCP_ERP_CONTINUES; | 1298 | retval = ZFCP_ERP_CONTINUES; |
@@ -1357,11 +1336,9 @@ static int zfcp_erp_thread(void *data) | |||
1357 | unsigned long flags; | 1336 | unsigned long flags; |
1358 | 1337 | ||
1359 | for (;;) { | 1338 | for (;;) { |
1360 | zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf); | ||
1361 | wait_event_interruptible(adapter->erp_ready_wq, | 1339 | wait_event_interruptible(adapter->erp_ready_wq, |
1362 | !list_empty(&adapter->erp_ready_head) || | 1340 | !list_empty(&adapter->erp_ready_head) || |
1363 | kthread_should_stop()); | 1341 | kthread_should_stop()); |
1364 | zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf); | ||
1365 | 1342 | ||
1366 | if (kthread_should_stop()) | 1343 | if (kthread_should_stop()) |
1367 | break; | 1344 | break; |
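The hunks above apply one pattern across zfcp_erp.c: the trailing void *ref argument, which the trace code no longer consumed, is dropped from every recovery trigger, and the per-object trace helpers (zfcp_dbf_rec_adapter, _port, _lun, _action) collapse into a single zfcp_dbf_rec_run() keyed by a record id and the erp_action. A minimal sketch of a caller after the change, assuming the surrounding zfcp driver context; the function name and the record id "edexpl1" are invented for illustration:

	/* sketch only: triggering adapter recovery after this patch */
	static void example_reopen_adapter(struct zfcp_adapter *adapter)
	{
		/* was: zfcp_erp_adapter_reopen(adapter, 0, "edexpl1", NULL); */
		zfcp_erp_adapter_reopen(adapter, 0, "edexpl1");
	}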
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index bf8f3e514839..6e325284fbe7 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -45,47 +45,33 @@ extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *); | |||
45 | 45 | ||
46 | /* zfcp_dbf.c */ | 46 | /* zfcp_dbf.c */ |
47 | extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); | 47 | extern int zfcp_dbf_adapter_register(struct zfcp_adapter *); |
48 | extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *); | 48 | extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); |
49 | extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *); | 49 | extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, |
50 | extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *); | 50 | struct zfcp_port *, struct scsi_device *, u8, u8); |
51 | extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *); | 51 | extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); |
52 | extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *); | 52 | extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); |
53 | extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *); | 53 | extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); |
54 | extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *, | 54 | extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); |
55 | struct zfcp_adapter *, struct zfcp_port *, | ||
56 | struct scsi_device *); | ||
57 | extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *); | ||
58 | extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *, | ||
59 | struct zfcp_dbf *); | ||
60 | extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *, | ||
61 | struct fsf_status_read_buffer *); | ||
62 | extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); | ||
63 | extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); | 55 | extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); |
64 | extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32); | 56 | extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); |
65 | extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); | 57 | extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); |
66 | extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); | 58 | extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); |
67 | extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); | 59 | extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *); |
68 | extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *); | ||
69 | extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *, | ||
70 | struct scsi_cmnd *, struct zfcp_fsf_req *, | ||
71 | unsigned long); | ||
72 | 60 | ||
73 | /* zfcp_erp.c */ | 61 | /* zfcp_erp.c */ |
74 | extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); | 62 | extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); |
75 | extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); | 63 | extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32); |
76 | extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *); | 64 | extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *); |
77 | extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *, | 65 | extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *); |
78 | void *); | ||
79 | extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); | 66 | extern void zfcp_erp_set_port_status(struct zfcp_port *, u32); |
80 | extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); | 67 | extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32); |
81 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *); | 68 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *); |
82 | extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *); | 69 | extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); |
83 | extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *, | 70 | extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); |
84 | void *); | ||
85 | extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); | 71 | extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); |
86 | extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); | 72 | extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); |
87 | extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *); | 73 | extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); |
88 | extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *); | 74 | extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *); |
89 | extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); | 75 | extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *); |
90 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); | 76 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); |
91 | extern void zfcp_erp_thread_kill(struct zfcp_adapter *); | 77 | extern void zfcp_erp_thread_kill(struct zfcp_adapter *); |
@@ -149,6 +135,8 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *, | |||
149 | extern int zfcp_qdio_open(struct zfcp_qdio *); | 135 | extern int zfcp_qdio_open(struct zfcp_qdio *); |
150 | extern void zfcp_qdio_close(struct zfcp_qdio *); | 136 | extern void zfcp_qdio_close(struct zfcp_qdio *); |
151 | extern void zfcp_qdio_siosl(struct zfcp_adapter *); | 137 | extern void zfcp_qdio_siosl(struct zfcp_adapter *); |
138 | extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *, | ||
139 | struct qdio_buffer *); | ||
152 | 140 | ||
153 | /* zfcp_scsi.c */ | 141 | /* zfcp_scsi.c */ |
154 | extern struct zfcp_data zfcp_data; | 142 | extern struct zfcp_data zfcp_data; |
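The header now exports the consolidated trace entry points: a trigger/run pair for recovery records, zfcp_dbf_hba_fsf_uss/_res/_bit_err for HBA records, zfcp_dbf_san_req/_res/_in_els for SAN traffic, and a single zfcp_dbf_scsi() for SCSI records, each taking a short record-id string as its first argument. A hedged sketch of the SAN pair, matching the prototypes above; the id strings and the wrapper function are illustrative only:

	/* sketch: trace an outgoing CT/ELS request and its good response */
	static void example_trace_san(struct zfcp_fsf_req *fsf_req, u32 d_id)
	{
		zfcp_dbf_san_req("edsan_1", fsf_req, d_id);	/* request sent */
		zfcp_dbf_san_res("edsan_2", fsf_req);		/* good response */
	}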
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 86fd905df48b..30cf91a787a3 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -174,7 +174,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, | |||
174 | if (!port->d_id) | 174 | if (!port->d_id) |
175 | zfcp_erp_port_reopen(port, | 175 | zfcp_erp_port_reopen(port, |
176 | ZFCP_STATUS_COMMON_ERP_FAILED, | 176 | ZFCP_STATUS_COMMON_ERP_FAILED, |
177 | "fcrscn1", NULL); | 177 | "fcrscn1"); |
178 | } | 178 | } |
179 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 179 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
180 | } | 180 | } |
@@ -215,7 +215,7 @@ static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) | |||
215 | read_lock_irqsave(&adapter->port_list_lock, flags); | 215 | read_lock_irqsave(&adapter->port_list_lock, flags); |
216 | list_for_each_entry(port, &adapter->port_list, list) | 216 | list_for_each_entry(port, &adapter->port_list, list) |
217 | if (port->wwpn == wwpn) { | 217 | if (port->wwpn == wwpn) { |
218 | zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req); | 218 | zfcp_erp_port_forced_reopen(port, 0, "fciwwp1"); |
219 | break; | 219 | break; |
220 | } | 220 | } |
221 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 221 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
@@ -251,7 +251,7 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) | |||
251 | (struct fsf_status_read_buffer *) fsf_req->data; | 251 | (struct fsf_status_read_buffer *) fsf_req->data; |
252 | unsigned int els_type = status_buffer->payload.data[0]; | 252 | unsigned int els_type = status_buffer->payload.data[0]; |
253 | 253 | ||
254 | zfcp_dbf_san_incoming_els(fsf_req); | 254 | zfcp_dbf_san_in_els("fciels1", fsf_req); |
255 | if (els_type == ELS_PLOGI) | 255 | if (els_type == ELS_PLOGI) |
256 | zfcp_fc_incoming_plogi(fsf_req); | 256 | zfcp_fc_incoming_plogi(fsf_req); |
257 | else if (els_type == ELS_LOGO) | 257 | else if (els_type == ELS_LOGO) |
@@ -360,7 +360,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) | |||
360 | ret = zfcp_fc_ns_gid_pn(port); | 360 | ret = zfcp_fc_ns_gid_pn(port); |
361 | if (ret) { | 361 | if (ret) { |
362 | /* could not issue gid_pn for some reason */ | 362 | /* could not issue gid_pn for some reason */ |
363 | zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL); | 363 | zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1"); |
364 | goto out; | 364 | goto out; |
365 | } | 365 | } |
366 | 366 | ||
@@ -369,7 +369,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work) | |||
369 | goto out; | 369 | goto out; |
370 | } | 370 | } |
371 | 371 | ||
372 | zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); | 372 | zfcp_erp_port_reopen(port, 0, "fcgpn_3"); |
373 | out: | 373 | out: |
374 | put_device(&port->dev); | 374 | put_device(&port->dev); |
375 | } | 375 | } |
@@ -426,7 +426,7 @@ static void zfcp_fc_adisc_handler(void *data) | |||
426 | if (adisc->els.status) { | 426 | if (adisc->els.status) { |
427 | /* request rejected or timed out */ | 427 | /* request rejected or timed out */ |
428 | zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, | 428 | zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, |
429 | "fcadh_1", NULL); | 429 | "fcadh_1"); |
430 | goto out; | 430 | goto out; |
431 | } | 431 | } |
432 | 432 | ||
@@ -436,7 +436,7 @@ static void zfcp_fc_adisc_handler(void *data) | |||
436 | if ((port->wwpn != adisc_resp->adisc_wwpn) || | 436 | if ((port->wwpn != adisc_resp->adisc_wwpn) || |
437 | !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { | 437 | !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { |
438 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, | 438 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, |
439 | "fcadh_2", NULL); | 439 | "fcadh_2"); |
440 | goto out; | 440 | goto out; |
441 | } | 441 | } |
442 | 442 | ||
@@ -507,7 +507,7 @@ void zfcp_fc_link_test_work(struct work_struct *work) | |||
507 | 507 | ||
508 | /* send of ADISC was not possible */ | 508 | /* send of ADISC was not possible */ |
509 | atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); | 509 | atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); |
510 | zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); | 510 | zfcp_erp_port_forced_reopen(port, 0, "fcltwk1"); |
511 | 511 | ||
512 | out: | 512 | out: |
513 | put_device(&port->dev); | 513 | put_device(&port->dev); |
@@ -659,7 +659,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, | |||
659 | port = zfcp_port_enqueue(adapter, acc->fp_wwpn, | 659 | port = zfcp_port_enqueue(adapter, acc->fp_wwpn, |
660 | ZFCP_STATUS_COMMON_NOESC, d_id); | 660 | ZFCP_STATUS_COMMON_NOESC, d_id); |
661 | if (!IS_ERR(port)) | 661 | if (!IS_ERR(port)) |
662 | zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); | 662 | zfcp_erp_port_reopen(port, 0, "fcegpf1"); |
663 | else if (PTR_ERR(port) != -EEXIST) | 663 | else if (PTR_ERR(port) != -EEXIST) |
664 | ret = PTR_ERR(port); | 664 | ret = PTR_ERR(port); |
665 | } | 665 | } |
@@ -671,7 +671,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, | |||
671 | write_unlock_irqrestore(&adapter->port_list_lock, flags); | 671 | write_unlock_irqrestore(&adapter->port_list_lock, flags); |
672 | 672 | ||
673 | list_for_each_entry_safe(port, tmp, &remove_lh, list) { | 673 | list_for_each_entry_safe(port, tmp, &remove_lh, list) { |
674 | zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL); | 674 | zfcp_erp_port_shutdown(port, 0, "fcegpf2"); |
675 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); | 675 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); |
676 | } | 676 | } |
677 | 677 | ||
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 2eb7dd56ab80..60ff9d172c79 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -23,7 +23,7 @@ static void zfcp_fsf_request_timeout_handler(unsigned long data) | |||
23 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; | 23 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; |
24 | zfcp_qdio_siosl(adapter); | 24 | zfcp_qdio_siosl(adapter); |
25 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 25 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
26 | "fsrth_1", NULL); | 26 | "fsrth_1"); |
27 | } | 27 | } |
28 | 28 | ||
29 | static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, | 29 | static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, |
@@ -65,7 +65,7 @@ static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) | |||
65 | { | 65 | { |
66 | dev_err(&req->adapter->ccw_device->dev, "FCP device not " | 66 | dev_err(&req->adapter->ccw_device->dev, "FCP device not " |
67 | "operational because of an unsupported FC class\n"); | 67 | "operational because of an unsupported FC class\n"); |
68 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req); | 68 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); |
69 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 69 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
70 | } | 70 | } |
71 | 71 | ||
@@ -98,7 +98,7 @@ static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) | |||
98 | read_lock_irqsave(&adapter->port_list_lock, flags); | 98 | read_lock_irqsave(&adapter->port_list_lock, flags); |
99 | list_for_each_entry(port, &adapter->port_list, list) | 99 | list_for_each_entry(port, &adapter->port_list, list) |
100 | if (port->d_id == d_id) { | 100 | if (port->d_id == d_id) { |
101 | zfcp_erp_port_reopen(port, 0, "fssrpc1", req); | 101 | zfcp_erp_port_reopen(port, 0, "fssrpc1"); |
102 | break; | 102 | break; |
103 | } | 103 | } |
104 | read_unlock_irqrestore(&adapter->port_list_lock, flags); | 104 | read_unlock_irqrestore(&adapter->port_list_lock, flags); |
@@ -211,13 +211,13 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
211 | struct fsf_status_read_buffer *sr_buf = req->data; | 211 | struct fsf_status_read_buffer *sr_buf = req->data; |
212 | 212 | ||
213 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { | 213 | if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { |
214 | zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf); | 214 | zfcp_dbf_hba_fsf_uss("fssrh_1", req); |
215 | mempool_free(sr_buf, adapter->pool.status_read_data); | 215 | mempool_free(sr_buf, adapter->pool.status_read_data); |
216 | zfcp_fsf_req_free(req); | 216 | zfcp_fsf_req_free(req); |
217 | return; | 217 | return; |
218 | } | 218 | } |
219 | 219 | ||
220 | zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf); | 220 | zfcp_dbf_hba_fsf_uss("fssrh_2", req); |
221 | 221 | ||
222 | switch (sr_buf->status_type) { | 222 | switch (sr_buf->status_type) { |
223 | case FSF_STATUS_READ_PORT_CLOSED: | 223 | case FSF_STATUS_READ_PORT_CLOSED: |
@@ -232,7 +232,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
232 | dev_warn(&adapter->ccw_device->dev, | 232 | dev_warn(&adapter->ccw_device->dev, |
233 | "The error threshold for checksum statistics " | 233 | "The error threshold for checksum statistics " |
234 | "has been exceeded\n"); | 234 | "has been exceeded\n"); |
235 | zfcp_dbf_hba_berr(adapter->dbf, req); | 235 | zfcp_dbf_hba_bit_err("fssrh_3", req); |
236 | break; | 236 | break; |
237 | case FSF_STATUS_READ_LINK_DOWN: | 237 | case FSF_STATUS_READ_LINK_DOWN: |
238 | zfcp_fsf_status_read_link_down(req); | 238 | zfcp_fsf_status_read_link_down(req); |
@@ -247,7 +247,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) | |||
247 | zfcp_erp_adapter_reopen(adapter, | 247 | zfcp_erp_adapter_reopen(adapter, |
248 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 248 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
249 | ZFCP_STATUS_COMMON_ERP_FAILED, | 249 | ZFCP_STATUS_COMMON_ERP_FAILED, |
250 | "fssrh_2", req); | 250 | "fssrh_2"); |
251 | zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); | 251 | zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); |
252 | 252 | ||
253 | break; | 253 | break; |
@@ -287,7 +287,7 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) | |||
287 | "The FCP adapter reported a problem " | 287 | "The FCP adapter reported a problem " |
288 | "that cannot be recovered\n"); | 288 | "that cannot be recovered\n"); |
289 | zfcp_qdio_siosl(req->adapter); | 289 | zfcp_qdio_siosl(req->adapter); |
290 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req); | 290 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); |
291 | break; | 291 | break; |
292 | } | 292 | } |
293 | /* all non-return stats set FSFREQ_ERROR*/ | 293 | /* all non-return stats set FSFREQ_ERROR*/ |
@@ -304,7 +304,7 @@ static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) | |||
304 | dev_err(&req->adapter->ccw_device->dev, | 304 | dev_err(&req->adapter->ccw_device->dev, |
305 | "The FCP adapter does not recognize the command 0x%x\n", | 305 | "The FCP adapter does not recognize the command 0x%x\n", |
306 | req->qtcb->header.fsf_command); | 306 | req->qtcb->header.fsf_command); |
307 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req); | 307 | zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); |
308 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 308 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
309 | break; | 309 | break; |
310 | case FSF_ADAPTER_STATUS_AVAILABLE: | 310 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -335,17 +335,17 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
335 | "QTCB version 0x%x not supported by FCP adapter " | 335 | "QTCB version 0x%x not supported by FCP adapter " |
336 | "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, | 336 | "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, |
337 | psq->word[0], psq->word[1]); | 337 | psq->word[0], psq->word[1]); |
338 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req); | 338 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1"); |
339 | break; | 339 | break; |
340 | case FSF_PROT_ERROR_STATE: | 340 | case FSF_PROT_ERROR_STATE: |
341 | case FSF_PROT_SEQ_NUMB_ERROR: | 341 | case FSF_PROT_SEQ_NUMB_ERROR: |
342 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); | 342 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_2"); |
343 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 343 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
344 | break; | 344 | break; |
345 | case FSF_PROT_UNSUPP_QTCB_TYPE: | 345 | case FSF_PROT_UNSUPP_QTCB_TYPE: |
346 | dev_err(&adapter->ccw_device->dev, | 346 | dev_err(&adapter->ccw_device->dev, |
347 | "The QTCB type is not supported by the FCP adapter\n"); | 347 | "The QTCB type is not supported by the FCP adapter\n"); |
348 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req); | 348 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); |
349 | break; | 349 | break; |
350 | case FSF_PROT_HOST_CONNECTION_INITIALIZING: | 350 | case FSF_PROT_HOST_CONNECTION_INITIALIZING: |
351 | atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, | 351 | atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, |
@@ -355,12 +355,12 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
355 | dev_err(&adapter->ccw_device->dev, | 355 | dev_err(&adapter->ccw_device->dev, |
356 | "0x%Lx is an ambiguous request identifier\n", | 356 | "0x%Lx is an ambiguous request identifier\n", |
357 | (unsigned long long)qtcb->bottom.support.req_handle); | 357 | (unsigned long long)qtcb->bottom.support.req_handle); |
358 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req); | 358 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4"); |
359 | break; | 359 | break; |
360 | case FSF_PROT_LINK_DOWN: | 360 | case FSF_PROT_LINK_DOWN: |
361 | zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); | 361 | zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); |
362 | /* go through reopen to flush pending requests */ | 362 | /* go through reopen to flush pending requests */ |
363 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req); | 363 | zfcp_erp_adapter_reopen(adapter, 0, "fspse_6"); |
364 | break; | 364 | break; |
365 | case FSF_PROT_REEST_QUEUE: | 365 | case FSF_PROT_REEST_QUEUE: |
366 | /* All ports should be marked as ready to run again */ | 366 | /* All ports should be marked as ready to run again */ |
@@ -369,14 +369,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) | |||
369 | zfcp_erp_adapter_reopen(adapter, | 369 | zfcp_erp_adapter_reopen(adapter, |
370 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 370 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
371 | ZFCP_STATUS_COMMON_ERP_FAILED, | 371 | ZFCP_STATUS_COMMON_ERP_FAILED, |
372 | "fspse_8", req); | 372 | "fspse_8"); |
373 | break; | 373 | break; |
374 | default: | 374 | default: |
375 | dev_err(&adapter->ccw_device->dev, | 375 | dev_err(&adapter->ccw_device->dev, |
376 | "0x%x is not a valid transfer protocol status\n", | 376 | "0x%x is not a valid transfer protocol status\n", |
377 | qtcb->prefix.prot_status); | 377 | qtcb->prefix.prot_status); |
378 | zfcp_qdio_siosl(adapter); | 378 | zfcp_qdio_siosl(adapter); |
379 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req); | 379 | zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9"); |
380 | } | 380 | } |
381 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 381 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
382 | } | 382 | } |
@@ -482,7 +482,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) | |||
482 | dev_err(&adapter->ccw_device->dev, | 482 | dev_err(&adapter->ccw_device->dev, |
483 | "Unknown or unsupported arbitrated loop " | 483 | "Unknown or unsupported arbitrated loop " |
484 | "fibre channel topology detected\n"); | 484 | "fibre channel topology detected\n"); |
485 | zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req); | 485 | zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1"); |
486 | return -EIO; | 486 | return -EIO; |
487 | } | 487 | } |
488 | 488 | ||
@@ -518,7 +518,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) | |||
518 | "FCP adapter maximum QTCB size (%d bytes) " | 518 | "FCP adapter maximum QTCB size (%d bytes) " |
519 | "is too small\n", | 519 | "is too small\n", |
520 | bottom->max_qtcb_size); | 520 | bottom->max_qtcb_size); |
521 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req); | 521 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); |
522 | return; | 522 | return; |
523 | } | 523 | } |
524 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, | 524 | atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, |
@@ -536,7 +536,7 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) | |||
536 | &qtcb->header.fsf_status_qual.link_down_info); | 536 | &qtcb->header.fsf_status_qual.link_down_info); |
537 | break; | 537 | break; |
538 | default: | 538 | default: |
539 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req); | 539 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); |
540 | return; | 540 | return; |
541 | } | 541 | } |
542 | 542 | ||
@@ -552,14 +552,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) | |||
552 | dev_err(&adapter->ccw_device->dev, | 552 | dev_err(&adapter->ccw_device->dev, |
553 | "The FCP adapter only supports newer " | 553 | "The FCP adapter only supports newer " |
554 | "control block versions\n"); | 554 | "control block versions\n"); |
555 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req); | 555 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4"); |
556 | return; | 556 | return; |
557 | } | 557 | } |
558 | if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { | 558 | if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { |
559 | dev_err(&adapter->ccw_device->dev, | 559 | dev_err(&adapter->ccw_device->dev, |
560 | "The FCP adapter only supports older " | 560 | "The FCP adapter only supports older " |
561 | "control block versions\n"); | 561 | "control block versions\n"); |
562 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req); | 562 | zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5"); |
563 | } | 563 | } |
564 | } | 564 | } |
565 | 565 | ||
@@ -700,7 +700,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) | |||
700 | del_timer(&req->timer); | 700 | del_timer(&req->timer); |
701 | /* lookup request again, list might have changed */ | 701 | /* lookup request again, list might have changed */ |
702 | zfcp_reqlist_find_rm(adapter->req_list, req_id); | 702 | zfcp_reqlist_find_rm(adapter->req_list, req_id); |
703 | zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req); | 703 | zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); |
704 | return -EIO; | 704 | return -EIO; |
705 | } | 705 | } |
706 | 706 | ||
@@ -754,10 +754,11 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) | |||
754 | goto out; | 754 | goto out; |
755 | 755 | ||
756 | failed_req_send: | 756 | failed_req_send: |
757 | req->data = NULL; | ||
757 | mempool_free(sr_buf, adapter->pool.status_read_data); | 758 | mempool_free(sr_buf, adapter->pool.status_read_data); |
758 | failed_buf: | 759 | failed_buf: |
760 | zfcp_dbf_hba_fsf_uss("fssr__1", req); | ||
759 | zfcp_fsf_req_free(req); | 761 | zfcp_fsf_req_free(req); |
760 | zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL); | ||
761 | out: | 762 | out: |
762 | spin_unlock_irq(&qdio->req_q_lock); | 763 | spin_unlock_irq(&qdio->req_q_lock); |
763 | return retval; | 764 | return retval; |
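In the status-read failure path the request's data pointer is detached before the status-read buffer goes back to the mempool, and the unsolicited-status trace record "fssr__1" replaces the old "fail" record and runs before the request is freed, presumably so the trace helper never dereferences an already-freed buffer. The resulting ordering, condensed for illustration only:

	/* sketch of the cleanup ordering after the patch */
	req->data = NULL;				/* detach buffer first  */
	mempool_free(sr_buf, adapter->pool.status_read_data);
	zfcp_dbf_hba_fsf_uss("fssr__1", req);		/* trace without sr_buf */
	zfcp_fsf_req_free(req);				/* free the request last */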
@@ -776,14 +777,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | |||
776 | case FSF_PORT_HANDLE_NOT_VALID: | 777 | case FSF_PORT_HANDLE_NOT_VALID: |
777 | if (fsq->word[0] == fsq->word[1]) { | 778 | if (fsq->word[0] == fsq->word[1]) { |
778 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, | 779 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, |
779 | "fsafch1", req); | 780 | "fsafch1"); |
780 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 781 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
781 | } | 782 | } |
782 | break; | 783 | break; |
783 | case FSF_LUN_HANDLE_NOT_VALID: | 784 | case FSF_LUN_HANDLE_NOT_VALID: |
784 | if (fsq->word[0] == fsq->word[1]) { | 785 | if (fsq->word[0] == fsq->word[1]) { |
785 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2", | 786 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2"); |
786 | req); | ||
787 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 787 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
788 | } | 788 | } |
789 | break; | 789 | break; |
@@ -794,14 +794,13 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) | |||
794 | zfcp_erp_set_port_status(zfcp_sdev->port, | 794 | zfcp_erp_set_port_status(zfcp_sdev->port, |
795 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | 795 | ZFCP_STATUS_COMMON_ACCESS_BOXED); |
796 | zfcp_erp_port_reopen(zfcp_sdev->port, | 796 | zfcp_erp_port_reopen(zfcp_sdev->port, |
797 | ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3", | 797 | ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3"); |
798 | req); | ||
799 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 798 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
800 | break; | 799 | break; |
801 | case FSF_LUN_BOXED: | 800 | case FSF_LUN_BOXED: |
802 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); | 801 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); |
803 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, | 802 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, |
804 | "fsafch4", req); | 803 | "fsafch4"); |
805 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 804 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
806 | break; | 805 | break; |
807 | case FSF_ADAPTER_STATUS_AVAILABLE: | 806 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -882,7 +881,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) | |||
882 | 881 | ||
883 | switch (header->fsf_status) { | 882 | switch (header->fsf_status) { |
884 | case FSF_GOOD: | 883 | case FSF_GOOD: |
885 | zfcp_dbf_san_ct_response(req); | 884 | zfcp_dbf_san_res("fsscth1", req); |
886 | ct->status = 0; | 885 | ct->status = 0; |
887 | break; | 886 | break; |
888 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | 887 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
@@ -902,7 +901,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) | |||
902 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 901 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
903 | break; | 902 | break; |
904 | case FSF_PORT_HANDLE_NOT_VALID: | 903 | case FSF_PORT_HANDLE_NOT_VALID: |
905 | zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); | 904 | zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); |
906 | /* fall through */ | 905 | /* fall through */ |
907 | case FSF_GENERIC_COMMAND_REJECTED: | 906 | case FSF_GENERIC_COMMAND_REJECTED: |
908 | case FSF_PAYLOAD_SIZE_MISMATCH: | 907 | case FSF_PAYLOAD_SIZE_MISMATCH: |
@@ -1025,7 +1024,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, | |||
1025 | req->qtcb->header.port_handle = wka_port->handle; | 1024 | req->qtcb->header.port_handle = wka_port->handle; |
1026 | req->data = ct; | 1025 | req->data = ct; |
1027 | 1026 | ||
1028 | zfcp_dbf_san_ct_request(req, wka_port->d_id); | 1027 | zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); |
1029 | 1028 | ||
1030 | ret = zfcp_fsf_req_send(req); | 1029 | ret = zfcp_fsf_req_send(req); |
1031 | if (ret) | 1030 | if (ret) |
@@ -1053,7 +1052,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) | |||
1053 | 1052 | ||
1054 | switch (header->fsf_status) { | 1053 | switch (header->fsf_status) { |
1055 | case FSF_GOOD: | 1054 | case FSF_GOOD: |
1056 | zfcp_dbf_san_els_response(req); | 1055 | zfcp_dbf_san_res("fsselh1", req); |
1057 | send_els->status = 0; | 1056 | send_els->status = 0; |
1058 | break; | 1057 | break; |
1059 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | 1058 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
@@ -1127,7 +1126,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, | |||
1127 | req->handler = zfcp_fsf_send_els_handler; | 1126 | req->handler = zfcp_fsf_send_els_handler; |
1128 | req->data = els; | 1127 | req->data = els; |
1129 | 1128 | ||
1130 | zfcp_dbf_san_els_request(req); | 1129 | zfcp_dbf_san_req("fssels1", req, d_id); |
1131 | 1130 | ||
1132 | ret = zfcp_fsf_req_send(req); | 1131 | ret = zfcp_fsf_req_send(req); |
1133 | if (ret) | 1132 | if (ret) |
@@ -1448,7 +1447,7 @@ static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) | |||
1448 | 1447 | ||
1449 | switch (req->qtcb->header.fsf_status) { | 1448 | switch (req->qtcb->header.fsf_status) { |
1450 | case FSF_PORT_HANDLE_NOT_VALID: | 1449 | case FSF_PORT_HANDLE_NOT_VALID: |
1451 | zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req); | 1450 | zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1"); |
1452 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1451 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1453 | break; | 1452 | break; |
1454 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1453 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -1580,7 +1579,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) | |||
1580 | 1579 | ||
1581 | if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { | 1580 | if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { |
1582 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1581 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1583 | zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); | 1582 | zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1"); |
1584 | } | 1583 | } |
1585 | 1584 | ||
1586 | wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; | 1585 | wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE; |
@@ -1638,7 +1637,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1638 | 1637 | ||
1639 | switch (header->fsf_status) { | 1638 | switch (header->fsf_status) { |
1640 | case FSF_PORT_HANDLE_NOT_VALID: | 1639 | case FSF_PORT_HANDLE_NOT_VALID: |
1641 | zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req); | 1640 | zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1"); |
1642 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1641 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1643 | break; | 1642 | break; |
1644 | case FSF_ACCESS_DENIED: | 1643 | case FSF_ACCESS_DENIED: |
@@ -1654,7 +1653,7 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) | |||
1654 | &sdev_to_zfcp(sdev)->status); | 1653 | &sdev_to_zfcp(sdev)->status); |
1655 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); | 1654 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED); |
1656 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, | 1655 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, |
1657 | "fscpph2", req); | 1656 | "fscpph2"); |
1658 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1657 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1659 | break; | 1658 | break; |
1660 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1659 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -1743,7 +1742,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) | |||
1743 | switch (header->fsf_status) { | 1742 | switch (header->fsf_status) { |
1744 | 1743 | ||
1745 | case FSF_PORT_HANDLE_NOT_VALID: | 1744 | case FSF_PORT_HANDLE_NOT_VALID: |
1746 | zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1", req); | 1745 | zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1"); |
1747 | /* fall through */ | 1746 | /* fall through */ |
1748 | case FSF_LUN_ALREADY_OPEN: | 1747 | case FSF_LUN_ALREADY_OPEN: |
1749 | break; | 1748 | break; |
@@ -1755,8 +1754,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) | |||
1755 | zfcp_erp_set_port_status(zfcp_sdev->port, | 1754 | zfcp_erp_set_port_status(zfcp_sdev->port, |
1756 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | 1755 | ZFCP_STATUS_COMMON_ACCESS_BOXED); |
1757 | zfcp_erp_port_reopen(zfcp_sdev->port, | 1756 | zfcp_erp_port_reopen(zfcp_sdev->port, |
1758 | ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2", | 1757 | ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2"); |
1759 | req); | ||
1760 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1758 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1761 | break; | 1759 | break; |
1762 | case FSF_LUN_SHARING_VIOLATION: | 1760 | case FSF_LUN_SHARING_VIOLATION: |
@@ -1852,20 +1850,18 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) | |||
1852 | 1850 | ||
1853 | switch (req->qtcb->header.fsf_status) { | 1851 | switch (req->qtcb->header.fsf_status) { |
1854 | case FSF_PORT_HANDLE_NOT_VALID: | 1852 | case FSF_PORT_HANDLE_NOT_VALID: |
1855 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1", | 1853 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); |
1856 | req); | ||
1857 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1854 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1858 | break; | 1855 | break; |
1859 | case FSF_LUN_HANDLE_NOT_VALID: | 1856 | case FSF_LUN_HANDLE_NOT_VALID: |
1860 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2", req); | 1857 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2"); |
1861 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1858 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1862 | break; | 1859 | break; |
1863 | case FSF_PORT_BOXED: | 1860 | case FSF_PORT_BOXED: |
1864 | zfcp_erp_set_port_status(zfcp_sdev->port, | 1861 | zfcp_erp_set_port_status(zfcp_sdev->port, |
1865 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | 1862 | ZFCP_STATUS_COMMON_ACCESS_BOXED); |
1866 | zfcp_erp_port_reopen(zfcp_sdev->port, | 1863 | zfcp_erp_port_reopen(zfcp_sdev->port, |
1867 | ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3", | 1864 | ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3"); |
1868 | req); | ||
1869 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 1865 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
1870 | break; | 1866 | break; |
1871 | case FSF_ADAPTER_STATUS_AVAILABLE: | 1867 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -2002,13 +1998,12 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) | |||
2002 | switch (header->fsf_status) { | 1998 | switch (header->fsf_status) { |
2003 | case FSF_HANDLE_MISMATCH: | 1999 | case FSF_HANDLE_MISMATCH: |
2004 | case FSF_PORT_HANDLE_NOT_VALID: | 2000 | case FSF_PORT_HANDLE_NOT_VALID: |
2005 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1", | 2001 | zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1"); |
2006 | req); | ||
2007 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2002 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2008 | break; | 2003 | break; |
2009 | case FSF_FCPLUN_NOT_VALID: | 2004 | case FSF_FCPLUN_NOT_VALID: |
2010 | case FSF_LUN_HANDLE_NOT_VALID: | 2005 | case FSF_LUN_HANDLE_NOT_VALID: |
2011 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2", req); | 2006 | zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2"); |
2012 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2007 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2013 | break; | 2008 | break; |
2014 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: | 2009 | case FSF_SERVICE_CLASS_NOT_SUPPORTED: |
@@ -2026,7 +2021,7 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) | |||
2026 | (unsigned long long)zfcp_scsi_dev_lun(sdev), | 2021 | (unsigned long long)zfcp_scsi_dev_lun(sdev), |
2027 | (unsigned long long)zfcp_sdev->port->wwpn); | 2022 | (unsigned long long)zfcp_sdev->port->wwpn); |
2028 | zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, | 2023 | zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, |
2029 | "fssfch3", req); | 2024 | "fssfch3"); |
2030 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2025 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2031 | break; | 2026 | break; |
2032 | case FSF_CMND_LENGTH_NOT_VALID: | 2027 | case FSF_CMND_LENGTH_NOT_VALID: |
@@ -2037,21 +2032,20 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) | |||
2037 | (unsigned long long)zfcp_scsi_dev_lun(sdev), | 2032 | (unsigned long long)zfcp_scsi_dev_lun(sdev), |
2038 | (unsigned long long)zfcp_sdev->port->wwpn); | 2033 | (unsigned long long)zfcp_sdev->port->wwpn); |
2039 | zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, | 2034 | zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0, |
2040 | "fssfch4", req); | 2035 | "fssfch4"); |
2041 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2036 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2042 | break; | 2037 | break; |
2043 | case FSF_PORT_BOXED: | 2038 | case FSF_PORT_BOXED: |
2044 | zfcp_erp_set_port_status(zfcp_sdev->port, | 2039 | zfcp_erp_set_port_status(zfcp_sdev->port, |
2045 | ZFCP_STATUS_COMMON_ACCESS_BOXED); | 2040 | ZFCP_STATUS_COMMON_ACCESS_BOXED); |
2046 | zfcp_erp_port_reopen(zfcp_sdev->port, | 2041 | zfcp_erp_port_reopen(zfcp_sdev->port, |
2047 | ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5", | 2042 | ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5"); |
2048 | req); | ||
2049 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2043 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2050 | break; | 2044 | break; |
2051 | case FSF_LUN_BOXED: | 2045 | case FSF_LUN_BOXED: |
2052 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); | 2046 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); |
2053 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, | 2047 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, |
2054 | "fssfch6", req); | 2048 | "fssfch6"); |
2055 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; | 2049 | req->status |= ZFCP_STATUS_FSFREQ_ERROR; |
2056 | break; | 2050 | break; |
2057 | case FSF_ADAPTER_STATUS_AVAILABLE: | 2051 | case FSF_ADAPTER_STATUS_AVAILABLE: |
@@ -2104,7 +2098,7 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) | |||
2104 | 2098 | ||
2105 | skip_fsfstatus: | 2099 | skip_fsfstatus: |
2106 | zfcp_fsf_req_trace(req, scpnt); | 2100 | zfcp_fsf_req_trace(req, scpnt); |
2107 | zfcp_dbf_scsi_result(req->adapter->dbf, scpnt, req); | 2101 | zfcp_dbf_scsi_result(scpnt, req); |
2108 | 2102 | ||
2109 | scpnt->host_scribble = NULL; | 2103 | scpnt->host_scribble = NULL; |
2110 | (scpnt->scsi_done) (scpnt); | 2104 | (scpnt->scsi_done) (scpnt); |
@@ -2420,3 +2414,12 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) | |||
2420 | break; | 2414 | break; |
2421 | } | 2415 | } |
2422 | } | 2416 | } |
2417 | |||
2418 | struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio, | ||
2419 | struct qdio_buffer *sbal) | ||
2420 | { | ||
2421 | struct qdio_buffer_element *sbale = &sbal->element[0]; | ||
2422 | u64 req_id = (unsigned long) sbale->addr; | ||
2423 | |||
2424 | return zfcp_reqlist_find(qdio->adapter->req_list, req_id); | ||
2425 | } | ||
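zfcp_fsf.c gains zfcp_fsf_get_req(), which reads the request id stored in the first scatter-gather element of an inbound SBAL and resolves it through the adapter's request list. A sketch of how a response-queue consumer might use it; the function name and the dispatch comment are illustrative, not part of the patch:

	static void example_consume_sbal(struct zfcp_qdio *qdio,
					 struct qdio_buffer *sbal)
	{
		struct zfcp_fsf_req *req = zfcp_fsf_get_req(qdio, sbal);

		if (!req)
			return;	/* id unknown or request already completed */
		/* ... hand req to its req->handler() as usual ... */
	}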
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index a0554beb4179..2511f92302dd 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -41,7 +41,7 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, | |||
41 | zfcp_qdio_siosl(adapter); | 41 | zfcp_qdio_siosl(adapter); |
42 | zfcp_erp_adapter_reopen(adapter, | 42 | zfcp_erp_adapter_reopen(adapter, |
43 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | | 43 | ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | |
44 | ZFCP_STATUS_COMMON_ERP_FAILED, id, NULL); | 44 | ZFCP_STATUS_COMMON_ERP_FAILED, id); |
45 | } | 45 | } |
46 | 46 | ||
47 | static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) | 47 | static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt) |
@@ -74,7 +74,6 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err, | |||
74 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; | 74 | struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; |
75 | 75 | ||
76 | if (unlikely(qdio_err)) { | 76 | if (unlikely(qdio_err)) { |
77 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count); | ||
78 | zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); | 77 | zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err); |
79 | return; | 78 | return; |
80 | } | 79 | } |
@@ -97,7 +96,6 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
97 | int sbal_idx, sbal_no; | 96 | int sbal_idx, sbal_no; |
98 | 97 | ||
99 | if (unlikely(qdio_err)) { | 98 | if (unlikely(qdio_err)) { |
100 | zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, idx, count); | ||
101 | zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); | 99 | zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); |
102 | return; | 100 | return; |
103 | } | 101 | } |
@@ -116,7 +114,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, | |||
116 | * put SBALs back to response queue | 114 | * put SBALs back to response queue |
117 | */ | 115 | */ |
118 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) | 116 | if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count)) |
119 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2", NULL); | 117 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2"); |
120 | } | 118 | } |
121 | 119 | ||
122 | static struct qdio_buffer_element * | 120 | static struct qdio_buffer_element * |
@@ -236,7 +234,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) | |||
236 | if (!ret) { | 234 | if (!ret) { |
237 | atomic_inc(&qdio->req_q_full); | 235 | atomic_inc(&qdio->req_q_full); |
238 | /* assume hanging outbound queue, try queue recovery */ | 236 | /* assume hanging outbound queue, try queue recovery */ |
239 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL); | 237 | zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); |
240 | } | 238 | } |
241 | 239 | ||
242 | spin_lock_irq(&qdio->req_q_lock); | 240 | spin_lock_irq(&qdio->req_q_lock); |
@@ -309,6 +307,7 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) | |||
309 | return -ENOMEM; | 307 | return -ENOMEM; |
310 | 308 | ||
311 | zfcp_qdio_setup_init_data(&init_data, qdio); | 309 | zfcp_qdio_setup_init_data(&init_data, qdio); |
310 | init_waitqueue_head(&qdio->req_q_wq); | ||
312 | 311 | ||
313 | return qdio_allocate(&init_data); | 312 | return qdio_allocate(&init_data); |
314 | } | 313 | } |
@@ -393,6 +392,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) | |||
393 | /* set index of first available SBALS / number of available SBALS */ | 392 | /* set index of first available SBALS / number of available SBALS */ |
394 | qdio->req_q_idx = 0; | 393 | qdio->req_q_idx = 0; |
395 | atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); | 394 | atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); |
395 | atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); | ||
396 | 396 | ||
397 | return 0; | 397 | return 0; |
398 | 398 | ||
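On the QDIO side, the request-queue wait queue is now initialized once in zfcp_qdio_allocate() and zfcp_qdio_open() raises ZFCP_STATUS_ADAPTER_QDIOUP itself; together with the removal of the qdio-error trace calls, this appears to be what let the zfcp_erp_adapter_strategy_open_qdio() wrapper be deleted from zfcp_erp.c. The adapter-open path in the ERP strategy then reduces to a direct call, roughly as condensed from the zfcp_erp.c hunk above:

	if (zfcp_qdio_open(adapter->qdio)) {	/* sets QDIOUP on success */
		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
				  &adapter->status);
		return ZFCP_ERP_FAILED;
	}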
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 63529ed801eb..ddb5800823a9 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -30,6 +30,10 @@ module_param_named(dif, enable_dif, bool, 0600); | |||
30 | MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); | 30 | MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | static bool allow_lun_scan = 1; | ||
34 | module_param(allow_lun_scan, bool, 0600); | ||
35 | MODULE_PARM_DESC(allow_lun_scan, "For NPIV, scan and attach all storage LUNs"); | ||
36 | |||
33 | static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, | 37 | static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth, |
34 | int reason) | 38 | int reason) |
35 | { | 39 | { |
@@ -68,11 +72,8 @@ static int zfcp_scsi_slave_configure(struct scsi_device *sdp) | |||
68 | 72 | ||
69 | static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) | 73 | static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) |
70 | { | 74 | { |
71 | struct zfcp_adapter *adapter = | ||
72 | (struct zfcp_adapter *) scpnt->device->host->hostdata[0]; | ||
73 | |||
74 | set_host_byte(scpnt, result); | 75 | set_host_byte(scpnt, result); |
75 | zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); | 76 | zfcp_dbf_scsi_fail_send(scpnt); |
76 | scpnt->scsi_done(scpnt); | 77 | scpnt->scsi_done(scpnt); |
77 | } | 78 | } |
78 | 79 | ||
@@ -80,7 +81,6 @@ static | |||
80 | int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) | 81 | int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) |
81 | { | 82 | { |
82 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); | 83 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); |
83 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | ||
84 | struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); | 84 | struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); |
85 | int status, scsi_result, ret; | 85 | int status, scsi_result, ret; |
86 | 86 | ||
@@ -91,7 +91,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) | |||
91 | scsi_result = fc_remote_port_chkready(rport); | 91 | scsi_result = fc_remote_port_chkready(rport); |
92 | if (unlikely(scsi_result)) { | 92 | if (unlikely(scsi_result)) { |
93 | scpnt->result = scsi_result; | 93 | scpnt->result = scsi_result; |
94 | zfcp_dbf_scsi_fail_send(adapter->dbf, scpnt); | 94 | zfcp_dbf_scsi_fail_send(scpnt); |
95 | scpnt->scsi_done(scpnt); | 95 | scpnt->scsi_done(scpnt); |
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
@@ -134,6 +134,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) | |||
134 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); | 134 | struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); |
135 | struct zfcp_port *port; | 135 | struct zfcp_port *port; |
136 | struct zfcp_unit *unit; | 136 | struct zfcp_unit *unit; |
137 | int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; | ||
137 | 138 | ||
138 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); | 139 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
139 | if (!port) | 140 | if (!port) |
@@ -143,7 +144,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) | |||
143 | if (unit) | 144 | if (unit) |
144 | put_device(&unit->dev); | 145 | put_device(&unit->dev); |
145 | 146 | ||
146 | if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) { | 147 | if (!unit && !(allow_lun_scan && npiv)) { |
147 | put_device(&port->dev); | 148 | put_device(&port->dev); |
148 | return -ENXIO; | 149 | return -ENXIO; |
149 | } | 150 | } |
@@ -158,7 +159,7 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) | |||
158 | spin_lock_init(&zfcp_sdev->latencies.lock); | 159 | spin_lock_init(&zfcp_sdev->latencies.lock); |
159 | 160 | ||
160 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); | 161 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); |
161 | zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL); | 162 | zfcp_erp_lun_reopen(sdev, 0, "scsla_1"); |
162 | zfcp_erp_wait(port->adapter); | 163 | zfcp_erp_wait(port->adapter); |
163 | 164 | ||
164 | return 0; | 165 | return 0; |
@@ -182,8 +183,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
182 | old_req = zfcp_reqlist_find(adapter->req_list, old_reqid); | 183 | old_req = zfcp_reqlist_find(adapter->req_list, old_reqid); |
183 | if (!old_req) { | 184 | if (!old_req) { |
184 | write_unlock_irqrestore(&adapter->abort_lock, flags); | 185 | write_unlock_irqrestore(&adapter->abort_lock, flags); |
185 | zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL, | 186 | zfcp_dbf_scsi_abort("abrt_or", scpnt, NULL); |
186 | old_reqid); | ||
187 | return FAILED; /* completion could be in progress */ | 187 | return FAILED; /* completion could be in progress */ |
188 | } | 188 | } |
189 | old_req->data = NULL; | 189 | old_req->data = NULL; |
@@ -198,29 +198,32 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
198 | 198 | ||
199 | zfcp_erp_wait(adapter); | 199 | zfcp_erp_wait(adapter); |
200 | ret = fc_block_scsi_eh(scpnt); | 200 | ret = fc_block_scsi_eh(scpnt); |
201 | if (ret) | 201 | if (ret) { |
202 | zfcp_dbf_scsi_abort("abrt_bl", scpnt, NULL); | ||
202 | return ret; | 203 | return ret; |
204 | } | ||
203 | if (!(atomic_read(&adapter->status) & | 205 | if (!(atomic_read(&adapter->status) & |
204 | ZFCP_STATUS_COMMON_RUNNING)) { | 206 | ZFCP_STATUS_COMMON_RUNNING)) { |
205 | zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, | 207 | zfcp_dbf_scsi_abort("abrt_ru", scpnt, NULL); |
206 | old_reqid); | ||
207 | return SUCCESS; | 208 | return SUCCESS; |
208 | } | 209 | } |
209 | } | 210 | } |
210 | if (!abrt_req) | 211 | if (!abrt_req) { |
212 | zfcp_dbf_scsi_abort("abrt_ar", scpnt, NULL); | ||
211 | return FAILED; | 213 | return FAILED; |
214 | } | ||
212 | 215 | ||
213 | wait_for_completion(&abrt_req->completion); | 216 | wait_for_completion(&abrt_req->completion); |
214 | 217 | ||
215 | if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) | 218 | if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED) |
216 | dbf_tag = "okay"; | 219 | dbf_tag = "abrt_ok"; |
217 | else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) | 220 | else if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED) |
218 | dbf_tag = "lte2"; | 221 | dbf_tag = "abrt_nn"; |
219 | else { | 222 | else { |
220 | dbf_tag = "fail"; | 223 | dbf_tag = "abrt_fa"; |
221 | retval = FAILED; | 224 | retval = FAILED; |
222 | } | 225 | } |
223 | zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid); | 226 | zfcp_dbf_scsi_abort(dbf_tag, scpnt, abrt_req); |
224 | zfcp_fsf_req_free(abrt_req); | 227 | zfcp_fsf_req_free(abrt_req); |
225 | return retval; | 228 | return retval; |
226 | } | 229 | } |
@@ -280,7 +283,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | |||
280 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; | 283 | struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; |
281 | int ret; | 284 | int ret; |
282 | 285 | ||
283 | zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); | 286 | zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); |
284 | zfcp_erp_wait(adapter); | 287 | zfcp_erp_wait(adapter); |
285 | ret = fc_block_scsi_eh(scpnt); | 288 | ret = fc_block_scsi_eh(scpnt); |
286 | if (ret) | 289 | if (ret) |
@@ -518,7 +521,7 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) | |||
518 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); | 521 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
519 | 522 | ||
520 | if (port) { | 523 | if (port) { |
521 | zfcp_erp_port_forced_reopen(port, 0, "sctrpi1", NULL); | 524 | zfcp_erp_port_forced_reopen(port, 0, "sctrpi1"); |
522 | put_device(&port->dev); | 525 | put_device(&port->dev); |
523 | } | 526 | } |
524 | } | 527 | } |
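The slave_alloc change above is the functional core of the new allow_lun_scan parameter: a LUN with no configured zfcp_unit is now auto-attached only when the adapter runs in NPIV mode and the module parameter (default on) is set. A condensed restatement of the hunk, using only identifiers that appear in it:

	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;

	if (!unit && !(allow_lun_scan && npiv)) {
		put_device(&port->dev);
		return -ENXIO;	/* neither configured nor eligible for auto scan */
	}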
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 2f2c54f4718f..cdc4ff78a7ba 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -105,8 +105,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev, | |||
105 | return -EINVAL; | 105 | return -EINVAL; |
106 | 106 | ||
107 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); | 107 | zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING); |
108 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2", | 108 | zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2"); |
109 | NULL); | ||
110 | zfcp_erp_wait(port->adapter); | 109 | zfcp_erp_wait(port->adapter); |
111 | 110 | ||
112 | return count; | 111 | return count; |
@@ -148,7 +147,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev, | |||
148 | if (sdev) { | 147 | if (sdev) { |
149 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); | 148 | zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING); |
150 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, | 149 | zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, |
151 | "syufai2", NULL); | 150 | "syufai2"); |
152 | zfcp_erp_wait(unit->port->adapter); | 151 | zfcp_erp_wait(unit->port->adapter); |
153 | } else | 152 | } else |
154 | zfcp_unit_scsi_scan(unit); | 153 | zfcp_unit_scsi_scan(unit); |
@@ -198,7 +197,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev, | |||
198 | 197 | ||
199 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); | 198 | zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); |
200 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | 199 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, |
201 | "syafai2", NULL); | 200 | "syafai2"); |
202 | zfcp_erp_wait(adapter); | 201 | zfcp_erp_wait(adapter); |
203 | out: | 202 | out: |
204 | zfcp_ccw_adapter_put(adapter); | 203 | zfcp_ccw_adapter_put(adapter); |
@@ -256,7 +255,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev, | |||
256 | 255 | ||
257 | put_device(&port->dev); | 256 | put_device(&port->dev); |
258 | 257 | ||
259 | zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); | 258 | zfcp_erp_port_shutdown(port, 0, "syprs_1"); |
260 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); | 259 | zfcp_device_unregister(&port->dev, &zfcp_sysfs_port_attrs); |
261 | out: | 260 | out: |
262 | zfcp_ccw_adapter_put(adapter); | 261 | zfcp_ccw_adapter_put(adapter); |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 17e3df4f016f..1cadcd6b7da6 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -1171,9 +1171,8 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb, | |||
1171 | arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0); | 1171 | arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0); |
1172 | if ( arccdbsize > 256) | 1172 | if ( arccdbsize > 256) |
1173 | arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; | 1173 | arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; |
1174 | if (pcmd->cmnd[0]|WRITE_6 || pcmd->cmnd[0]|WRITE_10 || pcmd->cmnd[0]|WRITE_12 ){ | 1174 | if (pcmd->sc_data_direction == DMA_TO_DEVICE) |
1175 | arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; | 1175 | arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; |
1176 | } | ||
1177 | ccb->arc_cdb_size = arccdbsize; | 1176 | ccb->arc_cdb_size = arccdbsize; |
1178 | return SUCCESS; | 1177 | return SUCCESS; |
1179 | } | 1178 | } |
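The arcmsr hunk above fixes a bitwise OR used where a comparison was intended: WRITE_6 (0x0a) is nonzero, so "pcmd->cmnd[0] | WRITE_6" evaluates true for every opcode and every command was flagged as a write. Rather than switching to opcode equality tests, the patch keys off the DMA direction the midlayer already records. A short sketch of the three variants; the first two are illustrative only:

	if (pcmd->cmnd[0] | WRITE_6)			/* removed test: always nonzero */
		...
	if (pcmd->cmnd[0] == WRITE_6 ||			/* what an opcode match would need */
	    pcmd->cmnd[0] == WRITE_10 ||
	    pcmd->cmnd[0] == WRITE_12)
		...
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)	/* what the patch actually does */
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;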
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 75a85aa9e882..79cefbe31367 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -3785,7 +3785,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) | |||
3785 | dma_addr_t paddr; | 3785 | dma_addr_t paddr; |
3786 | 3786 | ||
3787 | io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, | 3787 | io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, |
3788 | GFP_KERNEL, &paddr); | 3788 | GFP_ATOMIC, &paddr); |
3789 | if (!io_task->cmd_bhs) | 3789 | if (!io_task->cmd_bhs) |
3790 | return -ENOMEM; | 3790 | return -ENOMEM; |
3791 | io_task->bhs_pa.u.a64.address = paddr; | 3791 | io_task->bhs_pa.u.a64.address = paddr; |
@@ -3914,7 +3914,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) | |||
3914 | io_task->psgl_handle = NULL; | 3914 | io_task->psgl_handle = NULL; |
3915 | } | 3915 | } |
3916 | } else { | 3916 | } else { |
3917 | if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) | 3917 | if (task->hdr && |
3918 | ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)) | ||
3918 | return; | 3919 | return; |
3919 | if (io_task->psgl_handle) { | 3920 | if (io_task->psgl_handle) { |
3920 | spin_lock(&phba->mgmt_sgl_lock); | 3921 | spin_lock(&phba->mgmt_sgl_lock); |
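The two be2iscsi hunks above are independent hardening fixes. The pci_pool_alloc() switch to GFP_ATOMIC matters if alloc_pdu can be reached with a spinlock held; the libiscsi session lock is the likely caller context, which is an assumption here rather than something stated in the diff, and GFP_KERNEL may sleep so it is unsafe there. The cleanup_task hunk guards against a task whose header was never allocated:

	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_ATOMIC, &paddr);	/* non-sleeping allocation */
	if (!io_task->cmd_bhs)
		return -ENOMEM;

	if (task->hdr &&					/* hdr may legitimately be NULL */
	    ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
		return;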
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile index d2eefd3e3bd5..4ce6f4942327 100644 --- a/drivers/scsi/bfa/Makefile +++ b/drivers/scsi/bfa/Makefile | |||
@@ -3,6 +3,4 @@ obj-$(CONFIG_SCSI_BFA_FC) := bfa.o | |||
3 | bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o | 3 | bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o |
4 | bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o | 4 | bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o |
5 | bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o | 5 | bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o |
6 | bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o | 6 | bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_svc.o |
7 | |||
8 | ccflags-y := -DBFA_PERF_BUILD | ||
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index ff2bd07161f7..7be6b5a8114b 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #ifndef __BFA_H__ | 17 | #ifndef __BFA_H__ |
18 | #define __BFA_H__ | 18 | #define __BFA_H__ |
19 | 19 | ||
20 | #include "bfa_os_inc.h" | 20 | #include "bfad_drv.h" |
21 | #include "bfa_cs.h" | 21 | #include "bfa_cs.h" |
22 | #include "bfa_plog.h" | 22 | #include "bfa_plog.h" |
23 | #include "bfa_defs_svc.h" | 23 | #include "bfa_defs_svc.h" |
@@ -33,7 +33,6 @@ typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); | |||
33 | * Interrupt message handlers | 33 | * Interrupt message handlers |
34 | */ | 34 | */ |
35 | void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); | 35 | void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); |
36 | void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); | ||
37 | 36 | ||
38 | /* | 37 | /* |
39 | * Request and response queue related defines | 38 | * Request and response queue related defines |
@@ -121,8 +120,8 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), | |||
121 | \ | 120 | \ |
122 | struct list_head *waitq = bfa_reqq(__bfa, __reqq); \ | 121 | struct list_head *waitq = bfa_reqq(__bfa, __reqq); \ |
123 | \ | 122 | \ |
124 | bfa_assert(((__reqq) < BFI_IOC_MAX_CQS)); \ | 123 | WARN_ON(((__reqq) >= BFI_IOC_MAX_CQS)); \ |
125 | bfa_assert((__wqe)->qresume && (__wqe)->cbarg); \ | 124 | WARN_ON(!((__wqe)->qresume && (__wqe)->cbarg)); \ |
126 | \ | 125 | \ |
127 | list_add_tail(&(__wqe)->qe, waitq); \ | 126 | list_add_tail(&(__wqe)->qe, waitq); \ |
128 | } while (0) | 127 | } while (0) |
@@ -297,7 +296,6 @@ void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, | |||
297 | struct bfa_iocfc_cfg_s *cfg, | 296 | struct bfa_iocfc_cfg_s *cfg, |
298 | struct bfa_meminfo_s *meminfo, | 297 | struct bfa_meminfo_s *meminfo, |
299 | struct bfa_pcidev_s *pcidev); | 298 | struct bfa_pcidev_s *pcidev); |
300 | void bfa_iocfc_detach(struct bfa_s *bfa); | ||
301 | void bfa_iocfc_init(struct bfa_s *bfa); | 299 | void bfa_iocfc_init(struct bfa_s *bfa); |
302 | void bfa_iocfc_start(struct bfa_s *bfa); | 300 | void bfa_iocfc_start(struct bfa_s *bfa); |
303 | void bfa_iocfc_stop(struct bfa_s *bfa); | 301 | void bfa_iocfc_stop(struct bfa_s *bfa); |
@@ -333,12 +331,9 @@ void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs, | |||
333 | u32 *maxvec); | 331 | u32 *maxvec); |
334 | void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, | 332 | void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, |
335 | u32 *end); | 333 | u32 *end); |
336 | void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi); | ||
337 | void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); | 334 | void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns); |
338 | wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa); | 335 | wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa); |
339 | wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa); | 336 | wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa); |
340 | void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, | ||
341 | struct bfa_boot_pbc_s *pbcfg); | ||
342 | int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, | 337 | int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, |
343 | struct bfi_pbc_vport_s *pbc_vport); | 338 | struct bfi_pbc_vport_s *pbc_vport); |
344 | 339 | ||
@@ -386,19 +381,11 @@ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, | |||
386 | void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | 381 | void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, |
387 | struct bfa_meminfo_s *meminfo, | 382 | struct bfa_meminfo_s *meminfo, |
388 | struct bfa_pcidev_s *pcidev); | 383 | struct bfa_pcidev_s *pcidev); |
389 | void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod); | ||
390 | void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog); | ||
391 | void bfa_detach(struct bfa_s *bfa); | 384 | void bfa_detach(struct bfa_s *bfa); |
392 | void bfa_init(struct bfa_s *bfa); | ||
393 | void bfa_start(struct bfa_s *bfa); | ||
394 | void bfa_stop(struct bfa_s *bfa); | ||
395 | void bfa_attach_fcs(struct bfa_s *bfa); | ||
396 | void bfa_cb_init(void *bfad, bfa_status_t status); | 385 | void bfa_cb_init(void *bfad, bfa_status_t status); |
397 | void bfa_cb_updateq(void *bfad, bfa_status_t status); | 386 | void bfa_cb_updateq(void *bfad, bfa_status_t status); |
398 | 387 | ||
399 | bfa_boolean_t bfa_intx(struct bfa_s *bfa); | 388 | bfa_boolean_t bfa_intx(struct bfa_s *bfa); |
400 | void bfa_intx_disable(struct bfa_s *bfa); | ||
401 | void bfa_intx_enable(struct bfa_s *bfa); | ||
402 | void bfa_isr_enable(struct bfa_s *bfa); | 389 | void bfa_isr_enable(struct bfa_s *bfa); |
403 | void bfa_isr_disable(struct bfa_s *bfa); | 390 | void bfa_isr_disable(struct bfa_s *bfa); |
404 | 391 | ||
@@ -408,31 +395,14 @@ void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q); | |||
408 | 395 | ||
409 | typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status); | 396 | typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status); |
410 | void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr); | 397 | void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr); |
411 | void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr); | ||
412 | 398 | ||
413 | void bfa_adapter_get_attr(struct bfa_s *bfa, | ||
414 | struct bfa_adapter_attr_s *ad_attr); | ||
415 | u64 bfa_adapter_get_id(struct bfa_s *bfa); | ||
416 | 399 | ||
417 | bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, | 400 | bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa, |
418 | struct bfa_iocfc_intr_attr_s *attr); | 401 | struct bfa_iocfc_intr_attr_s *attr); |
419 | 402 | ||
420 | void bfa_iocfc_enable(struct bfa_s *bfa); | 403 | void bfa_iocfc_enable(struct bfa_s *bfa); |
421 | void bfa_iocfc_disable(struct bfa_s *bfa); | 404 | void bfa_iocfc_disable(struct bfa_s *bfa); |
422 | void bfa_chip_reset(struct bfa_s *bfa); | ||
423 | void bfa_timer_tick(struct bfa_s *bfa); | ||
424 | #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ | 405 | #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout) \ |
425 | bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) | 406 | bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout) |
426 | 407 | ||
427 | /* | ||
428 | * BFA debug API functions | ||
429 | */ | ||
430 | bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen); | ||
431 | bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen); | ||
432 | bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf, | ||
433 | u32 *offset, int *buflen); | ||
434 | void bfa_debug_fwsave_clear(struct bfa_s *bfa); | ||
435 | bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data); | ||
436 | bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa); | ||
437 | |||
438 | #endif /* __BFA_H__ */ | 408 | #endif /* __BFA_H__ */ |
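A pattern worth noting in the bfa.h hunk above and repeated across the bfa files below: bfa_assert() complained when its condition was false, while WARN_ON() fires when its argument is true, so every conversion inverts the expression. Two examples lifted from hunks in this series:

	bfa_assert((__reqq) < BFI_IOC_MAX_CQS);		/* old: warn if false */
	WARN_ON((__reqq) >= BFI_IOC_MAX_CQS);		/* new: warn if true */

	bfa_assert((cfg != NULL) && (meminfo != NULL));
	WARN_ON((cfg == NULL) || (meminfo == NULL));	/* De Morgan of the old test */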
diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h deleted file mode 100644 index 6f021015f1f6..000000000000 --- a/drivers/scsi/bfa/bfa_cb_ioim.h +++ /dev/null | |||
@@ -1,169 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | ||
3 | * All rights reserved | ||
4 | * www.brocade.com | ||
5 | * | ||
6 | * Linux driver for Brocade Fibre Channel Host Bus Adapter. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License (GPL) Version 2 as | ||
10 | * published by the Free Software Foundation | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | */ | ||
17 | |||
18 | #ifndef __BFA_HCB_IOIM_H__ | ||
19 | #define __BFA_HCB_IOIM_H__ | ||
20 | |||
21 | #include "bfa_os_inc.h" | ||
22 | /* | ||
23 | * task attribute values in FCP-2 FCP_CMND IU | ||
24 | */ | ||
25 | #define SIMPLE_Q 0 | ||
26 | #define HEAD_OF_Q 1 | ||
27 | #define ORDERED_Q 2 | ||
28 | #define ACA_Q 4 | ||
29 | #define UNTAGGED 5 | ||
30 | |||
31 | static inline lun_t | ||
32 | bfad_int_to_lun(u32 luno) | ||
33 | { | ||
34 | union { | ||
35 | u16 scsi_lun[4]; | ||
36 | lun_t bfa_lun; | ||
37 | } lun; | ||
38 | |||
39 | lun.bfa_lun = 0; | ||
40 | lun.scsi_lun[0] = cpu_to_be16(luno); | ||
41 | |||
42 | return lun.bfa_lun; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Get LUN for the I/O request | ||
47 | */ | ||
48 | #define bfa_cb_ioim_get_lun(__dio) \ | ||
49 | bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun) | ||
50 | |||
51 | /* | ||
52 | * Get CDB for the I/O request | ||
53 | */ | ||
54 | static inline u8 * | ||
55 | bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio) | ||
56 | { | ||
57 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | ||
58 | |||
59 | return (u8 *) cmnd->cmnd; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * Get I/O direction (read/write) for the I/O request | ||
64 | */ | ||
65 | static inline enum fcp_iodir | ||
66 | bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio) | ||
67 | { | ||
68 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | ||
69 | enum dma_data_direction dmadir; | ||
70 | |||
71 | dmadir = cmnd->sc_data_direction; | ||
72 | if (dmadir == DMA_TO_DEVICE) | ||
73 | return FCP_IODIR_WRITE; | ||
74 | else if (dmadir == DMA_FROM_DEVICE) | ||
75 | return FCP_IODIR_READ; | ||
76 | else | ||
77 | return FCP_IODIR_NONE; | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Get IO size in bytes for the I/O request | ||
82 | */ | ||
83 | static inline u32 | ||
84 | bfa_cb_ioim_get_size(struct bfad_ioim_s *dio) | ||
85 | { | ||
86 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | ||
87 | |||
88 | return scsi_bufflen(cmnd); | ||
89 | } | ||
90 | |||
91 | /* | ||
92 | * Get timeout for the I/O request | ||
93 | */ | ||
94 | static inline u8 | ||
95 | bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio) | ||
96 | { | ||
97 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | ||
98 | /* | ||
99 | * TBD: need a timeout for scsi passthru | ||
100 | */ | ||
101 | if (cmnd->device->host == NULL) | ||
102 | return 4; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Get Command Reference Number for the I/O request. 0 if none. | ||
109 | */ | ||
110 | static inline u8 | ||
111 | bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Get SAM-3 priority for the I/O request. 0 is default. | ||
118 | */ | ||
119 | static inline u8 | ||
120 | bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio) | ||
121 | { | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0). | ||
127 | */ | ||
128 | static inline u8 | ||
129 | bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio) | ||
130 | { | ||
131 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | ||
132 | u8 task_attr = UNTAGGED; | ||
133 | |||
134 | if (cmnd->device->tagged_supported) { | ||
135 | switch (cmnd->tag) { | ||
136 | case HEAD_OF_QUEUE_TAG: | ||
137 | task_attr = HEAD_OF_Q; | ||
138 | break; | ||
139 | case ORDERED_QUEUE_TAG: | ||
140 | task_attr = ORDERED_Q; | ||
141 | break; | ||
142 | default: | ||
143 | task_attr = SIMPLE_Q; | ||
144 | break; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | return task_attr; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16). | ||
153 | */ | ||
154 | static inline u8 | ||
155 | bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio) | ||
156 | { | ||
157 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | ||
158 | |||
159 | return cmnd->cmd_len; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Assign queue to be used for the I/O request. This value depends on whether | ||
164 | * the driver wants to use the queues via any specific algorithm. Currently, | ||
165 | * this is not supported. | ||
166 | */ | ||
167 | #define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE | ||
168 | |||
169 | #endif /* __BFA_HCB_IOIM_H__ */ | ||
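The deleted bfa_cb_ioim.h consisted of thin inline views onto struct scsi_cmnd (LUN, CDB, direction, size, task attributes). With the header gone, callers presumably read the scsi_cmnd fields directly; the I/O-direction shim, for example, reduces to the usual mapping. A sketch only, with an illustrative variable name:

	enum fcp_iodir iodir;

	if (cmnd->sc_data_direction == DMA_TO_DEVICE)
		iodir = FCP_IODIR_WRITE;
	else if (cmnd->sc_data_direction == DMA_FROM_DEVICE)
		iodir = FCP_IODIR_READ;
	else
		iodir = FCP_IODIR_NONE;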
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 2345f48dc57f..1cd5c8b0618d 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c | |||
@@ -15,13 +15,100 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_modules.h" | 19 | #include "bfa_modules.h" |
19 | #include "bfi_ctreg.h" | 20 | #include "bfi_ctreg.h" |
20 | #include "bfad_drv.h" | ||
21 | 21 | ||
22 | BFA_TRC_FILE(HAL, CORE); | 22 | BFA_TRC_FILE(HAL, CORE); |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * BFA module list terminated by NULL | ||
26 | */ | ||
27 | static struct bfa_module_s *hal_mods[] = { | ||
28 | &hal_mod_sgpg, | ||
29 | &hal_mod_fcport, | ||
30 | &hal_mod_fcxp, | ||
31 | &hal_mod_lps, | ||
32 | &hal_mod_uf, | ||
33 | &hal_mod_rport, | ||
34 | &hal_mod_fcpim, | ||
35 | NULL | ||
36 | }; | ||
37 | |||
38 | /* | ||
39 | * Message handlers for various modules. | ||
40 | */ | ||
41 | static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { | ||
42 | bfa_isr_unhandled, /* NONE */ | ||
43 | bfa_isr_unhandled, /* BFI_MC_IOC */ | ||
44 | bfa_isr_unhandled, /* BFI_MC_DIAG */ | ||
45 | bfa_isr_unhandled, /* BFI_MC_FLASH */ | ||
46 | bfa_isr_unhandled, /* BFI_MC_CEE */ | ||
47 | bfa_fcport_isr, /* BFI_MC_FCPORT */ | ||
48 | bfa_isr_unhandled, /* BFI_MC_IOCFC */ | ||
49 | bfa_isr_unhandled, /* BFI_MC_LL */ | ||
50 | bfa_uf_isr, /* BFI_MC_UF */ | ||
51 | bfa_fcxp_isr, /* BFI_MC_FCXP */ | ||
52 | bfa_lps_isr, /* BFI_MC_LPS */ | ||
53 | bfa_rport_isr, /* BFI_MC_RPORT */ | ||
54 | bfa_itnim_isr, /* BFI_MC_ITNIM */ | ||
55 | bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ | ||
56 | bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ | ||
57 | bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ | ||
58 | bfa_ioim_isr, /* BFI_MC_IOIM */ | ||
59 | bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ | ||
60 | bfa_tskim_isr, /* BFI_MC_TSKIM */ | ||
61 | bfa_isr_unhandled, /* BFI_MC_SBOOT */ | ||
62 | bfa_isr_unhandled, /* BFI_MC_IPFC */ | ||
63 | bfa_isr_unhandled, /* BFI_MC_PORT */ | ||
64 | bfa_isr_unhandled, /* --------- */ | ||
65 | bfa_isr_unhandled, /* --------- */ | ||
66 | bfa_isr_unhandled, /* --------- */ | ||
67 | bfa_isr_unhandled, /* --------- */ | ||
68 | bfa_isr_unhandled, /* --------- */ | ||
69 | bfa_isr_unhandled, /* --------- */ | ||
70 | bfa_isr_unhandled, /* --------- */ | ||
71 | bfa_isr_unhandled, /* --------- */ | ||
72 | bfa_isr_unhandled, /* --------- */ | ||
73 | bfa_isr_unhandled, /* --------- */ | ||
74 | }; | ||
75 | /* | ||
76 | * Message handlers for mailbox command classes | ||
77 | */ | ||
78 | static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { | ||
79 | NULL, | ||
80 | NULL, /* BFI_MC_IOC */ | ||
81 | NULL, /* BFI_MC_DIAG */ | ||
82 | NULL, /* BFI_MC_FLASH */ | ||
83 | NULL, /* BFI_MC_CEE */ | ||
84 | NULL, /* BFI_MC_PORT */ | ||
85 | bfa_iocfc_isr, /* BFI_MC_IOCFC */ | ||
86 | NULL, | ||
87 | }; | ||
88 | |||
89 | |||
90 | |||
91 | static void | ||
92 | bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) | ||
93 | { | ||
94 | struct bfa_port_s *port = &bfa->modules.port; | ||
95 | u32 dm_len; | ||
96 | u8 *dm_kva; | ||
97 | u64 dm_pa; | ||
98 | |||
99 | dm_len = bfa_port_meminfo(); | ||
100 | dm_kva = bfa_meminfo_dma_virt(mi); | ||
101 | dm_pa = bfa_meminfo_dma_phys(mi); | ||
102 | |||
103 | memset(port, 0, sizeof(struct bfa_port_s)); | ||
104 | bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); | ||
105 | bfa_port_mem_claim(port, dm_kva, dm_pa); | ||
106 | |||
107 | bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; | ||
108 | bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; | ||
109 | } | ||
110 | |||
111 | /* | ||
25 | * BFA IOC FC related definitions | 112 | * BFA IOC FC related definitions |
26 | */ | 113 | */ |
27 | 114 | ||
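The block added above replaces run-time registration with static dispatch: handlers are indexed by BFI message class in bfa_isrs[], with bfa_isr_unhandled as the catch-all, and the old bfa_isr_bind() registration function is removed later in this file. Dispatch then looks like the response-queue loop further down in this diff:

	m = bfa_rspq_elem(bfa, qid, ci);
	bfa_isrs[m->mhdr.msg_class](bfa, m);	/* unknown classes land in bfa_isr_unhandled */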
@@ -67,18 +154,6 @@ static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; | |||
67 | * BFA Interrupt handling functions | 154 | * BFA Interrupt handling functions |
68 | */ | 155 | */ |
69 | static void | 156 | static void |
70 | bfa_msix_errint(struct bfa_s *bfa, u32 intr) | ||
71 | { | ||
72 | bfa_ioc_error_isr(&bfa->ioc); | ||
73 | } | ||
74 | |||
75 | static void | ||
76 | bfa_msix_lpu(struct bfa_s *bfa) | ||
77 | { | ||
78 | bfa_ioc_mbox_isr(&bfa->ioc); | ||
79 | } | ||
80 | |||
81 | static void | ||
82 | bfa_reqq_resume(struct bfa_s *bfa, int qid) | 157 | bfa_reqq_resume(struct bfa_s *bfa, int qid) |
83 | { | 158 | { |
84 | struct list_head *waitq, *qe, *qen; | 159 | struct list_head *waitq, *qe, *qen; |
@@ -104,9 +179,6 @@ bfa_msix_all(struct bfa_s *bfa, int vec) | |||
104 | bfa_intx(bfa); | 179 | bfa_intx(bfa); |
105 | } | 180 | } |
106 | 181 | ||
107 | /* | ||
108 | * hal_intr_api | ||
109 | */ | ||
110 | bfa_boolean_t | 182 | bfa_boolean_t |
111 | bfa_intx(struct bfa_s *bfa) | 183 | bfa_intx(struct bfa_s *bfa) |
112 | { | 184 | { |
@@ -151,18 +223,6 @@ bfa_intx(struct bfa_s *bfa) | |||
151 | } | 223 | } |
152 | 224 | ||
153 | void | 225 | void |
154 | bfa_intx_enable(struct bfa_s *bfa) | ||
155 | { | ||
156 | writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask); | ||
157 | } | ||
158 | |||
159 | void | ||
160 | bfa_intx_disable(struct bfa_s *bfa) | ||
161 | { | ||
162 | writel(-1L, bfa->iocfc.bfa_regs.intr_mask); | ||
163 | } | ||
164 | |||
165 | void | ||
166 | bfa_isr_enable(struct bfa_s *bfa) | 226 | bfa_isr_enable(struct bfa_s *bfa) |
167 | { | 227 | { |
168 | u32 intr_unmask; | 228 | u32 intr_unmask; |
@@ -225,7 +285,7 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
225 | bfa_trc(bfa, m->mhdr.msg_class); | 285 | bfa_trc(bfa, m->mhdr.msg_class); |
226 | bfa_trc(bfa, m->mhdr.msg_id); | 286 | bfa_trc(bfa, m->mhdr.msg_id); |
227 | bfa_trc(bfa, m->mhdr.mtag.i2htok); | 287 | bfa_trc(bfa, m->mhdr.mtag.i2htok); |
228 | bfa_assert(0); | 288 | WARN_ON(1); |
229 | bfa_trc_stop(bfa->trcmod); | 289 | bfa_trc_stop(bfa->trcmod); |
230 | } | 290 | } |
231 | 291 | ||
@@ -236,8 +296,6 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid) | |||
236 | u32 pi, ci; | 296 | u32 pi, ci; |
237 | struct list_head *waitq; | 297 | struct list_head *waitq; |
238 | 298 | ||
239 | bfa_trc_fp(bfa, qid); | ||
240 | |||
241 | qid &= (BFI_IOC_MAX_CQS - 1); | 299 | qid &= (BFI_IOC_MAX_CQS - 1); |
242 | 300 | ||
243 | bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); | 301 | bfa->iocfc.hwif.hw_rspq_ack(bfa, qid); |
@@ -245,16 +303,10 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid) | |||
245 | ci = bfa_rspq_ci(bfa, qid); | 303 | ci = bfa_rspq_ci(bfa, qid); |
246 | pi = bfa_rspq_pi(bfa, qid); | 304 | pi = bfa_rspq_pi(bfa, qid); |
247 | 305 | ||
248 | bfa_trc_fp(bfa, ci); | ||
249 | bfa_trc_fp(bfa, pi); | ||
250 | |||
251 | if (bfa->rme_process) { | 306 | if (bfa->rme_process) { |
252 | while (ci != pi) { | 307 | while (ci != pi) { |
253 | m = bfa_rspq_elem(bfa, qid, ci); | 308 | m = bfa_rspq_elem(bfa, qid, ci); |
254 | bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX); | ||
255 | |||
256 | bfa_isrs[m->mhdr.msg_class] (bfa, m); | 309 | bfa_isrs[m->mhdr.msg_class] (bfa, m); |
257 | |||
258 | CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); | 310 | CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems); |
259 | } | 311 | } |
260 | } | 312 | } |
@@ -282,7 +334,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) | |||
282 | intr = readl(bfa->iocfc.bfa_regs.intr_status); | 334 | intr = readl(bfa->iocfc.bfa_regs.intr_status); |
283 | 335 | ||
284 | if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) | 336 | if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) |
285 | bfa_msix_lpu(bfa); | 337 | bfa_ioc_mbox_isr(&bfa->ioc); |
286 | 338 | ||
287 | intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | | 339 | intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | |
288 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); | 340 | __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT); |
@@ -313,22 +365,16 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) | |||
313 | } | 365 | } |
314 | 366 | ||
315 | writel(intr, bfa->iocfc.bfa_regs.intr_status); | 367 | writel(intr, bfa->iocfc.bfa_regs.intr_status); |
316 | bfa_msix_errint(bfa, intr); | 368 | bfa_ioc_error_isr(&bfa->ioc); |
317 | } | 369 | } |
318 | } | 370 | } |
319 | 371 | ||
320 | void | ||
321 | bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func) | ||
322 | { | ||
323 | bfa_isrs[mc] = isr_func; | ||
324 | } | ||
325 | |||
326 | /* | 372 | /* |
327 | * BFA IOC FC related functions | 373 | * BFA IOC FC related functions |
328 | */ | 374 | */ |
329 | 375 | ||
330 | /* | 376 | /* |
331 | * hal_ioc_pvt BFA IOC private functions | 377 | * BFA IOC private functions |
332 | */ | 378 | */ |
333 | 379 | ||
334 | static void | 380 | static void |
@@ -379,7 +425,7 @@ bfa_iocfc_send_cfg(void *bfa_arg) | |||
379 | struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; | 425 | struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg; |
380 | int i; | 426 | int i; |
381 | 427 | ||
382 | bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS); | 428 | WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS); |
383 | bfa_trc(bfa, cfg->fwcfg.num_cqs); | 429 | bfa_trc(bfa, cfg->fwcfg.num_cqs); |
384 | 430 | ||
385 | bfa_iocfc_reset_queues(bfa); | 431 | bfa_iocfc_reset_queues(bfa); |
@@ -488,8 +534,8 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, | |||
488 | * First allocate dma memory for IOC. | 534 | * First allocate dma memory for IOC. |
489 | */ | 535 | */ |
490 | bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); | 536 | bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa); |
491 | dm_kva += bfa_ioc_meminfo(); | 537 | dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); |
492 | dm_pa += bfa_ioc_meminfo(); | 538 | dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); |
493 | 539 | ||
494 | /* | 540 | /* |
495 | * Claim DMA-able memory for the request/response queues and for shadow | 541 | * Claim DMA-able memory for the request/response queues and for shadow |
@@ -552,7 +598,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, | |||
552 | bfa_meminfo_dma_virt(meminfo) = dm_kva; | 598 | bfa_meminfo_dma_virt(meminfo) = dm_kva; |
553 | bfa_meminfo_dma_phys(meminfo) = dm_pa; | 599 | bfa_meminfo_dma_phys(meminfo) = dm_pa; |
554 | 600 | ||
555 | dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover); | 601 | dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; |
556 | if (dbgsz > 0) { | 602 | if (dbgsz > 0) { |
557 | bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); | 603 | bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo)); |
558 | bfa_meminfo_kva(meminfo) += dbgsz; | 604 | bfa_meminfo_kva(meminfo) += dbgsz; |
@@ -699,7 +745,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg) | |||
699 | bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, | 745 | bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb, |
700 | bfa); | 746 | bfa); |
701 | else { | 747 | else { |
702 | bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE); | 748 | WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE); |
703 | bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, | 749 | bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb, |
704 | bfa); | 750 | bfa); |
705 | } | 751 | } |
@@ -735,9 +781,6 @@ bfa_iocfc_reset_cbfn(void *bfa_arg) | |||
735 | bfa_isr_enable(bfa); | 781 | bfa_isr_enable(bfa); |
736 | } | 782 | } |
737 | 783 | ||
738 | /* | ||
739 | * hal_ioc_public | ||
740 | */ | ||
741 | 784 | ||
742 | /* | 785 | /* |
743 | * Query IOC memory requirement information. | 786 | * Query IOC memory requirement information. |
@@ -747,11 +790,11 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, | |||
747 | u32 *dm_len) | 790 | u32 *dm_len) |
748 | { | 791 | { |
749 | /* dma memory for IOC */ | 792 | /* dma memory for IOC */ |
750 | *dm_len += bfa_ioc_meminfo(); | 793 | *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); |
751 | 794 | ||
752 | bfa_iocfc_fw_cfg_sz(cfg, dm_len); | 795 | bfa_iocfc_fw_cfg_sz(cfg, dm_len); |
753 | bfa_iocfc_cqs_sz(cfg, dm_len); | 796 | bfa_iocfc_cqs_sz(cfg, dm_len); |
754 | *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); | 797 | *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0; |
755 | } | 798 | } |
756 | 799 | ||
757 | /* | 800 | /* |
@@ -783,7 +826,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
783 | 826 | ||
784 | bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); | 827 | bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev); |
785 | bfa_iocfc_mem_claim(bfa, cfg, meminfo); | 828 | bfa_iocfc_mem_claim(bfa, cfg, meminfo); |
786 | bfa_timer_init(&bfa->timer_mod); | 829 | INIT_LIST_HEAD(&bfa->timer_mod.timer_q); |
787 | 830 | ||
788 | INIT_LIST_HEAD(&bfa->comp_q); | 831 | INIT_LIST_HEAD(&bfa->comp_q); |
789 | for (i = 0; i < BFI_IOC_MAX_CQS; i++) | 832 | for (i = 0; i < BFI_IOC_MAX_CQS; i++) |
@@ -794,15 +837,6 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
794 | * Query IOC memory requirement information. | 837 | * Query IOC memory requirement information. |
795 | */ | 838 | */ |
796 | void | 839 | void |
797 | bfa_iocfc_detach(struct bfa_s *bfa) | ||
798 | { | ||
799 | bfa_ioc_detach(&bfa->ioc); | ||
800 | } | ||
801 | |||
802 | /* | ||
803 | * Query IOC memory requirement information. | ||
804 | */ | ||
805 | void | ||
806 | bfa_iocfc_init(struct bfa_s *bfa) | 840 | bfa_iocfc_init(struct bfa_s *bfa) |
807 | { | 841 | { |
808 | bfa->iocfc.action = BFA_IOCFC_ACT_INIT; | 842 | bfa->iocfc.action = BFA_IOCFC_ACT_INIT; |
@@ -852,23 +886,11 @@ bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m) | |||
852 | iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); | 886 | iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK); |
853 | break; | 887 | break; |
854 | default: | 888 | default: |
855 | bfa_assert(0); | 889 | WARN_ON(1); |
856 | } | 890 | } |
857 | } | 891 | } |
858 | 892 | ||
859 | void | 893 | void |
860 | bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr) | ||
861 | { | ||
862 | bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr); | ||
863 | } | ||
864 | |||
865 | u64 | ||
866 | bfa_adapter_get_id(struct bfa_s *bfa) | ||
867 | { | ||
868 | return bfa_ioc_get_adid(&bfa->ioc); | ||
869 | } | ||
870 | |||
871 | void | ||
872 | bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) | 894 | bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) |
873 | { | 895 | { |
874 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | 896 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; |
@@ -976,18 +998,6 @@ bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns) | |||
976 | memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); | 998 | memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn)); |
977 | } | 999 | } |
978 | 1000 | ||
979 | void | ||
980 | bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg) | ||
981 | { | ||
982 | struct bfa_iocfc_s *iocfc = &bfa->iocfc; | ||
983 | struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp; | ||
984 | |||
985 | pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled; | ||
986 | pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns; | ||
987 | pbcfg->speed = cfgrsp->pbc_cfg.port_speed; | ||
988 | memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun)); | ||
989 | } | ||
990 | |||
991 | int | 1001 | int |
992 | bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) | 1002 | bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) |
993 | { | 1003 | { |
@@ -998,9 +1008,6 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) | |||
998 | return cfgrsp->pbc_cfg.nvports; | 1008 | return cfgrsp->pbc_cfg.nvports; |
999 | } | 1009 | } |
1000 | 1010 | ||
1001 | /* | ||
1002 | * hal_api | ||
1003 | */ | ||
1004 | 1011 | ||
1005 | /* | 1012 | /* |
1006 | * Use this function query the memory requirement of the BFA library. | 1013 | * Use this function query the memory requirement of the BFA library. |
@@ -1036,7 +1043,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) | |||
1036 | int i; | 1043 | int i; |
1037 | u32 km_len = 0, dm_len = 0; | 1044 | u32 km_len = 0, dm_len = 0; |
1038 | 1045 | ||
1039 | bfa_assert((cfg != NULL) && (meminfo != NULL)); | 1046 | WARN_ON((cfg == NULL) || (meminfo == NULL)); |
1040 | 1047 | ||
1041 | memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); | 1048 | memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); |
1042 | meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = | 1049 | meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = |
@@ -1090,7 +1097,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
1090 | 1097 | ||
1091 | bfa->fcs = BFA_FALSE; | 1098 | bfa->fcs = BFA_FALSE; |
1092 | 1099 | ||
1093 | bfa_assert((cfg != NULL) && (meminfo != NULL)); | 1100 | WARN_ON((cfg == NULL) || (meminfo == NULL)); |
1094 | 1101 | ||
1095 | /* | 1102 | /* |
1096 | * initialize all memory pointers for iterative allocation | 1103 | * initialize all memory pointers for iterative allocation |
@@ -1129,79 +1136,7 @@ bfa_detach(struct bfa_s *bfa) | |||
1129 | 1136 | ||
1130 | for (i = 0; hal_mods[i]; i++) | 1137 | for (i = 0; hal_mods[i]; i++) |
1131 | hal_mods[i]->detach(bfa); | 1138 | hal_mods[i]->detach(bfa); |
1132 | 1139 | bfa_ioc_detach(&bfa->ioc); | |
1133 | bfa_iocfc_detach(bfa); | ||
1134 | } | ||
1135 | |||
1136 | |||
1137 | void | ||
1138 | bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod) | ||
1139 | { | ||
1140 | bfa->trcmod = trcmod; | ||
1141 | } | ||
1142 | |||
1143 | void | ||
1144 | bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) | ||
1145 | { | ||
1146 | bfa->plog = plog; | ||
1147 | } | ||
1148 | |||
1149 | /* | ||
1150 | * Initialize IOC. | ||
1151 | * | ||
1152 | * This function will return immediately, when the IOC initialization is | ||
1153 | * completed, the bfa_cb_init() will be called. | ||
1154 | * | ||
1155 | * @param[in] bfa instance | ||
1156 | * | ||
1157 | * @return void | ||
1158 | * | ||
1159 | * Special Considerations: | ||
1160 | * | ||
1161 | * @note | ||
1162 | * When this function returns, the driver should register the interrupt service | ||
1163 | * routine(s) and enable the device interrupts. If this is not done, | ||
1164 | * bfa_cb_init() will never get called | ||
1165 | */ | ||
1166 | void | ||
1167 | bfa_init(struct bfa_s *bfa) | ||
1168 | { | ||
1169 | bfa_iocfc_init(bfa); | ||
1170 | } | ||
1171 | |||
1172 | /* | ||
1173 | * Use this function initiate the IOC configuration setup. This function | ||
1174 | * will return immediately. | ||
1175 | * | ||
1176 | * @param[in] bfa instance | ||
1177 | * | ||
1178 | * @return None | ||
1179 | */ | ||
1180 | void | ||
1181 | bfa_start(struct bfa_s *bfa) | ||
1182 | { | ||
1183 | bfa_iocfc_start(bfa); | ||
1184 | } | ||
1185 | |||
1186 | /* | ||
1187 | * Use this function quiese the IOC. This function will return immediately, | ||
1188 | * when the IOC is actually stopped, the bfad->comp will be set. | ||
1189 | * | ||
1190 | * @param[in]bfa - pointer to bfa_t. | ||
1191 | * | ||
1192 | * @return None | ||
1193 | * | ||
1194 | * Special Considerations: | ||
1195 | * bfad->comp can be set before or after bfa_stop() returns. | ||
1196 | * | ||
1197 | * @note | ||
1198 | * In case of any failure, we could handle it automatically by doing a | ||
1199 | * reset and then succeed the bfa_stop() call. | ||
1200 | */ | ||
1201 | void | ||
1202 | bfa_stop(struct bfa_s *bfa) | ||
1203 | { | ||
1204 | bfa_iocfc_stop(bfa); | ||
1205 | } | 1140 | } |
1206 | 1141 | ||
1207 | void | 1142 | void |
@@ -1237,20 +1172,6 @@ bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q) | |||
1237 | } | 1172 | } |
1238 | } | 1173 | } |
1239 | 1174 | ||
1240 | void | ||
1241 | bfa_attach_fcs(struct bfa_s *bfa) | ||
1242 | { | ||
1243 | bfa->fcs = BFA_TRUE; | ||
1244 | } | ||
1245 | |||
1246 | /* | ||
1247 | * Periodic timer heart beat from driver | ||
1248 | */ | ||
1249 | void | ||
1250 | bfa_timer_tick(struct bfa_s *bfa) | ||
1251 | { | ||
1252 | bfa_timer_beat(&bfa->timer_mod); | ||
1253 | } | ||
1254 | 1175 | ||
1255 | /* | 1176 | /* |
1256 | * Return the list of PCI vendor/device id lists supported by this | 1177 | * Return the list of PCI vendor/device id lists supported by this |
@@ -1321,89 +1242,3 @@ bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg) | |||
1321 | cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; | 1242 | cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN; |
1322 | cfg->drvcfg.min_cfg = BFA_TRUE; | 1243 | cfg->drvcfg.min_cfg = BFA_TRUE; |
1323 | } | 1244 | } |
1324 | |||
1325 | void | ||
1326 | bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr) | ||
1327 | { | ||
1328 | bfa_ioc_get_attr(&bfa->ioc, ioc_attr); | ||
1329 | } | ||
1330 | |||
1331 | /* | ||
1332 | * Retrieve firmware trace information on IOC failure. | ||
1333 | */ | ||
1334 | bfa_status_t | ||
1335 | bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen) | ||
1336 | { | ||
1337 | return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); | ||
1338 | } | ||
1339 | |||
1340 | /* | ||
1341 | * Clear the saved firmware trace information of an IOC. | ||
1342 | */ | ||
1343 | void | ||
1344 | bfa_debug_fwsave_clear(struct bfa_s *bfa) | ||
1345 | { | ||
1346 | bfa_ioc_debug_fwsave_clear(&bfa->ioc); | ||
1347 | } | ||
1348 | |||
1349 | /* | ||
1350 | * Fetch firmware trace data. | ||
1351 | * | ||
1352 | * @param[in] bfa BFA instance | ||
1353 | * @param[out] trcdata Firmware trace buffer | ||
1354 | * @param[in,out] trclen Firmware trace buffer len | ||
1355 | * | ||
1356 | * @retval BFA_STATUS_OK Firmware trace is fetched. | ||
1357 | * @retval BFA_STATUS_INPROGRESS Firmware trace fetch is in progress. | ||
1358 | */ | ||
1359 | bfa_status_t | ||
1360 | bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen) | ||
1361 | { | ||
1362 | return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); | ||
1363 | } | ||
1364 | |||
1365 | /* | ||
1366 | * Dump firmware memory. | ||
1367 | * | ||
1368 | * @param[in] bfa BFA instance | ||
1369 | * @param[out] buf buffer for dump | ||
1370 | * @param[in,out] offset smem offset to start read | ||
1371 | * @param[in,out] buflen length of buffer | ||
1372 | * | ||
1373 | * @retval BFA_STATUS_OK Firmware memory is dumped. | ||
1374 | * @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress. | ||
1375 | */ | ||
1376 | bfa_status_t | ||
1377 | bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen) | ||
1378 | { | ||
1379 | return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); | ||
1380 | } | ||
1381 | /* | ||
1382 | * Reset hw semaphore & usage cnt regs and initialize. | ||
1383 | */ | ||
1384 | void | ||
1385 | bfa_chip_reset(struct bfa_s *bfa) | ||
1386 | { | ||
1387 | bfa_ioc_ownership_reset(&bfa->ioc); | ||
1388 | bfa_ioc_pll_init(&bfa->ioc); | ||
1389 | } | ||
1390 | |||
1391 | /* | ||
1392 | * Fetch firmware statistics data. | ||
1393 | * | ||
1394 | * @param[in] bfa BFA instance | ||
1395 | * @param[out] data Firmware stats buffer | ||
1396 | * | ||
1397 | * @retval BFA_STATUS_OK Firmware trace is fetched. | ||
1398 | */ | ||
1399 | bfa_status_t | ||
1400 | bfa_fw_stats_get(struct bfa_s *bfa, void *data) | ||
1401 | { | ||
1402 | return bfa_ioc_fw_stats_get(&bfa->ioc, data); | ||
1403 | } | ||
1404 | |||
1405 | bfa_status_t | ||
1406 | bfa_fw_stats_clear(struct bfa_s *bfa) | ||
1407 | { | ||
1408 | return bfa_ioc_fw_stats_clear(&bfa->ioc); | ||
1409 | } | ||
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h index 99f242b9aa31..12bfeed268eb 100644 --- a/drivers/scsi/bfa/bfa_cs.h +++ b/drivers/scsi/bfa/bfa_cs.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #ifndef __BFA_CS_H__ | 22 | #ifndef __BFA_CS_H__ |
23 | #define __BFA_CS_H__ | 23 | #define __BFA_CS_H__ |
24 | 24 | ||
25 | #include "bfa_os_inc.h" | 25 | #include "bfad_drv.h" |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * BFA TRC | 28 | * BFA TRC |
@@ -32,12 +32,20 @@ | |||
32 | #define BFA_TRC_MAX (4 * 1024) | 32 | #define BFA_TRC_MAX (4 * 1024) |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | #define BFA_TRC_TS(_trcm) \ | ||
36 | ({ \ | ||
37 | struct timeval tv; \ | ||
38 | \ | ||
39 | do_gettimeofday(&tv); \ | ||
40 | (tv.tv_sec*1000000+tv.tv_usec); \ | ||
41 | }) | ||
42 | |||
35 | #ifndef BFA_TRC_TS | 43 | #ifndef BFA_TRC_TS |
36 | #define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) | 44 | #define BFA_TRC_TS(_trcm) ((_trcm)->ticks++) |
37 | #endif | 45 | #endif |
38 | 46 | ||
39 | struct bfa_trc_s { | 47 | struct bfa_trc_s { |
40 | #ifdef __BIGENDIAN | 48 | #ifdef __BIG_ENDIAN |
41 | u16 fileno; | 49 | u16 fileno; |
42 | u16 line; | 50 | u16 line; |
43 | #else | 51 | #else |
@@ -99,13 +107,6 @@ bfa_trc_stop(struct bfa_trc_mod_s *trcm) | |||
99 | trcm->stopped = 1; | 107 | trcm->stopped = 1; |
100 | } | 108 | } |
101 | 109 | ||
102 | #ifdef FWTRC | ||
103 | extern void dc_flush(void *data); | ||
104 | #else | ||
105 | #define dc_flush(data) | ||
106 | #endif | ||
107 | |||
108 | |||
109 | static inline void | 110 | static inline void |
110 | __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) | 111 | __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) |
111 | { | 112 | { |
@@ -119,12 +120,10 @@ __bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data) | |||
119 | trc->line = (u16) line; | 120 | trc->line = (u16) line; |
120 | trc->data.u64 = data; | 121 | trc->data.u64 = data; |
121 | trc->timestamp = BFA_TRC_TS(trcm); | 122 | trc->timestamp = BFA_TRC_TS(trcm); |
122 | dc_flush(trc); | ||
123 | 123 | ||
124 | trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); | 124 | trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); |
125 | if (trcm->tail == trcm->head) | 125 | if (trcm->tail == trcm->head) |
126 | trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); | 126 | trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); |
127 | dc_flush(trcm); | ||
128 | } | 127 | } |
129 | 128 | ||
130 | 129 | ||
@@ -141,42 +140,18 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) | |||
141 | trc->line = (u16) line; | 140 | trc->line = (u16) line; |
142 | trc->data.u32.u32 = data; | 141 | trc->data.u32.u32 = data; |
143 | trc->timestamp = BFA_TRC_TS(trcm); | 142 | trc->timestamp = BFA_TRC_TS(trcm); |
144 | dc_flush(trc); | ||
145 | 143 | ||
146 | trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); | 144 | trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); |
147 | if (trcm->tail == trcm->head) | 145 | if (trcm->tail == trcm->head) |
148 | trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); | 146 | trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1); |
149 | dc_flush(trcm); | ||
150 | } | 147 | } |
151 | 148 | ||
152 | #ifndef BFA_PERF_BUILD | ||
153 | #define bfa_trc_fp(_trcp, _data) bfa_trc(_trcp, _data) | ||
154 | #else | ||
155 | #define bfa_trc_fp(_trcp, _data) | ||
156 | #endif | ||
157 | |||
158 | /* | ||
159 | * @ BFA LOG interfaces | ||
160 | */ | ||
161 | #define bfa_assert(__cond) do { \ | ||
162 | if (!(__cond)) { \ | ||
163 | printk(KERN_ERR "assert(%s) failed at %s:%d\\n", \ | ||
164 | #__cond, __FILE__, __LINE__); \ | ||
165 | } \ | ||
166 | } while (0) | ||
167 | |||
168 | #define bfa_sm_fault(__mod, __event) do { \ | 149 | #define bfa_sm_fault(__mod, __event) do { \ |
169 | bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \ | 150 | bfa_trc(__mod, (((u32)0xDEAD << 16) | __event)); \ |
170 | printk(KERN_ERR "Assertion failure: %s:%d: %d", \ | 151 | printk(KERN_ERR "Assertion failure: %s:%d: %d", \ |
171 | __FILE__, __LINE__, (__event)); \ | 152 | __FILE__, __LINE__, (__event)); \ |
172 | } while (0) | 153 | } while (0) |
173 | 154 | ||
174 | #ifndef BFA_PERF_BUILD | ||
175 | #define bfa_assert_fp(__cond) bfa_assert(__cond) | ||
176 | #else | ||
177 | #define bfa_assert_fp(__cond) | ||
178 | #endif | ||
179 | |||
180 | /* BFA queue definitions */ | 155 | /* BFA queue definitions */ |
181 | #define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) | 156 | #define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next)) |
182 | #define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) | 157 | #define bfa_q_next(_qe) (((struct list_head *) (_qe))->next) |
@@ -199,7 +174,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) | |||
199 | bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ | 174 | bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) = \ |
200 | (struct list_head *) (_q); \ | 175 | (struct list_head *) (_q); \ |
201 | bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\ | 176 | bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\ |
202 | BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \ | ||
203 | } else { \ | 177 | } else { \ |
204 | *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ | 178 | *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ |
205 | } \ | 179 | } \ |
@@ -214,7 +188,6 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) | |||
214 | bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ | 188 | bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) = \ |
215 | (struct list_head *) (_q); \ | 189 | (struct list_head *) (_q); \ |
216 | bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\ | 190 | bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\ |
217 | BFA_Q_DBG_INIT(*((struct list_head **) _qe)); \ | ||
218 | } else { \ | 191 | } else { \ |
219 | *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ | 192 | *((struct list_head **) (_qe)) = (struct list_head *) NULL;\ |
220 | } \ | 193 | } \ |
@@ -236,16 +209,6 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) | |||
236 | return 0; | 209 | return 0; |
237 | } | 210 | } |
238 | 211 | ||
239 | /* | ||
240 | * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not | ||
241 | * consistent across modules) | ||
242 | */ | ||
243 | #ifndef BFA_PERF_BUILD | ||
244 | #define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe) | ||
245 | #else | ||
246 | #define BFA_Q_DBG_INIT(_qe) | ||
247 | #endif | ||
248 | |||
249 | #define bfa_q_is_on_q(_q, _qe) \ | 212 | #define bfa_q_is_on_q(_q, _qe) \ |
250 | bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) | 213 | bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) |
251 | 214 | ||
@@ -361,4 +324,43 @@ bfa_wc_wait(struct bfa_wc_s *wc) | |||
361 | bfa_wc_down(wc); | 324 | bfa_wc_down(wc); |
362 | } | 325 | } |
363 | 326 | ||
327 | static inline void | ||
328 | wwn2str(char *wwn_str, u64 wwn) | ||
329 | { | ||
330 | union { | ||
331 | u64 wwn; | ||
332 | u8 byte[8]; | ||
333 | } w; | ||
334 | |||
335 | w.wwn = wwn; | ||
336 | sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0], | ||
337 | w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5], | ||
338 | w.byte[6], w.byte[7]); | ||
339 | } | ||
340 | |||
341 | static inline void | ||
342 | fcid2str(char *fcid_str, u32 fcid) | ||
343 | { | ||
344 | union { | ||
345 | u32 fcid; | ||
346 | u8 byte[4]; | ||
347 | } f; | ||
348 | |||
349 | f.fcid = fcid; | ||
350 | sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]); | ||
351 | } | ||
352 | |||
353 | #define bfa_swap_3b(_x) \ | ||
354 | ((((_x) & 0xff) << 16) | \ | ||
355 | ((_x) & 0x00ff00) | \ | ||
356 | (((_x) & 0xff0000) >> 16)) | ||
357 | |||
358 | #ifndef __BIG_ENDIAN | ||
359 | #define bfa_hton3b(_x) bfa_swap_3b(_x) | ||
360 | #else | ||
361 | #define bfa_hton3b(_x) (_x) | ||
362 | #endif | ||
363 | |||
364 | #define bfa_ntoh3b(_x) bfa_hton3b(_x) | ||
365 | |||
364 | #endif /* __BFA_CS_H__ */ | 366 | #endif /* __BFA_CS_H__ */ |
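The helpers added at the bottom of bfa_cs.h print a WWN or FC_ID byte-for-byte as stored, so they are only meaningful when the caller keeps those values in wire byte order, and bfa_hton3b() swaps a 24-bit field only on little-endian builds. A usage sketch; the buffer sizes and the 'pwwn'/'pid' variables are illustrative, and the FCTL_* flags mirror the fcbuild.c template setup further down in this diff:

char wwn_buf[32];	/* "xx:xx:xx:xx:xx:xx:xx:xx" + NUL fits easily */
char fcid_buf[16];	/* "xx:xx:xx" + NUL */
struct fchs_s fchs;

wwn2str(wwn_buf, pwwn);
fcid2str(fcid_buf, pid);

/* 24-bit header fields such as F_CTL are byte-swapped only on little-endian hosts */
fchs.f_ctl = bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER);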
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index 4b5b9e35abb9..d85f93aea465 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #define __BFA_DEFS_H__ | 19 | #define __BFA_DEFS_H__ |
20 | 20 | ||
21 | #include "bfa_fc.h" | 21 | #include "bfa_fc.h" |
22 | #include "bfa_os_inc.h" | 22 | #include "bfad_drv.h" |
23 | 23 | ||
24 | #define BFA_MFG_SERIALNUM_SIZE 11 | 24 | #define BFA_MFG_SERIALNUM_SIZE 11 |
25 | #define STRSZ(_n) (((_n) + 4) & ~3) | 25 | #define STRSZ(_n) (((_n) + 4) & ~3) |
@@ -446,8 +446,8 @@ enum bfa_boot_bootopt { | |||
446 | * Boot lun information. | 446 | * Boot lun information. |
447 | */ | 447 | */ |
448 | struct bfa_boot_bootlun_s { | 448 | struct bfa_boot_bootlun_s { |
449 | wwn_t pwwn; /* port wwn of target */ | 449 | wwn_t pwwn; /* port wwn of target */ |
450 | lun_t lun; /* 64-bit lun */ | 450 | struct scsi_lun lun; /* 64-bit lun */ |
451 | }; | 451 | }; |
452 | #pragma pack() | 452 | #pragma pack() |
453 | 453 | ||
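Replacing the private lun_t with the midlayer's struct scsi_lun (the 8-byte SAM LUN representation) means boot LUNs can be filled in with the standard conversion helpers instead of raw 64-bit stores. A sketch, assuming int_to_scsilun() is visible via the SCSI headers on this tree and 'target_pwwn' is a hypothetical boot target:

struct bfa_boot_bootlun_s bootlun;

memset(&bootlun, 0, sizeof(bootlun));
bootlun.pwwn = target_pwwn;		/* boot target port WWN */
int_to_scsilun(5, &bootlun.lun);	/* LUN 5 encoded in 8-byte SAM format */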
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index e24e9f7ca81f..648c84176722 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h | |||
@@ -34,8 +34,8 @@ | |||
34 | struct bfa_iocfc_intr_attr_s { | 34 | struct bfa_iocfc_intr_attr_s { |
35 | u8 coalesce; /* enable/disable coalescing */ | 35 | u8 coalesce; /* enable/disable coalescing */ |
36 | u8 rsvd[3]; | 36 | u8 rsvd[3]; |
37 | u16 latency; /* latency in microseconds */ | 37 | __be16 latency; /* latency in microseconds */ |
38 | u16 delay; /* delay in microseconds */ | 38 | __be16 delay; /* delay in microseconds */ |
39 | }; | 39 | }; |
40 | 40 | ||
41 | /* | 41 | /* |
@@ -743,7 +743,7 @@ struct bfa_port_cfg_s { | |||
743 | u8 qos_enabled; /* qos enabled or not */ | 743 | u8 qos_enabled; /* qos enabled or not */ |
744 | u8 cfg_hardalpa; /* is hard alpa configured */ | 744 | u8 cfg_hardalpa; /* is hard alpa configured */ |
745 | u8 hardalpa; /* configured hard alpa */ | 745 | u8 hardalpa; /* configured hard alpa */ |
746 | u16 maxfrsize; /* maximum frame size */ | 746 | __be16 maxfrsize; /* maximum frame size */ |
747 | u8 rx_bbcredit; /* receive buffer credits */ | 747 | u8 rx_bbcredit; /* receive buffer credits */ |
748 | u8 tx_bbcredit; /* transmit buffer credits */ | 748 | u8 tx_bbcredit; /* transmit buffer credits */ |
749 | u8 ratelimit; /* ratelimit enabled or not */ | 749 | u8 ratelimit; /* ratelimit enabled or not */ |
@@ -843,7 +843,7 @@ struct bfa_fcport_fcf_s { | |||
843 | u8 fka_disabled; /* FKA is disabled */ | 843 | u8 fka_disabled; /* FKA is disabled */ |
844 | u8 maxsz_verified; /* FCoE max size verified */ | 844 | u8 maxsz_verified; /* FCoE max size verified */ |
845 | u8 fc_map[3]; /* FC map */ | 845 | u8 fc_map[3]; /* FC map */ |
846 | u16 vlan; /* FCoE vlan tag/priority */ | 846 | __be16 vlan; /* FCoE vlan tag/priority */ |
847 | u32 fka_adv_per; /* FIP ka advert. period */ | 847 | u32 fka_adv_per; /* FIP ka advert. period */ |
848 | mac_t mac; /* FCF mac */ | 848 | mac_t mac; /* FCF mac */ |
849 | }; | 849 | }; |
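Retyping the on-wire fields as __be16 lets sparse catch missing byte swaps; host code now has to convert explicitly at the boundary. A sketch of filling the interrupt-coalescing attributes from the first hunk above, where 'latency_us' and 'delay_us' are hypothetical host-order values:

struct bfa_iocfc_intr_attr_s attr;

attr.coalesce = 1;				/* enable coalescing */
attr.latency  = cpu_to_be16(latency_us);	/* host order -> wire order */
attr.delay    = cpu_to_be16(delay_us);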
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c deleted file mode 100644 index 0222d7c88a9a..000000000000 --- a/drivers/scsi/bfa/bfa_drv.c +++ /dev/null | |||
@@ -1,107 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | ||
3 | * All rights reserved | ||
4 | * www.brocade.com | ||
5 | * | ||
6 | * Linux driver for Brocade Fibre Channel Host Bus Adapter. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License (GPL) Version 2 as | ||
10 | * published by the Free Software Foundation | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | */ | ||
17 | |||
18 | #include "bfa_modules.h" | ||
19 | |||
20 | /* | ||
21 | * BFA module list terminated by NULL | ||
22 | */ | ||
23 | struct bfa_module_s *hal_mods[] = { | ||
24 | &hal_mod_sgpg, | ||
25 | &hal_mod_fcport, | ||
26 | &hal_mod_fcxp, | ||
27 | &hal_mod_lps, | ||
28 | &hal_mod_uf, | ||
29 | &hal_mod_rport, | ||
30 | &hal_mod_fcpim, | ||
31 | NULL | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * Message handlers for various modules. | ||
36 | */ | ||
37 | bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { | ||
38 | bfa_isr_unhandled, /* NONE */ | ||
39 | bfa_isr_unhandled, /* BFI_MC_IOC */ | ||
40 | bfa_isr_unhandled, /* BFI_MC_DIAG */ | ||
41 | bfa_isr_unhandled, /* BFI_MC_FLASH */ | ||
42 | bfa_isr_unhandled, /* BFI_MC_CEE */ | ||
43 | bfa_fcport_isr, /* BFI_MC_FCPORT */ | ||
44 | bfa_isr_unhandled, /* BFI_MC_IOCFC */ | ||
45 | bfa_isr_unhandled, /* BFI_MC_LL */ | ||
46 | bfa_uf_isr, /* BFI_MC_UF */ | ||
47 | bfa_fcxp_isr, /* BFI_MC_FCXP */ | ||
48 | bfa_lps_isr, /* BFI_MC_LPS */ | ||
49 | bfa_rport_isr, /* BFI_MC_RPORT */ | ||
50 | bfa_itnim_isr, /* BFI_MC_ITNIM */ | ||
51 | bfa_isr_unhandled, /* BFI_MC_IOIM_READ */ | ||
52 | bfa_isr_unhandled, /* BFI_MC_IOIM_WRITE */ | ||
53 | bfa_isr_unhandled, /* BFI_MC_IOIM_IO */ | ||
54 | bfa_ioim_isr, /* BFI_MC_IOIM */ | ||
55 | bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */ | ||
56 | bfa_tskim_isr, /* BFI_MC_TSKIM */ | ||
57 | bfa_isr_unhandled, /* BFI_MC_SBOOT */ | ||
58 | bfa_isr_unhandled, /* BFI_MC_IPFC */ | ||
59 | bfa_isr_unhandled, /* BFI_MC_PORT */ | ||
60 | bfa_isr_unhandled, /* --------- */ | ||
61 | bfa_isr_unhandled, /* --------- */ | ||
62 | bfa_isr_unhandled, /* --------- */ | ||
63 | bfa_isr_unhandled, /* --------- */ | ||
64 | bfa_isr_unhandled, /* --------- */ | ||
65 | bfa_isr_unhandled, /* --------- */ | ||
66 | bfa_isr_unhandled, /* --------- */ | ||
67 | bfa_isr_unhandled, /* --------- */ | ||
68 | bfa_isr_unhandled, /* --------- */ | ||
69 | bfa_isr_unhandled, /* --------- */ | ||
70 | }; | ||
71 | |||
72 | |||
73 | /* | ||
74 | * Message handlers for mailbox command classes | ||
75 | */ | ||
76 | bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { | ||
77 | NULL, | ||
78 | NULL, /* BFI_MC_IOC */ | ||
79 | NULL, /* BFI_MC_DIAG */ | ||
80 | NULL, /* BFI_MC_FLASH */ | ||
81 | NULL, /* BFI_MC_CEE */ | ||
82 | NULL, /* BFI_MC_PORT */ | ||
83 | bfa_iocfc_isr, /* BFI_MC_IOCFC */ | ||
84 | NULL, | ||
85 | }; | ||
86 | |||
87 | |||
88 | |||
89 | void | ||
90 | bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi) | ||
91 | { | ||
92 | struct bfa_port_s *port = &bfa->modules.port; | ||
93 | u32 dm_len; | ||
94 | u8 *dm_kva; | ||
95 | u64 dm_pa; | ||
96 | |||
97 | dm_len = bfa_port_meminfo(); | ||
98 | dm_kva = bfa_meminfo_dma_virt(mi); | ||
99 | dm_pa = bfa_meminfo_dma_phys(mi); | ||
100 | |||
101 | memset(port, 0, sizeof(struct bfa_port_s)); | ||
102 | bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod); | ||
103 | bfa_port_mem_claim(port, dm_kva, dm_pa); | ||
104 | |||
105 | bfa_meminfo_dma_virt(mi) = dm_kva + dm_len; | ||
106 | bfa_meminfo_dma_phys(mi) = dm_pa + dm_len; | ||
107 | } | ||
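bfa_drv.c is deleted outright; its hal_mods[] and bfa_isrs[] arrays are per-message-class dispatch tables (presumably rehomed elsewhere in this series rather than dropped). Roughly how such a table gets consumed — the function, message type, and field names in this sketch are assumptions, not the driver's actual code:

static void bfa_msg_dispatch(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	if (m->mhdr.msg_class < BFI_MC_MAX)
		bfa_isrs[m->mhdr.msg_class](bfa, m);	/* per-class handler */
	else
		bfa_isr_unhandled(bfa, m);
}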
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index e929d25b09e3..8e764fae8dc9 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h | |||
@@ -18,14 +18,12 @@ | |||
18 | #ifndef __BFA_FC_H__ | 18 | #ifndef __BFA_FC_H__ |
19 | #define __BFA_FC_H__ | 19 | #define __BFA_FC_H__ |
20 | 20 | ||
21 | #include "bfa_os_inc.h" | 21 | #include "bfad_drv.h" |
22 | 22 | ||
23 | typedef u64 wwn_t; | 23 | typedef u64 wwn_t; |
24 | typedef u64 lun_t; | ||
25 | 24 | ||
26 | #define WWN_NULL (0) | 25 | #define WWN_NULL (0) |
27 | #define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ | 26 | #define FC_SYMNAME_MAX 256 /* max name server symbolic name size */ |
28 | #define FC_ALPA_MAX 128 | ||
29 | 27 | ||
30 | #pragma pack(1) | 28 | #pragma pack(1) |
31 | 29 | ||
@@ -40,7 +38,6 @@ struct mac_s { u8 mac[MAC_ADDRLEN]; }; | |||
40 | struct scsi_cdb_s { | 38 | struct scsi_cdb_s { |
41 | u8 scsi_cdb[SCSI_MAX_CDBLEN]; | 39 | u8 scsi_cdb[SCSI_MAX_CDBLEN]; |
42 | }; | 40 | }; |
43 | #define scsi_cdb_t struct scsi_cdb_s | ||
44 | 41 | ||
45 | /* ------------------------------------------------------------ | 42 | /* ------------------------------------------------------------ |
46 | * SCSI status byte values | 43 | * SCSI status byte values |
@@ -63,7 +60,7 @@ struct scsi_cdb_s { | |||
63 | * Fibre Channel Header Structure (FCHS) definition | 60 | * Fibre Channel Header Structure (FCHS) definition |
64 | */ | 61 | */ |
65 | struct fchs_s { | 62 | struct fchs_s { |
66 | #ifdef __BIGENDIAN | 63 | #ifdef __BIG_ENDIAN |
67 | u32 routing:4; /* routing bits */ | 64 | u32 routing:4; /* routing bits */ |
68 | u32 cat_info:4; /* category info */ | 65 | u32 cat_info:4; /* category info */ |
69 | #else | 66 | #else |
@@ -75,34 +72,19 @@ struct fchs_s { | |||
75 | u32 cs_ctl:8; /* class specific control */ | 72 | u32 cs_ctl:8; /* class specific control */ |
76 | u32 s_id:24; /* source identifier */ | 73 | u32 s_id:24; /* source identifier */ |
77 | 74 | ||
78 | u32 type:8; /* data structure type */ | 75 | u32 type:8; /* data structure type */ |
79 | u32 f_ctl:24; /* initial frame control */ | 76 | u32 f_ctl:24; /* initial frame control */ |
80 | 77 | ||
81 | u8 seq_id; /* sequence identifier */ | 78 | u8 seq_id; /* sequence identifier */ |
82 | u8 df_ctl; /* data field control */ | 79 | u8 df_ctl; /* data field control */ |
83 | u16 seq_cnt; /* sequence count */ | 80 | u16 seq_cnt; /* sequence count */ |
84 | 81 | ||
85 | u16 ox_id; /* originator exchange ID */ | 82 | __be16 ox_id; /* originator exchange ID */ |
86 | u16 rx_id; /* responder exchange ID */ | 83 | u16 rx_id; /* responder exchange ID */ |
87 | 84 | ||
88 | u32 ro; /* relative offset */ | 85 | u32 ro; /* relative offset */ |
89 | }; | 86 | }; |
90 | 87 | ||
91 | #define FC_SOF_LEN 4 | ||
92 | #define FC_EOF_LEN 4 | ||
93 | #define FC_CRC_LEN 4 | ||
94 | |||
95 | /* | ||
96 | * Fibre Channel BB_E Header Structure | ||
97 | */ | ||
98 | struct fcbbehs_s { | ||
99 | u16 ver_rsvd; | ||
100 | u32 rsvd[2]; | ||
101 | u32 rsvd__sof; | ||
102 | }; | ||
103 | |||
104 | #define FC_SEQ_ID_MAX 256 | ||
105 | |||
106 | /* | 88 | /* |
107 | * routing bit definitions | 89 | * routing bit definitions |
108 | */ | 90 | */ |
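With ox_id now annotated __be16 in struct fchs_s, exchange matching on received frames goes through an explicit swap. A one-line sketch ('fchs' is a hypothetical pointer into the received frame):

u16 ox_id = be16_to_cpu(fchs->ox_id);	/* originator exchange ID in host order */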
@@ -149,22 +131,6 @@ enum { | |||
149 | }; | 131 | }; |
150 | 132 | ||
151 | /* | 133 | /* |
152 | * information category for Link Control | ||
153 | */ | ||
154 | enum { | ||
155 | FC_CAT_ACK_1 = 0x00, | ||
156 | FC_CAT_ACK_0_N = 0x01, | ||
157 | FC_CAT_P_RJT = 0x02, | ||
158 | FC_CAT_F_RJT = 0x03, | ||
159 | FC_CAT_P_BSY = 0x04, | ||
160 | FC_CAT_F_BSY_DATA = 0x05, | ||
161 | FC_CAT_F_BSY_LINK_CTL = 0x06, | ||
162 | FC_CAT_F_LCR = 0x07, | ||
163 | FC_CAT_NTY = 0x08, | ||
164 | FC_CAT_END = 0x09, | ||
165 | }; | ||
166 | |||
167 | /* | ||
168 | * Type Field Definitions. FC-PH Section 18.5 pg. 165 | 134 | * Type Field Definitions. FC-PH Section 18.5 pg. 165 |
169 | */ | 135 | */ |
170 | enum { | 136 | enum { |
@@ -182,10 +148,6 @@ enum { | |||
182 | FC_TYPE_MAX = 256, /* 256 FC-4 types */ | 148 | FC_TYPE_MAX = 256, /* 256 FC-4 types */ |
183 | }; | 149 | }; |
184 | 150 | ||
185 | struct fc_fc4types_s { | ||
186 | u8 bits[FC_TYPE_MAX / 8]; | ||
187 | }; | ||
188 | |||
189 | /* | 151 | /* |
190 | * Frame Control Definitions. FC-PH Table-45. pg. 168 | 152 | * Frame Control Definitions. FC-PH Table-45. pg. 168 |
191 | */ | 153 | */ |
@@ -288,7 +250,6 @@ enum { | |||
288 | FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */ | 250 | FC_ELS_AUTH = 0x90, /* Authentication. Ref FC-SP */ |
289 | FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref | 251 | FC_ELS_RFCN = 0x97, /* Request Fabric Change Notification. Ref |
290 | *FC-SP */ | 252 | *FC-SP */ |
291 | |||
292 | }; | 253 | }; |
293 | 254 | ||
294 | /* | 255 | /* |
@@ -314,12 +275,12 @@ enum { | |||
314 | * FC-PH-x. Figure-76. pg. 308. | 275 | * FC-PH-x. Figure-76. pg. 308. |
315 | */ | 276 | */ |
316 | struct fc_plogi_csp_s { | 277 | struct fc_plogi_csp_s { |
317 | u8 verhi; /* FC-PH high version */ | 278 | u8 verhi; /* FC-PH high version */ |
318 | u8 verlo; /* FC-PH low version */ | 279 | u8 verlo; /* FC-PH low version */ |
319 | u16 bbcred; /* BB_Credit */ | 280 | __be16 bbcred; /* BB_Credit */ |
320 | 281 | ||
321 | #ifdef __BIGENDIAN | 282 | #ifdef __BIG_ENDIAN |
322 | u8 ciro:1, /* continuously increasing RO */ | 283 | u8 ciro:1, /* continuously increasing RO */ |
323 | rro:1, /* random relative offset */ | 284 | rro:1, /* random relative offset */ |
324 | npiv_supp:1, /* NPIV supported */ | 285 | npiv_supp:1, /* NPIV supported */ |
325 | port_type:1, /* N_Port/F_port */ | 286 | port_type:1, /* N_Port/F_port */ |
@@ -328,7 +289,7 @@ struct fc_plogi_csp_s { | |||
328 | vvl_info:1, /* VVL Info included */ | 289 | vvl_info:1, /* VVL Info included */ |
329 | reserved1:1; | 290 | reserved1:1; |
330 | 291 | ||
331 | u8 hg_supp:1, | 292 | u8 hg_supp:1, |
332 | query_dbc:1, | 293 | query_dbc:1, |
333 | security:1, | 294 | security:1, |
334 | sync_cap:1, | 295 | sync_cap:1, |
@@ -337,7 +298,7 @@ struct fc_plogi_csp_s { | |||
337 | cisc:1, /* continuously increasing seq count */ | 298 | cisc:1, /* continuously increasing seq count */ |
338 | payload:1; | 299 | payload:1; |
339 | #else | 300 | #else |
340 | u8 reserved2:2, | 301 | u8 reserved2:2, |
341 | resolution:1, /* ms/ns ED_TOV resolution */ | 302 | resolution:1, /* ms/ns ED_TOV resolution */ |
342 | altbbcred:1, /* alternate BB_Credit */ | 303 | altbbcred:1, /* alternate BB_Credit */ |
343 | port_type:1, /* N_Port/F_port */ | 304 | port_type:1, /* N_Port/F_port */ |
@@ -345,7 +306,7 @@ struct fc_plogi_csp_s { | |||
345 | rro:1, /* random relative offset */ | 306 | rro:1, /* random relative offset */ |
346 | ciro:1; /* continuously increasing RO */ | 307 | ciro:1; /* continuously increasing RO */ |
347 | 308 | ||
348 | u8 payload:1, | 309 | u8 payload:1, |
349 | cisc:1, /* continuously increasing seq count */ | 310 | cisc:1, /* continuously increasing seq count */ |
350 | dh_dup_supp:1, | 311 | dh_dup_supp:1, |
351 | r_t_tov:1, | 312 | r_t_tov:1, |
@@ -354,13 +315,10 @@ struct fc_plogi_csp_s { | |||
354 | query_dbc:1, | 315 | query_dbc:1, |
355 | hg_supp:1; | 316 | hg_supp:1; |
356 | #endif | 317 | #endif |
357 | 318 | __be16 rxsz; /* recieve data_field size */ | |
358 | u16 rxsz; /* recieve data_field size */ | 319 | __be16 conseq; |
359 | 320 | __be16 ro_bitmap; | |
360 | u16 conseq; | 321 | __be32 e_d_tov; |
361 | u16 ro_bitmap; | ||
362 | |||
363 | u32 e_d_tov; | ||
364 | }; | 322 | }; |
365 | 323 | ||
366 | /* | 324 | /* |
@@ -368,12 +326,11 @@ struct fc_plogi_csp_s { | |||
368 | * FC-PH-x. Figure 78. pg. 318. | 326 | * FC-PH-x. Figure 78. pg. 318. |
369 | */ | 327 | */ |
370 | struct fc_plogi_clp_s { | 328 | struct fc_plogi_clp_s { |
371 | #ifdef __BIGENDIAN | 329 | #ifdef __BIG_ENDIAN |
372 | u32 class_valid:1; | 330 | u32 class_valid:1; |
373 | u32 intermix:1; /* class intermix supported if set =1. | 331 | u32 intermix:1; /* class intermix supported if set =1. |
374 | * valid only for class1. Reserved for | 332 | * valid only for class1. Reserved for |
375 | * class2 & class3 | 333 | * class2 & class3 */ |
376 | */ | ||
377 | u32 reserved1:2; | 334 | u32 reserved1:2; |
378 | u32 sequential:1; | 335 | u32 sequential:1; |
379 | u32 reserved2:3; | 336 | u32 reserved2:3; |
@@ -382,12 +339,10 @@ struct fc_plogi_clp_s { | |||
382 | u32 sequential:1; | 339 | u32 sequential:1; |
383 | u32 reserved1:2; | 340 | u32 reserved1:2; |
384 | u32 intermix:1; /* class intermix supported if set =1. | 341 | u32 intermix:1; /* class intermix supported if set =1. |
385 | * valid only for class1. Reserved for | 342 | * valid only for class1. Reserved for |
386 | * class2 & class3 | 343 | * class2 & class3 */ |
387 | */ | ||
388 | u32 class_valid:1; | 344 | u32 class_valid:1; |
389 | #endif | 345 | #endif |
390 | |||
391 | u32 reserved3:24; | 346 | u32 reserved3:24; |
392 | 347 | ||
393 | u32 reserved4:16; | 348 | u32 reserved4:16; |
@@ -395,7 +350,7 @@ struct fc_plogi_clp_s { | |||
395 | 350 | ||
396 | u32 reserved5:8; | 351 | u32 reserved5:8; |
397 | u32 conseq:8; | 352 | u32 conseq:8; |
398 | u32 e2e_credit:16; /* end to end credit */ | 353 | u32 e2e_credit:16; /* end to end credit */ |
399 | 354 | ||
400 | u32 reserved7:8; | 355 | u32 reserved7:8; |
401 | u32 ospx:8; | 356 | u32 ospx:8; |
@@ -409,24 +364,24 @@ struct fc_plogi_clp_s { | |||
409 | * PLOGI els command and reply payload | 364 | * PLOGI els command and reply payload |
410 | */ | 365 | */ |
411 | struct fc_logi_s { | 366 | struct fc_logi_s { |
412 | struct fc_els_cmd_s els_cmd; /* ELS command code */ | 367 | struct fc_els_cmd_s els_cmd; /* ELS command code */ |
413 | struct fc_plogi_csp_s csp; /* common service params */ | 368 | struct fc_plogi_csp_s csp; /* common service params */ |
414 | wwn_t port_name; | 369 | wwn_t port_name; |
415 | wwn_t node_name; | 370 | wwn_t node_name; |
416 | struct fc_plogi_clp_s class1; /* class 1 service parameters */ | 371 | struct fc_plogi_clp_s class1; /* class 1 service parameters */ |
417 | struct fc_plogi_clp_s class2; /* class 2 service parameters */ | 372 | struct fc_plogi_clp_s class2; /* class 2 service parameters */ |
418 | struct fc_plogi_clp_s class3; /* class 3 service parameters */ | 373 | struct fc_plogi_clp_s class3; /* class 3 service parameters */ |
419 | struct fc_plogi_clp_s class4; /* class 4 service parameters */ | 374 | struct fc_plogi_clp_s class4; /* class 4 service parameters */ |
420 | u8 vvl[16]; /* vendor version level */ | 375 | u8 vvl[16]; /* vendor version level */ |
421 | }; | 376 | }; |
422 | 377 | ||
423 | /* | 378 | /* |
424 | * LOGO els command payload | 379 | * LOGO els command payload |
425 | */ | 380 | */ |
426 | struct fc_logo_s { | 381 | struct fc_logo_s { |
427 | struct fc_els_cmd_s els_cmd; /* ELS command code */ | 382 | struct fc_els_cmd_s els_cmd; /* ELS command code */ |
428 | u32 res1:8; | 383 | u32 res1:8; |
429 | u32 nport_id:24; /* N_Port identifier of source */ | 384 | u32 nport_id:24; /* N_Port identifier of source */ |
430 | wwn_t orig_port_name; /* Port name of the LOGO originator */ | 385 | wwn_t orig_port_name; /* Port name of the LOGO originator */ |
431 | }; | 386 | }; |
432 | 387 | ||
@@ -435,12 +390,12 @@ struct fc_logo_s { | |||
435 | */ | 390 | */ |
436 | struct fc_adisc_s { | 391 | struct fc_adisc_s { |
437 | struct fc_els_cmd_s els_cmd; /* ELS command code */ | 392 | struct fc_els_cmd_s els_cmd; /* ELS command code */ |
438 | u32 res1:8; | 393 | u32 res1:8; |
439 | u32 orig_HA:24; /* originator hard address */ | 394 | u32 orig_HA:24; /* originator hard address */ |
440 | wwn_t orig_port_name; /* originator port name */ | 395 | wwn_t orig_port_name; /* originator port name */ |
441 | wwn_t orig_node_name; /* originator node name */ | 396 | wwn_t orig_node_name; /* originator node name */ |
442 | u32 res2:8; | 397 | u32 res2:8; |
443 | u32 nport_id:24; /* originator NPortID */ | 398 | u32 nport_id:24; /* originator NPortID */ |
444 | }; | 399 | }; |
445 | 400 | ||
446 | /* | 401 | /* |
@@ -466,7 +421,7 @@ struct fc_exch_status_blk_s { | |||
466 | struct fc_res_s { | 421 | struct fc_res_s { |
467 | struct fc_els_cmd_s els_cmd; /* ELS command code */ | 422 | struct fc_els_cmd_s els_cmd; /* ELS command code */ |
468 | u32 res1:8; | 423 | u32 res1:8; |
469 | u32 nport_id:24; /* N_Port identifier of source */ | 424 | u32 nport_id:24; /* N_Port identifier of source */ |
470 | u32 oxid:16; | 425 | u32 oxid:16; |
471 | u32 rxid:16; | 426 | u32 rxid:16; |
472 | u8 assoc_hdr[32]; | 427 | u8 assoc_hdr[32]; |
@@ -512,8 +467,8 @@ struct fc_rec_acc_s { | |||
512 | u32 orig_id:24; /* N_Port id of exchange originator */ | 467 | u32 orig_id:24; /* N_Port id of exchange originator */ |
513 | u32 res2:8; | 468 | u32 res2:8; |
514 | u32 resp_id:24; /* N_Port id of exchange responder */ | 469 | u32 resp_id:24; /* N_Port id of exchange responder */ |
515 | u32 count; /* data transfer count */ | 470 | u32 count; /* data transfer count */ |
516 | u32 e_stat; /* exchange status */ | 471 | u32 e_stat; /* exchange status */ |
517 | }; | 472 | }; |
518 | 473 | ||
519 | /* | 474 | /* |
@@ -533,7 +488,7 @@ struct fc_rsi_s { | |||
533 | */ | 488 | */ |
534 | struct fc_prli_params_s { | 489 | struct fc_prli_params_s { |
535 | u32 reserved:16; | 490 | u32 reserved:16; |
536 | #ifdef __BIGENDIAN | 491 | #ifdef __BIG_ENDIAN |
537 | u32 reserved1:5; | 492 | u32 reserved1:5; |
538 | u32 rec_support:1; | 493 | u32 rec_support:1; |
539 | u32 task_retry_id:1; | 494 | u32 task_retry_id:1; |
@@ -575,7 +530,7 @@ enum { | |||
575 | struct fc_prli_params_page_s { | 530 | struct fc_prli_params_page_s { |
576 | u32 type:8; | 531 | u32 type:8; |
577 | u32 codext:8; | 532 | u32 codext:8; |
578 | #ifdef __BIGENDIAN | 533 | #ifdef __BIG_ENDIAN |
579 | u32 origprocasv:1; | 534 | u32 origprocasv:1; |
580 | u32 rsppav:1; | 535 | u32 rsppav:1; |
581 | u32 imagepair:1; | 536 | u32 imagepair:1; |
@@ -611,18 +566,14 @@ struct fc_prli_s { | |||
611 | struct fc_prlo_params_page_s { | 566 | struct fc_prlo_params_page_s { |
612 | u32 type:8; | 567 | u32 type:8; |
613 | u32 type_ext:8; | 568 | u32 type_ext:8; |
614 | #ifdef __BIGENDIAN | 569 | #ifdef __BIG_ENDIAN |
615 | u32 opa_valid:1; /* originator process associator | 570 | u32 opa_valid:1; /* originator process associator valid */ |
616 | * valid | ||
617 | */ | ||
618 | u32 rpa_valid:1; /* responder process associator valid */ | 571 | u32 rpa_valid:1; /* responder process associator valid */ |
619 | u32 res1:14; | 572 | u32 res1:14; |
620 | #else | 573 | #else |
621 | u32 res1:14; | 574 | u32 res1:14; |
622 | u32 rpa_valid:1; /* responder process associator valid */ | 575 | u32 rpa_valid:1; /* responder process associator valid */ |
623 | u32 opa_valid:1; /* originator process associator | 576 | u32 opa_valid:1; /* originator process associator valid */ |
624 | * valid | ||
625 | */ | ||
626 | #endif | 577 | #endif |
627 | u32 orig_process_assc; | 578 | u32 orig_process_assc; |
628 | u32 resp_process_assc; | 579 | u32 resp_process_assc; |
@@ -647,18 +598,14 @@ struct fc_prlo_acc_params_page_s { | |||
647 | u32 type:8; | 598 | u32 type:8; |
648 | u32 type_ext:8; | 599 | u32 type_ext:8; |
649 | 600 | ||
650 | #ifdef __BIGENDIAN | 601 | #ifdef __BIG_ENDIAN |
651 | u32 opa_valid:1; /* originator process associator | 602 | u32 opa_valid:1; /* originator process associator valid */ |
652 | * valid | ||
653 | */ | ||
654 | u32 rpa_valid:1; /* responder process associator valid */ | 603 | u32 rpa_valid:1; /* responder process associator valid */ |
655 | u32 res1:14; | 604 | u32 res1:14; |
656 | #else | 605 | #else |
657 | u32 res1:14; | 606 | u32 res1:14; |
658 | u32 rpa_valid:1; /* responder process associator valid */ | 607 | u32 rpa_valid:1; /* responder process associator valid */ |
659 | u32 opa_valid:1; /* originator process associator | 608 | u32 opa_valid:1; /* originator process associator valid */ |
660 | * valid | ||
661 | */ | ||
662 | #endif | 609 | #endif |
663 | u32 orig_process_assc; | 610 | u32 orig_process_assc; |
664 | u32 resp_process_assc; | 611 | u32 resp_process_assc; |
@@ -715,9 +662,9 @@ enum { | |||
715 | * LS_RJT els reply payload | 662 | * LS_RJT els reply payload |
716 | */ | 663 | */ |
717 | struct fc_ls_rjt_s { | 664 | struct fc_ls_rjt_s { |
718 | struct fc_els_cmd_s els_cmd; /* ELS command code */ | 665 | struct fc_els_cmd_s els_cmd; /* ELS command code */ |
719 | u32 res1:8; | 666 | u32 res1:8; |
720 | u32 reason_code:8; /* Reason code for reject */ | 667 | u32 reason_code:8; /* Reason code for reject */ |
721 | u32 reason_code_expl:8; /* Reason code explanation */ | 668 | u32 reason_code_expl:8; /* Reason code explanation */ |
722 | u32 vendor_unique:8; /* Vendor specific */ | 669 | u32 vendor_unique:8; /* Vendor specific */ |
723 | }; | 670 | }; |
@@ -779,12 +726,12 @@ struct fc_rrq_s { | |||
779 | */ | 726 | */ |
780 | struct fc_ba_acc_s { | 727 | struct fc_ba_acc_s { |
781 | u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ | 728 | u32 seq_id_valid:8; /* set to 0x00 for Abort Exchange */ |
782 | u32 seq_id:8; /* invalid for Abort Exchange */ | 729 | u32 seq_id:8; /* invalid for Abort Exchange */ |
783 | u32 res2:16; | 730 | u32 res2:16; |
784 | u32 ox_id:16; /* OX_ID from ABTS frame */ | 731 | u32 ox_id:16; /* OX_ID from ABTS frame */ |
785 | u32 rx_id:16; /* RX_ID from ABTS frame */ | 732 | u32 rx_id:16; /* RX_ID from ABTS frame */ |
786 | u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */ | 733 | u32 low_seq_cnt:16; /* set to 0x0000 for Abort Exchange */ |
787 | u32 high_seq_cnt:16;/* set to 0xFFFF for Abort Exchange */ | 734 | u32 high_seq_cnt:16; /* set to 0xFFFF for Abort Exchange */ |
788 | }; | 735 | }; |
789 | 736 | ||
790 | /* | 737 | /* |
@@ -794,17 +741,17 @@ struct fc_ba_rjt_s { | |||
794 | u32 res1:8; /* Reserved */ | 741 | u32 res1:8; /* Reserved */ |
795 | u32 reason_code:8; /* reason code for reject */ | 742 | u32 reason_code:8; /* reason code for reject */ |
796 | u32 reason_expl:8; /* reason code explanation */ | 743 | u32 reason_expl:8; /* reason code explanation */ |
797 | u32 vendor_unique:8;/* vendor unique reason code,set to 0 */ | 744 | u32 vendor_unique:8; /* vendor unique reason code,set to 0 */ |
798 | }; | 745 | }; |
799 | 746 | ||
800 | /* | 747 | /* |
801 | * TPRLO logout parameter page | 748 | * TPRLO logout parameter page |
802 | */ | 749 | */ |
803 | struct fc_tprlo_params_page_s { | 750 | struct fc_tprlo_params_page_s { |
804 | u32 type:8; | 751 | u32 type:8; |
805 | u32 type_ext:8; | 752 | u32 type_ext:8; |
806 | 753 | ||
807 | #ifdef __BIGENDIAN | 754 | #ifdef __BIG_ENDIAN |
808 | u32 opa_valid:1; | 755 | u32 opa_valid:1; |
809 | u32 rpa_valid:1; | 756 | u32 rpa_valid:1; |
810 | u32 tpo_nport_valid:1; | 757 | u32 tpo_nport_valid:1; |
@@ -864,16 +811,16 @@ enum fc_rscn_format { | |||
864 | }; | 811 | }; |
865 | 812 | ||
866 | struct fc_rscn_event_s { | 813 | struct fc_rscn_event_s { |
867 | u32 format:2; | 814 | u32 format:2; |
868 | u32 qualifier:4; | 815 | u32 qualifier:4; |
869 | u32 resvd:2; | 816 | u32 resvd:2; |
870 | u32 portid:24; | 817 | u32 portid:24; |
871 | }; | 818 | }; |
872 | 819 | ||
873 | struct fc_rscn_pl_s { | 820 | struct fc_rscn_pl_s { |
874 | u8 command; | 821 | u8 command; |
875 | u8 pagelen; | 822 | u8 pagelen; |
876 | u16 payldlen; | 823 | __be16 payldlen; |
877 | struct fc_rscn_event_s event[1]; | 824 | struct fc_rscn_event_s event[1]; |
878 | }; | 825 | }; |
879 | 826 | ||
@@ -887,7 +834,6 @@ struct fc_echo_s { | |||
887 | /* | 834 | /* |
888 | * RNID els command | 835 | * RNID els command |
889 | */ | 836 | */ |
890 | |||
891 | #define RNID_NODEID_DATA_FORMAT_COMMON 0x00 | 837 | #define RNID_NODEID_DATA_FORMAT_COMMON 0x00 |
892 | #define RNID_NODEID_DATA_FORMAT_FCP3 0x08 | 838 | #define RNID_NODEID_DATA_FORMAT_FCP3 0x08 |
893 | #define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF | 839 | #define RNID_NODEID_DATA_FORMAT_DISCOVERY 0xDF |
@@ -920,15 +866,15 @@ struct fc_rnid_cmd_s { | |||
920 | */ | 866 | */ |
921 | 867 | ||
922 | struct fc_rnid_common_id_data_s { | 868 | struct fc_rnid_common_id_data_s { |
923 | wwn_t port_name; | 869 | wwn_t port_name; |
924 | wwn_t node_name; | 870 | wwn_t node_name; |
925 | }; | 871 | }; |
926 | 872 | ||
927 | struct fc_rnid_general_topology_data_s { | 873 | struct fc_rnid_general_topology_data_s { |
928 | u32 vendor_unique[4]; | 874 | u32 vendor_unique[4]; |
929 | u32 asso_type; | 875 | __be32 asso_type; |
930 | u32 phy_port_num; | 876 | u32 phy_port_num; |
931 | u32 num_attached_nodes; | 877 | __be32 num_attached_nodes; |
932 | u32 node_mgmt:8; | 878 | u32 node_mgmt:8; |
933 | u32 ip_version:8; | 879 | u32 ip_version:8; |
934 | u32 udp_tcp_port_num:16; | 880 | u32 udp_tcp_port_num:16; |
@@ -980,59 +926,17 @@ enum fc_rpsc_op_speed { | |||
980 | RPSC_OP_SPEED_8G = 0x0800, | 926 | RPSC_OP_SPEED_8G = 0x0800, |
981 | RPSC_OP_SPEED_16G = 0x0400, | 927 | RPSC_OP_SPEED_16G = 0x0400, |
982 | 928 | ||
983 | RPSC_OP_SPEED_NOT_EST = 0x0001, /*! speed not established */ | 929 | RPSC_OP_SPEED_NOT_EST = 0x0001, /* speed not established */ |
984 | }; | 930 | }; |
985 | 931 | ||
986 | struct fc_rpsc_speed_info_s { | 932 | struct fc_rpsc_speed_info_s { |
987 | u16 port_speed_cap; /*! see enum fc_rpsc_speed_cap */ | 933 | __be16 port_speed_cap; /* see enum fc_rpsc_speed_cap */ |
988 | u16 port_op_speed; /*! see enum fc_rpsc_op_speed */ | 934 | __be16 port_op_speed; /* see enum fc_rpsc_op_speed */ |
989 | }; | ||
990 | |||
991 | enum link_e2e_beacon_subcmd { | ||
992 | LINK_E2E_BEACON_ON = 1, | ||
993 | LINK_E2E_BEACON_OFF = 2 | ||
994 | }; | ||
995 | |||
996 | enum beacon_type { | ||
997 | BEACON_TYPE_NORMAL = 1, /*! Normal Beaconing. Green */ | ||
998 | BEACON_TYPE_WARN = 2, /*! Warning Beaconing. Yellow/Amber */ | ||
999 | BEACON_TYPE_CRITICAL = 3 /*! Critical Beaconing. Red */ | ||
1000 | }; | ||
1001 | |||
1002 | struct link_e2e_beacon_param_s { | ||
1003 | u8 beacon_type; /* Beacon Type. See enum beacon_type */ | ||
1004 | u8 beacon_frequency; | ||
1005 | /* Beacon frequency. Number of blinks | ||
1006 | * per 10 seconds | ||
1007 | */ | ||
1008 | u16 beacon_duration;/* Beacon duration (in Seconds). The | ||
1009 | * command operation should be | ||
1010 | * terminated at the end of this | ||
1011 | * timeout value. | ||
1012 | * | ||
1013 | * Ignored if diag_sub_cmd is | ||
1014 | * LINK_E2E_BEACON_OFF. | ||
1015 | * | ||
1016 | * If 0, beaconing will continue till a | ||
1017 | * BEACON OFF request is received | ||
1018 | */ | ||
1019 | }; | ||
1020 | |||
1021 | /* | ||
1022 | * Link E2E beacon request/good response format. | ||
1023 | * For LS_RJTs use struct fc_ls_rjt_s | ||
1024 | */ | ||
1025 | struct link_e2e_beacon_req_s { | ||
1026 | u32 ls_code; /*! FC_ELS_E2E_LBEACON in requests * | ||
1027 | *or FC_ELS_ACC in good replies */ | ||
1028 | u32 ls_sub_cmd; /*! See enum link_e2e_beacon_subcmd */ | ||
1029 | struct link_e2e_beacon_param_s beacon_parm; | ||
1030 | }; | 935 | }; |
1031 | 936 | ||
1032 | /* | 937 | /* |
1033 | * If RPSC request is sent to the Domain Controller, the request is for | 938 | * If RPSC request is sent to the Domain Controller, the request is for |
1034 | * all the ports within that domain (TODO - I don't think FOS implements | 939 | * all the ports within that domain. |
1035 | * this...). | ||
1036 | */ | 940 | */ |
1037 | struct fc_rpsc_cmd_s { | 941 | struct fc_rpsc_cmd_s { |
1038 | struct fc_els_cmd_s els_cmd; | 942 | struct fc_els_cmd_s els_cmd; |
@@ -1056,9 +960,9 @@ struct fc_rpsc_acc_s { | |||
1056 | 960 | ||
1057 | struct fc_rpsc2_cmd_s { | 961 | struct fc_rpsc2_cmd_s { |
1058 | struct fc_els_cmd_s els_cmd; | 962 | struct fc_els_cmd_s els_cmd; |
1059 | u32 token; | 963 | __be32 token; |
1060 | u16 resvd; | 964 | u16 resvd; |
1061 | u16 num_pids; /* Number of pids in the request */ | 965 | __be16 num_pids; /* Number of pids in the request */ |
1062 | struct { | 966 | struct { |
1063 | u32 rsvd1:8; | 967 | u32 rsvd1:8; |
1064 | u32 pid:24; /* port identifier */ | 968 | u32 pid:24; /* port identifier */ |
@@ -1072,16 +976,17 @@ enum fc_rpsc2_port_type { | |||
1072 | RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, | 976 | RPSC2_PORT_TYPE_NPIV_PORT = 0x5f, |
1073 | RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, | 977 | RPSC2_PORT_TYPE_NPORT_TRUNK = 0x6f, |
1074 | }; | 978 | }; |
979 | |||
1075 | /* | 980 | /* |
1076 | * RPSC2 portInfo entry structure | 981 | * RPSC2 portInfo entry structure |
1077 | */ | 982 | */ |
1078 | struct fc_rpsc2_port_info_s { | 983 | struct fc_rpsc2_port_info_s { |
1079 | u32 pid; /* PID */ | 984 | __be32 pid; /* PID */ |
1080 | u16 resvd1; | 985 | u16 resvd1; |
1081 | u16 index; /* port number / index */ | 986 | __be16 index; /* port number / index */ |
1082 | u8 resvd2; | 987 | u8 resvd2; |
1083 | u8 type; /* port type N/NL/... */ | 988 | u8 type; /* port type N/NL/... */ |
1084 | u16 speed; /* port Operating Speed */ | 989 | __be16 speed; /* port Operating Speed */ |
1085 | }; | 990 | }; |
1086 | 991 | ||
1087 | /* | 992 | /* |
@@ -1090,8 +995,8 @@ struct fc_rpsc2_port_info_s { | |||
1090 | struct fc_rpsc2_acc_s { | 995 | struct fc_rpsc2_acc_s { |
1091 | u8 els_cmd; | 996 | u8 els_cmd; |
1092 | u8 resvd; | 997 | u8 resvd; |
1093 | u16 num_pids; /* Number of pids in the request */ | 998 | __be16 num_pids; /* Number of pids in the request */ |
1094 | struct fc_rpsc2_port_info_s port_info[1]; /* port information */ | 999 | struct fc_rpsc2_port_info_s port_info[1]; /* port information */ |
1095 | }; | 1000 | }; |
1096 | 1001 | ||
1097 | /* | 1002 | /* |
@@ -1110,18 +1015,14 @@ struct fc_symname_s { | |||
1110 | u8 symname[FC_SYMNAME_MAX]; | 1015 | u8 symname[FC_SYMNAME_MAX]; |
1111 | }; | 1016 | }; |
1112 | 1017 | ||
1113 | struct fc_alpabm_s { | ||
1114 | u8 alpa_bm[FC_ALPA_MAX / 8]; | ||
1115 | }; | ||
1116 | |||
1117 | /* | 1018 | /* |
1118 | * protocol default timeout values | 1019 | * protocol default timeout values |
1119 | */ | 1020 | */ |
1120 | #define FC_ED_TOV 2 | 1021 | #define FC_ED_TOV 2 |
1121 | #define FC_REC_TOV (FC_ED_TOV + 1) | 1022 | #define FC_REC_TOV (FC_ED_TOV + 1) |
1122 | #define FC_RA_TOV 10 | 1023 | #define FC_RA_TOV 10 |
1123 | #define FC_ELS_TOV (2 * FC_RA_TOV) | 1024 | #define FC_ELS_TOV (2 * FC_RA_TOV) |
1124 | #define FC_FCCT_TOV (3 * FC_RA_TOV) | 1025 | #define FC_FCCT_TOV (3 * FC_RA_TOV) |
1125 | 1026 | ||
1126 | /* | 1027 | /* |
1127 | * virtual fabric related defines | 1028 | * virtual fabric related defines |
@@ -1157,50 +1058,34 @@ enum { | |||
1157 | }; | 1058 | }; |
1158 | 1059 | ||
1159 | /* | 1060 | /* |
1160 | * SRR FC-4 LS payload | ||
1161 | */ | ||
1162 | struct fc_srr_s { | ||
1163 | u32 ls_cmd; | ||
1164 | u32 ox_id:16; /* ox-id */ | ||
1165 | u32 rx_id:16; /* rx-id */ | ||
1166 | u32 ro; /* relative offset */ | ||
1167 | u32 r_ctl:8; /* R_CTL for I.U. */ | ||
1168 | u32 res:24; | ||
1169 | }; | ||
1170 | |||
1171 | |||
1172 | /* | ||
1173 | * FCP_CMND definitions | 1061 | * FCP_CMND definitions |
1174 | */ | 1062 | */ |
1175 | #define FCP_CMND_CDB_LEN 16 | 1063 | #define FCP_CMND_CDB_LEN 16 |
1176 | #define FCP_CMND_LUN_LEN 8 | 1064 | #define FCP_CMND_LUN_LEN 8 |
1177 | 1065 | ||
1178 | struct fcp_cmnd_s { | 1066 | struct fcp_cmnd_s { |
1179 | lun_t lun; /* 64-bit LU number */ | 1067 | struct scsi_lun lun; /* 64-bit LU number */ |
1180 | u8 crn; /* command reference number */ | 1068 | u8 crn; /* command reference number */ |
1181 | #ifdef __BIGENDIAN | 1069 | #ifdef __BIG_ENDIAN |
1182 | u8 resvd:1, | 1070 | u8 resvd:1, |
1183 | priority:4, /* FCP-3: SAM-3 priority */ | 1071 | priority:4, /* FCP-3: SAM-3 priority */ |
1184 | taskattr:3; /* scsi task attribute */ | 1072 | taskattr:3; /* scsi task attribute */ |
1185 | #else | 1073 | #else |
1186 | u8 taskattr:3, /* scsi task attribute */ | 1074 | u8 taskattr:3, /* scsi task attribute */ |
1187 | priority:4, /* FCP-3: SAM-3 priority */ | 1075 | priority:4, /* FCP-3: SAM-3 priority */ |
1188 | resvd:1; | 1076 | resvd:1; |
1189 | #endif | 1077 | #endif |
1190 | u8 tm_flags; /* task management flags */ | 1078 | u8 tm_flags; /* task management flags */ |
1191 | #ifdef __BIGENDIAN | 1079 | #ifdef __BIG_ENDIAN |
1192 | u8 addl_cdb_len:6, /* additional CDB length words */ | 1080 | u8 addl_cdb_len:6, /* additional CDB length words */ |
1193 | iodir:2; /* read/write FCP_DATA IUs */ | 1081 | iodir:2; /* read/write FCP_DATA IUs */ |
1194 | #else | 1082 | #else |
1195 | u8 iodir:2, /* read/write FCP_DATA IUs */ | 1083 | u8 iodir:2, /* read/write FCP_DATA IUs */ |
1196 | addl_cdb_len:6; /* additional CDB length */ | 1084 | addl_cdb_len:6; /* additional CDB length */ |
1197 | #endif | 1085 | #endif |
1198 | scsi_cdb_t cdb; | 1086 | struct scsi_cdb_s cdb; |
1199 | 1087 | ||
1200 | /* | 1088 | __be32 fcp_dl; /* bytes to be transferred */ |
1201 | * !!! additional cdb bytes follows here!!! | ||
1202 | */ | ||
1203 | u32 fcp_dl; /* bytes to be transferred */ | ||
1204 | }; | 1089 | }; |
1205 | 1090 | ||
1206 | #define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN) | 1091 | #define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN) |
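fcp_cmnd_cdb_len() is just arithmetic: the fixed 16-byte CDB field plus addl_cdb_len extra 4-byte words. A worked example with illustrative values:

struct fcp_cmnd_s cmnd;

memset(&cmnd, 0, sizeof(cmnd));
cmnd.addl_cdb_len = 4;	/* four extra 4-byte CDB words */
/* fcp_cmnd_cdb_len(&cmnd) == 4 * 4 + FCP_CMND_CDB_LEN == 32 bytes */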
@@ -1210,21 +1095,10 @@ struct fcp_cmnd_s { | |||
1210 | * struct fcp_cmnd_s .iodir field values | 1095 | * struct fcp_cmnd_s .iodir field values |
1211 | */ | 1096 | */ |
1212 | enum fcp_iodir { | 1097 | enum fcp_iodir { |
1213 | FCP_IODIR_NONE = 0, | 1098 | FCP_IODIR_NONE = 0, |
1214 | FCP_IODIR_WRITE = 1, | 1099 | FCP_IODIR_WRITE = 1, |
1215 | FCP_IODIR_READ = 2, | 1100 | FCP_IODIR_READ = 2, |
1216 | FCP_IODIR_RW = 3, | 1101 | FCP_IODIR_RW = 3, |
1217 | }; | ||
1218 | |||
1219 | /* | ||
1220 | * Task attribute field | ||
1221 | */ | ||
1222 | enum { | ||
1223 | FCP_TASK_ATTR_SIMPLE = 0, | ||
1224 | FCP_TASK_ATTR_HOQ = 1, | ||
1225 | FCP_TASK_ATTR_ORDERED = 2, | ||
1226 | FCP_TASK_ATTR_ACA = 4, | ||
1227 | FCP_TASK_ATTR_UNTAGGED = 5, /* obsolete in FCP-3 */ | ||
1228 | }; | 1102 | }; |
1229 | 1103 | ||
1230 | /* | 1104 | /* |
@@ -1239,58 +1113,40 @@ enum fcp_tm_cmnd { | |||
1239 | }; | 1113 | }; |
1240 | 1114 | ||
1241 | /* | 1115 | /* |
1242 | * FCP_XFER_RDY IU defines | ||
1243 | */ | ||
1244 | struct fcp_xfer_rdy_s { | ||
1245 | u32 data_ro; | ||
1246 | u32 burst_len; | ||
1247 | u32 reserved; | ||
1248 | }; | ||
1249 | |||
1250 | /* | ||
1251 | * FCP_RSP residue flags | 1116 | * FCP_RSP residue flags |
1252 | */ | 1117 | */ |
1253 | enum fcp_residue { | 1118 | enum fcp_residue { |
1254 | FCP_NO_RESIDUE = 0, /* no residue */ | 1119 | FCP_NO_RESIDUE = 0, /* no residue */ |
1255 | FCP_RESID_OVER = 1, /* more data left that was not sent */ | 1120 | FCP_RESID_OVER = 1, /* more data left that was not sent */ |
1256 | FCP_RESID_UNDER = 2, /* less data than requested */ | 1121 | FCP_RESID_UNDER = 2, /* less data than requested */ |
1257 | }; | ||
1258 | |||
1259 | enum { | ||
1260 | FCP_RSPINFO_GOOD = 0, | ||
1261 | FCP_RSPINFO_DATALEN_MISMATCH = 1, | ||
1262 | FCP_RSPINFO_CMND_INVALID = 2, | ||
1263 | FCP_RSPINFO_ROLEN_MISMATCH = 3, | ||
1264 | FCP_RSPINFO_TM_NOT_SUPP = 4, | ||
1265 | FCP_RSPINFO_TM_FAILED = 5, | ||
1266 | }; | 1122 | }; |
1267 | 1123 | ||
1268 | struct fcp_rspinfo_s { | 1124 | struct fcp_rspinfo_s { |
1269 | u32 res0:24; | 1125 | u32 res0:24; |
1270 | u32 rsp_code:8; /* response code (as above) */ | 1126 | u32 rsp_code:8; /* response code (as above) */ |
1271 | u32 res1; | 1127 | u32 res1; |
1272 | }; | 1128 | }; |
1273 | 1129 | ||
1274 | struct fcp_resp_s { | 1130 | struct fcp_resp_s { |
1275 | u32 reserved[2]; /* 2 words reserved */ | 1131 | u32 reserved[2]; /* 2 words reserved */ |
1276 | u16 reserved2; | 1132 | u16 reserved2; |
1277 | #ifdef __BIGENDIAN | 1133 | #ifdef __BIG_ENDIAN |
1278 | u8 reserved3:3; | 1134 | u8 reserved3:3; |
1279 | u8 fcp_conf_req:1; /* FCP_CONF is requested */ | 1135 | u8 fcp_conf_req:1; /* FCP_CONF is requested */ |
1280 | u8 resid_flags:2; /* underflow/overflow */ | 1136 | u8 resid_flags:2; /* underflow/overflow */ |
1281 | u8 sns_len_valid:1;/* sense len is valid */ | 1137 | u8 sns_len_valid:1; /* sense len is valid */ |
1282 | u8 rsp_len_valid:1;/* response len is valid */ | 1138 | u8 rsp_len_valid:1; /* response len is valid */ |
1283 | #else | 1139 | #else |
1284 | u8 rsp_len_valid:1;/* response len is valid */ | 1140 | u8 rsp_len_valid:1; /* response len is valid */ |
1285 | u8 sns_len_valid:1;/* sense len is valid */ | 1141 | u8 sns_len_valid:1; /* sense len is valid */ |
1286 | u8 resid_flags:2; /* underflow/overflow */ | 1142 | u8 resid_flags:2; /* underflow/overflow */ |
1287 | u8 fcp_conf_req:1; /* FCP_CONF is requested */ | 1143 | u8 fcp_conf_req:1; /* FCP_CONF is requested */ |
1288 | u8 reserved3:3; | 1144 | u8 reserved3:3; |
1289 | #endif | 1145 | #endif |
1290 | u8 scsi_status; /* one byte SCSI status */ | 1146 | u8 scsi_status; /* one byte SCSI status */ |
1291 | u32 residue; /* residual data bytes */ | 1147 | u32 residue; /* residual data bytes */ |
1292 | u32 sns_len; /* length od sense info */ | 1148 | u32 sns_len; /* length od sense info */ |
1293 | u32 rsp_len; /* length of response info */ | 1149 | u32 rsp_len; /* length of response info */ |
1294 | }; | 1150 | }; |
1295 | 1151 | ||
1296 | #define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \ | 1152 | #define fcp_snslen(__fcprsp) ((__fcprsp)->sns_len_valid ? \ |
@@ -1300,12 +1156,6 @@ struct fcp_resp_s { | |||
1300 | #define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1)) | 1156 | #define fcp_rspinfo(__fcprsp) ((struct fcp_rspinfo_s *)((__fcprsp) + 1)) |
1301 | #define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \ | 1157 | #define fcp_snsinfo(__fcprsp) (((u8 *)fcp_rspinfo(__fcprsp)) + \ |
1302 | fcp_rsplen(__fcprsp)) | 1158 | fcp_rsplen(__fcprsp)) |
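The fcp_snslen()/fcp_rspinfo()/fcp_snsinfo() macros do the pointer arithmetic for the variable-length data that trails an FCP_RSP: response info first (if rsp_len_valid), then sense bytes (if sns_len_valid). A sketch, where 'fcprsp' is a hypothetical pointer at the fcp_resp_s inside a received payload:

u8 *sense = NULL;
u32 sense_len = fcp_snslen(fcprsp);

if (sense_len)
	sense = fcp_snsinfo(fcprsp);	/* skips past the optional response info */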
1303 | |||
1304 | struct fcp_cmnd_fr_s { | ||
1305 | struct fchs_s fchs; | ||
1306 | struct fcp_cmnd_s fcp; | ||
1307 | }; | ||
1308 | |||
1309 | /* | 1159 | /* |
1310 | * CT | 1160 | * CT |
1311 | */ | 1161 | */ |
@@ -1379,7 +1229,7 @@ enum { | |||
1379 | CT_RSN_LOGICAL_BUSY = 0x05, | 1229 | CT_RSN_LOGICAL_BUSY = 0x05, |
1380 | CT_RSN_PROTO_ERR = 0x07, | 1230 | CT_RSN_PROTO_ERR = 0x07, |
1381 | CT_RSN_UNABLE_TO_PERF = 0x09, | 1231 | CT_RSN_UNABLE_TO_PERF = 0x09, |
1382 | CT_RSN_NOT_SUPP = 0x0B, | 1232 | CT_RSN_NOT_SUPP = 0x0B, |
1383 | CT_RSN_SERVER_NOT_AVBL = 0x0D, | 1233 | CT_RSN_SERVER_NOT_AVBL = 0x0D, |
1384 | CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E, | 1234 | CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E, |
1385 | CT_RSN_VENDOR_SPECIFIC = 0xFF, | 1235 | CT_RSN_VENDOR_SPECIFIC = 0xFF, |
@@ -1419,10 +1269,10 @@ enum { | |||
1419 | * defintions for the explanation code for all servers | 1269 | * defintions for the explanation code for all servers |
1420 | */ | 1270 | */ |
1421 | enum { | 1271 | enum { |
1422 | CT_EXP_AUTH_EXCEPTION = 0xF1, | 1272 | CT_EXP_AUTH_EXCEPTION = 0xF1, |
1423 | CT_EXP_DB_FULL = 0xF2, | 1273 | CT_EXP_DB_FULL = 0xF2, |
1424 | CT_EXP_DB_EMPTY = 0xF3, | 1274 | CT_EXP_DB_EMPTY = 0xF3, |
1425 | CT_EXP_PROCESSING_REQ = 0xF4, | 1275 | CT_EXP_PROCESSING_REQ = 0xF4, |
1426 | CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5, | 1276 | CT_EXP_UNABLE_TO_VERIFY_CONN = 0xF5, |
1427 | CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6 | 1277 | CT_EXP_DEVICES_NOT_IN_CMN_ZONE = 0xF6 |
1428 | }; | 1278 | }; |
@@ -1446,7 +1296,7 @@ enum { | |||
1446 | GS_RFF_ID = 0x021F, /* Register FC4 Feature */ | 1296 | GS_RFF_ID = 0x021F, /* Register FC4 Feature */ |
1447 | }; | 1297 | }; |
1448 | 1298 | ||
1449 | struct fcgs_id_req_s{ | 1299 | struct fcgs_id_req_s { |
1450 | u32 rsvd:8; | 1300 | u32 rsvd:8; |
1451 | u32 dap:24; /* port identifier */ | 1301 | u32 dap:24; /* port identifier */ |
1452 | }; | 1302 | }; |
@@ -1460,7 +1310,7 @@ struct fcgs_gidpn_req_s { | |||
1460 | 1310 | ||
1461 | struct fcgs_gidpn_resp_s { | 1311 | struct fcgs_gidpn_resp_s { |
1462 | u32 rsvd:8; | 1312 | u32 rsvd:8; |
1463 | u32 dap:24; /* port identifier */ | 1313 | u32 dap:24; /* port identifier */ |
1464 | }; | 1314 | }; |
1465 | 1315 | ||
1466 | /* | 1316 | /* |
@@ -1469,22 +1319,21 @@ struct fcgs_gidpn_resp_s { | |||
1469 | struct fcgs_rftid_req_s { | 1319 | struct fcgs_rftid_req_s { |
1470 | u32 rsvd:8; | 1320 | u32 rsvd:8; |
1471 | u32 dap:24; /* port identifier */ | 1321 | u32 dap:24; /* port identifier */ |
1472 | u32 fc4_type[8]; /* fc4 types */ | 1322 | __be32 fc4_type[8]; /* fc4 types */ |
1473 | }; | 1323 | }; |
1474 | 1324 | ||
1475 | /* | 1325 | /* |
1476 | * RFF_ID : Register FC4 features. | 1326 | * RFF_ID : Register FC4 features. |
1477 | */ | 1327 | */ |
1478 | |||
1479 | #define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02 | 1328 | #define FC_GS_FCP_FC4_FEATURE_INITIATOR 0x02 |
1480 | #define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 | 1329 | #define FC_GS_FCP_FC4_FEATURE_TARGET 0x01 |
1481 | 1330 | ||
1482 | struct fcgs_rffid_req_s { | 1331 | struct fcgs_rffid_req_s { |
1483 | u32 rsvd:8; | 1332 | u32 rsvd:8; |
1484 | u32 dap:24; /* port identifier */ | 1333 | u32 dap:24; /* port identifier */ |
1485 | u32 rsvd1:16; | 1334 | u32 rsvd1:16; |
1486 | u32 fc4ftr_bits:8; /* fc4 feature bits */ | 1335 | u32 fc4ftr_bits:8; /* fc4 feature bits */ |
1487 | u32 fc4_type:8; /* corresponding FC4 Type */ | 1336 | u32 fc4_type:8; /* corresponding FC4 Type */ |
1488 | }; | 1337 | }; |
1489 | 1338 | ||
1490 | /* | 1339 | /* |
@@ -1495,16 +1344,16 @@ struct fcgs_gidft_req_s { | |||
1495 | u8 domain_id; /* domain, 0 - all fabric */ | 1344 | u8 domain_id; /* domain, 0 - all fabric */ |
1496 | u8 area_id; /* area, 0 - whole domain */ | 1345 | u8 area_id; /* area, 0 - whole domain */ |
1497 | u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ | 1346 | u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ |
1498 | }; /* GID_FT Request */ | 1347 | }; |
1499 | 1348 | ||
1500 | /* | 1349 | /* |
1501 | * GID_FT Response | 1350 | * GID_FT Response |
1502 | */ | 1351 | */ |
1503 | struct fcgs_gidft_resp_s { | 1352 | struct fcgs_gidft_resp_s { |
1504 | u8 last:1; /* last port identifier flag */ | 1353 | u8 last:1; /* last port identifier flag */ |
1505 | u8 reserved:7; | 1354 | u8 reserved:7; |
1506 | u32 pid:24; /* port identifier */ | 1355 | u32 pid:24; /* port identifier */ |
1507 | }; /* GID_FT Response */ | 1356 | }; |
1508 | 1357 | ||
1509 | /* | 1358 | /* |
1510 | * RSPN_ID | 1359 | * RSPN_ID |
@@ -1512,8 +1361,8 @@ struct fcgs_gidft_resp_s { | |||
1512 | struct fcgs_rspnid_req_s { | 1361 | struct fcgs_rspnid_req_s { |
1513 | u32 rsvd:8; | 1362 | u32 rsvd:8; |
1514 | u32 dap:24; /* port identifier */ | 1363 | u32 dap:24; /* port identifier */ |
1515 | u8 spn_len; /* symbolic port name length */ | 1364 | u8 spn_len; /* symbolic port name length */ |
1516 | u8 spn[256]; /* symbolic port name */ | 1365 | u8 spn[256]; /* symbolic port name */ |
1517 | }; | 1366 | }; |
1518 | 1367 | ||
1519 | /* | 1368 | /* |
@@ -1522,7 +1371,7 @@ struct fcgs_rspnid_req_s { | |||
1522 | struct fcgs_rpnid_req_s { | 1371 | struct fcgs_rpnid_req_s { |
1523 | u32 rsvd:8; | 1372 | u32 rsvd:8; |
1524 | u32 port_id:24; | 1373 | u32 port_id:24; |
1525 | wwn_t port_name; | 1374 | wwn_t port_name; |
1526 | }; | 1375 | }; |
1527 | 1376 | ||
1528 | /* | 1377 | /* |
@@ -1531,7 +1380,7 @@ struct fcgs_rpnid_req_s { | |||
1531 | struct fcgs_rnnid_req_s { | 1380 | struct fcgs_rnnid_req_s { |
1532 | u32 rsvd:8; | 1381 | u32 rsvd:8; |
1533 | u32 port_id:24; | 1382 | u32 port_id:24; |
1534 | wwn_t node_name; | 1383 | wwn_t node_name; |
1535 | }; | 1384 | }; |
1536 | 1385 | ||
1537 | /* | 1386 | /* |
@@ -1565,8 +1414,8 @@ struct fcgs_ganxt_req_s { | |||
1565 | * GA_NXT Response | 1414 | * GA_NXT Response |
1566 | */ | 1415 | */ |
1567 | struct fcgs_ganxt_rsp_s { | 1416 | struct fcgs_ganxt_rsp_s { |
1568 | u32 port_type:8; /* Port Type */ | 1417 | u32 port_type:8; /* Port Type */ |
1569 | u32 port_id:24; /* Port Identifier */ | 1418 | u32 port_id:24; /* Port Identifier */ |
1570 | wwn_t port_name; /* Port Name */ | 1419 | wwn_t port_name; /* Port Name */ |
1571 | u8 spn_len; /* Length of Symbolic Port Name */ | 1420 | u8 spn_len; /* Length of Symbolic Port Name */ |
1572 | char spn[255]; /* Symbolic Port Name */ | 1421 | char spn[255]; /* Symbolic Port Name */ |
@@ -1575,19 +1424,14 @@ struct fcgs_ganxt_rsp_s { | |||
1575 | char snn[255]; /* Symbolic Node Name */ | 1424 | char snn[255]; /* Symbolic Node Name */ |
1576 | u8 ipa[8]; /* Initial Process Associator */ | 1425 | u8 ipa[8]; /* Initial Process Associator */ |
1577 | u8 ip[16]; /* IP Address */ | 1426 | u8 ip[16]; /* IP Address */ |
1578 | u32 cos; /* Class of Service */ | 1427 | u32 cos; /* Class of Service */ |
1579 | u32 fc4types[8]; /* FC-4 TYPEs */ | 1428 | u32 fc4types[8]; /* FC-4 TYPEs */ |
1580 | wwn_t fabric_port_name; | 1429 | wwn_t fabric_port_name; /* Fabric Port Name */ |
1581 | /* Fabric Port Name */ | 1430 | u32 rsvd:8; /* Reserved */ |
1582 | u32 rsvd:8; /* Reserved */ | 1431 | u32 hard_addr:24; /* Hard Address */ |
1583 | u32 hard_addr:24; /* Hard Address */ | ||
1584 | }; | 1432 | }; |
1585 | 1433 | ||
1586 | /* | 1434 | /* |
1587 | * Fabric Config Server | ||
1588 | */ | ||
1589 | |||
1590 | /* | ||
1591 | * Command codes for Fabric Configuration Server | 1435 | * Command codes for Fabric Configuration Server |
1592 | */ | 1436 | */ |
1593 | enum { | 1437 | enum { |
@@ -1598,159 +1442,9 @@ enum { | |||
1598 | }; | 1442 | }; |
1599 | 1443 | ||
1600 | /* | 1444 | /* |
1601 | * Source or Destination Port Tags. | ||
1602 | */ | ||
1603 | enum { | ||
1604 | GS_FTRACE_TAG_NPORT_ID = 1, | ||
1605 | GS_FTRACE_TAG_NPORT_NAME = 2, | ||
1606 | }; | ||
1607 | |||
1608 | /* | ||
1609 | * Port Value : Could be a Port id or wwn | ||
1610 | */ | ||
1611 | union fcgs_port_val_u { | ||
1612 | u32 nport_id; | ||
1613 | wwn_t nport_wwn; | ||
1614 | }; | ||
1615 | |||
1616 | #define GS_FTRACE_MAX_HOP_COUNT 20 | ||
1617 | #define GS_FTRACE_REVISION 1 | ||
1618 | |||
1619 | /* | ||
1620 | * Ftrace Related Structures. | ||
1621 | */ | ||
1622 | |||
1623 | /* | ||
1624 | * STR (Switch Trace) Reject Reason Codes. From FC-SW. | ||
1625 | */ | ||
1626 | enum { | ||
1627 | GS_FTRACE_STR_CMD_COMPLETED_SUCC = 0, | ||
1628 | GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH, | ||
1629 | GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH, | ||
1630 | GS_FTRACE_STR_MAX_HOP_CNT_REACHED, | ||
1631 | GS_FTRACE_STR_SRC_PORT_NOT_FOUND, | ||
1632 | GS_FTRACE_STR_DST_PORT_NOT_FOUND, | ||
1633 | GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE, | ||
1634 | GS_FTRACE_STR_NO_ROUTE_BW_PORTS, | ||
1635 | GS_FTRACE_STR_NO_ADDL_EXPLN, | ||
1636 | GS_FTRACE_STR_FABRIC_BUSY, | ||
1637 | GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS, | ||
1638 | GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0, | ||
1639 | GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff, | ||
1640 | }; | ||
1641 | |||
1642 | /* | ||
1643 | * Ftrace Request | ||
1644 | */ | ||
1645 | struct fcgs_ftrace_req_s { | ||
1646 | u32 revision; | ||
1647 | u16 src_port_tag; /* Source Port tag */ | ||
1648 | u16 src_port_len; /* Source Port len */ | ||
1649 | union fcgs_port_val_u src_port_val; /* Source Port value */ | ||
1650 | u16 dst_port_tag; /* Destination Port tag */ | ||
1651 | u16 dst_port_len; /* Destination Port len */ | ||
1652 | union fcgs_port_val_u dst_port_val; /* Destination Port value */ | ||
1653 | u32 token; | ||
1654 | u8 vendor_id[8]; /* T10 Vendor Identifier */ | ||
1655 | u8 vendor_info[8]; /* Vendor specific Info */ | ||
1656 | u32 max_hop_cnt; /* Max Hop Count */ | ||
1657 | }; | ||
1658 | |||
1659 | /* | ||
1660 | * Path info structure | ||
1661 | */ | ||
1662 | struct fcgs_ftrace_path_info_s { | ||
1663 | wwn_t switch_name; /* Switch WWN */ | ||
1664 | u32 domain_id; | ||
1665 | wwn_t ingress_port_name; /* Ingress ports wwn */ | ||
1666 | u32 ingress_phys_port_num; /* Ingress ports physical port | ||
1667 | * number | ||
1668 | */ | ||
1669 | wwn_t egress_port_name; /* Ingress ports wwn */ | ||
1670 | u32 egress_phys_port_num; /* Ingress ports physical port | ||
1671 | * number | ||
1672 | */ | ||
1673 | }; | ||
1674 | |||
1675 | /* | ||
1676 | * Ftrace Acc Response | ||
1677 | */ | ||
1678 | struct fcgs_ftrace_resp_s { | ||
1679 | u32 revision; | ||
1680 | u32 token; | ||
1681 | u8 vendor_id[8]; /* T10 Vendor Identifier */ | ||
1682 | u8 vendor_info[8]; /* Vendor specific Info */ | ||
1683 | u32 str_rej_reason_code; /* STR Reject Reason Code */ | ||
1684 | u32 num_path_info_entries; /* No. of path info entries */ | ||
1685 | /* | ||
1686 | * path info entry/entries. | ||
1687 | */ | ||
1688 | struct fcgs_ftrace_path_info_s path_info[1]; | ||
1689 | |||
1690 | }; | ||
1691 | |||
1692 | /* | ||
1693 | * Fabric Config Server : FCPing | ||
1694 | */ | ||
1695 | |||
1696 | /* | ||
1697 | * FC Ping Request | ||
1698 | */ | ||
1699 | struct fcgs_fcping_req_s { | ||
1700 | u32 revision; | ||
1701 | u16 port_tag; | ||
1702 | u16 port_len; /* Port len */ | ||
1703 | union fcgs_port_val_u port_val; /* Port value */ | ||
1704 | u32 token; | ||
1705 | }; | ||
1706 | |||
1707 | /* | ||
1708 | * FC Ping Response | ||
1709 | */ | ||
1710 | struct fcgs_fcping_resp_s { | ||
1711 | u32 token; | ||
1712 | }; | ||
1713 | |||
1714 | /* | ||
1715 | * Command codes for zone server query. | ||
1716 | */ | ||
1717 | enum { | ||
1718 | ZS_GZME = 0x0124, /* Get zone member extended */ | ||
1719 | }; | ||
1720 | |||
1721 | /* | ||
1722 | * ZS GZME request | ||
1723 | */ | ||
1724 | #define ZS_GZME_ZNAMELEN 32 | ||
1725 | struct zs_gzme_req_s { | ||
1726 | u8 znamelen; | ||
1727 | u8 rsvd[3]; | ||
1728 | u8 zname[ZS_GZME_ZNAMELEN]; | ||
1729 | }; | ||
1730 | |||
1731 | enum zs_mbr_type { | ||
1732 | ZS_MBR_TYPE_PWWN = 1, | ||
1733 | ZS_MBR_TYPE_DOMPORT = 2, | ||
1734 | ZS_MBR_TYPE_PORTID = 3, | ||
1735 | ZS_MBR_TYPE_NWWN = 4, | ||
1736 | }; | ||
1737 | |||
1738 | struct zs_mbr_wwn_s { | ||
1739 | u8 mbr_type; | ||
1740 | u8 rsvd[3]; | ||
1741 | wwn_t wwn; | ||
1742 | }; | ||
1743 | |||
1744 | struct zs_query_resp_s { | ||
1745 | u32 nmbrs; /* number of zone members */ | ||
1746 | struct zs_mbr_wwn_s mbr[1]; | ||
1747 | }; | ||
1748 | |||
1749 | /* | ||
1750 | * GMAL Command ( Get ( interconnect Element) Management Address List) | 1445 | * GMAL Command ( Get ( interconnect Element) Management Address List) |
1751 | * To retrieve the IP Address of a Switch. | 1446 | * To retrieve the IP Address of a Switch. |
1752 | */ | 1447 | */ |
1753 | |||
1754 | #define CT_GMAL_RESP_PREFIX_TELNET "telnet://" | 1448 | #define CT_GMAL_RESP_PREFIX_TELNET "telnet://" |
1755 | #define CT_GMAL_RESP_PREFIX_HTTP "http://" | 1449 | #define CT_GMAL_RESP_PREFIX_HTTP "http://" |
1756 | 1450 | ||
@@ -1764,7 +1458,7 @@ struct fcgs_req_s { | |||
1764 | 1458 | ||
1765 | /* Accept Response to GMAL */ | 1459 | /* Accept Response to GMAL */ |
1766 | struct fcgs_gmal_resp_s { | 1460 | struct fcgs_gmal_resp_s { |
1767 | u32 ms_len; /* Num of entries */ | 1461 | __be32 ms_len; /* Num of entries */ |
1768 | u8 ms_ma[256]; | 1462 | u8 ms_ma[256]; |
1769 | }; | 1463 | }; |
1770 | 1464 | ||
@@ -1775,9 +1469,6 @@ struct fcgs_gmal_entry_s { | |||
1775 | }; | 1469 | }; |
1776 | 1470 | ||
1777 | /* | 1471 | /* |
1778 | * FDMI | ||
1779 | */ | ||
1780 | /* | ||
1781 | * FDMI Command Codes | 1472 | * FDMI Command Codes |
1782 | */ | 1473 | */ |
1783 | #define FDMI_GRHL 0x0100 | 1474 | #define FDMI_GRHL 0x0100 |
@@ -1856,8 +1547,8 @@ enum fdmi_port_attribute_type { | |||
1856 | * FDMI attribute | 1547 | * FDMI attribute |
1857 | */ | 1548 | */ |
1858 | struct fdmi_attr_s { | 1549 | struct fdmi_attr_s { |
1859 | u16 type; | 1550 | __be16 type; |
1860 | u16 len; | 1551 | __be16 len; |
1861 | u8 value[1]; | 1552 | u8 value[1]; |
1862 | }; | 1553 | }; |
1863 | 1554 | ||
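struct fdmi_attr_s is a classic TLV: a big-endian type and length followed by the value. A sketch of walking a received HBA attribute block, assuming (per the usual FDMI convention, not confirmed by this diff) that 'len' covers the 4-byte type/len header plus the padded value, and that 'hba_attr_blk' is a hypothetical pointer at a struct fdmi_hba_attr_s:

struct fdmi_attr_s *attr = &hba_attr_blk->hba_attr;
u32 count = be32_to_cpu(hba_attr_blk->attr_count);

while (count--) {
	u16 alen = be16_to_cpu(attr->len);
	/* ... interpret attr->type and attr->value[] here ... */
	attr = (struct fdmi_attr_s *) ((u8 *) attr + alen);
}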
@@ -1865,7 +1556,7 @@ struct fdmi_attr_s { | |||
1865 | * HBA Attribute Block | 1556 | * HBA Attribute Block |
1866 | */ | 1557 | */ |
1867 | struct fdmi_hba_attr_s { | 1558 | struct fdmi_hba_attr_s { |
1868 | u32 attr_count; /* # of attributes */ | 1559 | __be32 attr_count; /* # of attributes */ |
1869 | struct fdmi_attr_s hba_attr; /* n attributes */ | 1560 | struct fdmi_attr_s hba_attr; /* n attributes */ |
1870 | }; | 1561 | }; |
1871 | 1562 | ||
@@ -1873,15 +1564,15 @@ struct fdmi_hba_attr_s { | |||
1873 | * Registered Port List | 1564 | * Registered Port List |
1874 | */ | 1565 | */ |
1875 | struct fdmi_port_list_s { | 1566 | struct fdmi_port_list_s { |
1876 | u32 num_ports; /* number Of Port Entries */ | 1567 | __be32 num_ports; /* number Of Port Entries */ |
1877 | wwn_t port_entry; /* one or more */ | 1568 | wwn_t port_entry; /* one or more */ |
1878 | }; | 1569 | }; |
1879 | 1570 | ||
1880 | /* | 1571 | /* |
1881 | * Port Attribute Block | 1572 | * Port Attribute Block |
1882 | */ | 1573 | */ |
1883 | struct fdmi_port_attr_s { | 1574 | struct fdmi_port_attr_s { |
1884 | u32 attr_count; /* # of attributes */ | 1575 | __be32 attr_count; /* # of attributes */ |
1885 | struct fdmi_attr_s port_attr; /* n attributes */ | 1576 | struct fdmi_attr_s port_attr; /* n attributes */ |
1886 | }; | 1577 | }; |
1887 | 1578 | ||
@@ -1889,7 +1580,7 @@ struct fdmi_port_attr_s { | |||
1889 | * FDMI Register HBA Attributes | 1580 | * FDMI Register HBA Attributes |
1890 | */ | 1581 | */ |
1891 | struct fdmi_rhba_s { | 1582 | struct fdmi_rhba_s { |
1892 | wwn_t hba_id; /* HBA Identifier */ | 1583 | wwn_t hba_id; /* HBA Identifier */ |
1893 | struct fdmi_port_list_s port_list; /* Registered Port List */ | 1584 | struct fdmi_port_list_s port_list; /* Registered Port List */ |
1894 | struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */ | 1585 | struct fdmi_hba_attr_s hba_attr_blk; /* HBA attribute block */ |
1895 | }; | 1586 | }; |
@@ -1898,8 +1589,8 @@ struct fdmi_rhba_s { | |||
1898 | * FDMI Register Port | 1589 | * FDMI Register Port |
1899 | */ | 1590 | */ |
1900 | struct fdmi_rprt_s { | 1591 | struct fdmi_rprt_s { |
1901 | wwn_t hba_id; /* HBA Identifier */ | 1592 | wwn_t hba_id; /* HBA Identifier */ |
1902 | wwn_t port_name; /* Port wwn */ | 1593 | wwn_t port_name; /* Port wwn */ |
1903 | struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ | 1594 | struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ |
1904 | }; | 1595 | }; |
1905 | 1596 | ||
@@ -1907,7 +1598,7 @@ struct fdmi_rprt_s { | |||
1907 | * FDMI Register Port Attributes | 1598 | * FDMI Register Port Attributes |
1908 | */ | 1599 | */ |
1909 | struct fdmi_rpa_s { | 1600 | struct fdmi_rpa_s { |
1910 | wwn_t port_name; /* port wwn */ | 1601 | wwn_t port_name; /* port wwn */ |
1911 | struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ | 1602 | struct fdmi_port_attr_s port_attr_blk; /* Port Attr Block */ |
1912 | }; | 1603 | }; |
1913 | 1604 | ||
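The bfa_fc.h hunks above change on-wire FDMI/GMAL fields from plain u16/u32 to the sparse-checked __be16/__be32 types, so a missing byte-order conversion shows up at static-analysis time instead of on a big-endian target. A minimal sketch of that pattern in generic kernel C follows; the struct and function names are illustrative only and are not part of the bfa driver:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* illustrative only -- not a bfa structure */
    struct example_attr {
            __be16 type;            /* big-endian on the wire */
            __be16 len;
    };

    static void example_attr_fill(struct example_attr *attr, u16 type, u16 len)
    {
            /* cpu_to_be16() makes the conversion explicit and sparse-visible */
            attr->type = cpu_to_be16(type);
            attr->len  = cpu_to_be16(len);
    }

    static u16 example_attr_len(const struct example_attr *attr)
    {
            return be16_to_cpu(attr->len);  /* convert back for host-side use */
    }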
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index 9c725314b513..b7e253451654 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c | |||
@@ -18,16 +18,16 @@ | |||
18 | * fcbuild.c - FC link service frame building and parsing routines | 18 | * fcbuild.c - FC link service frame building and parsing routines |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include "bfa_os_inc.h" | 21 | #include "bfad_drv.h" |
22 | #include "bfa_fcbuild.h" | 22 | #include "bfa_fcbuild.h" |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * static build functions | 25 | * static build functions |
26 | */ | 26 | */ |
27 | static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | 27 | static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, |
28 | u16 ox_id); | 28 | __be16 ox_id); |
29 | static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | 29 | static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, |
30 | u16 ox_id); | 30 | __be16 ox_id); |
31 | static struct fchs_s fc_els_req_tmpl; | 31 | static struct fchs_s fc_els_req_tmpl; |
32 | static struct fchs_s fc_els_rsp_tmpl; | 32 | static struct fchs_s fc_els_rsp_tmpl; |
33 | static struct fchs_s fc_bls_req_tmpl; | 33 | static struct fchs_s fc_bls_req_tmpl; |
@@ -48,7 +48,7 @@ fcbuild_init(void) | |||
48 | fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; | 48 | fc_els_req_tmpl.cat_info = FC_CAT_LD_REQUEST; |
49 | fc_els_req_tmpl.type = FC_TYPE_ELS; | 49 | fc_els_req_tmpl.type = FC_TYPE_ELS; |
50 | fc_els_req_tmpl.f_ctl = | 50 | fc_els_req_tmpl.f_ctl = |
51 | bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | | 51 | bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | |
52 | FCTL_SI_XFER); | 52 | FCTL_SI_XFER); |
53 | fc_els_req_tmpl.rx_id = FC_RXID_ANY; | 53 | fc_els_req_tmpl.rx_id = FC_RXID_ANY; |
54 | 54 | ||
@@ -59,7 +59,7 @@ fcbuild_init(void) | |||
59 | fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; | 59 | fc_els_rsp_tmpl.cat_info = FC_CAT_LD_REPLY; |
60 | fc_els_rsp_tmpl.type = FC_TYPE_ELS; | 60 | fc_els_rsp_tmpl.type = FC_TYPE_ELS; |
61 | fc_els_rsp_tmpl.f_ctl = | 61 | fc_els_rsp_tmpl.f_ctl = |
62 | bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | | 62 | bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | |
63 | FCTL_END_SEQ | FCTL_SI_XFER); | 63 | FCTL_END_SEQ | FCTL_SI_XFER); |
64 | fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; | 64 | fc_els_rsp_tmpl.rx_id = FC_RXID_ANY; |
65 | 65 | ||
@@ -68,7 +68,7 @@ fcbuild_init(void) | |||
68 | */ | 68 | */ |
69 | fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; | 69 | fc_bls_req_tmpl.routing = FC_RTG_BASIC_LINK; |
70 | fc_bls_req_tmpl.type = FC_TYPE_BLS; | 70 | fc_bls_req_tmpl.type = FC_TYPE_BLS; |
71 | fc_bls_req_tmpl.f_ctl = bfa_os_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); | 71 | fc_bls_req_tmpl.f_ctl = bfa_hton3b(FCTL_END_SEQ | FCTL_SI_XFER); |
72 | fc_bls_req_tmpl.rx_id = FC_RXID_ANY; | 72 | fc_bls_req_tmpl.rx_id = FC_RXID_ANY; |
73 | 73 | ||
74 | /* | 74 | /* |
@@ -78,7 +78,7 @@ fcbuild_init(void) | |||
78 | fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; | 78 | fc_bls_rsp_tmpl.cat_info = FC_CAT_BA_ACC; |
79 | fc_bls_rsp_tmpl.type = FC_TYPE_BLS; | 79 | fc_bls_rsp_tmpl.type = FC_TYPE_BLS; |
80 | fc_bls_rsp_tmpl.f_ctl = | 80 | fc_bls_rsp_tmpl.f_ctl = |
81 | bfa_os_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | | 81 | bfa_hton3b(FCTL_EC_RESP | FCTL_SEQ_INI | FCTL_LS_EXCH | |
82 | FCTL_END_SEQ | FCTL_SI_XFER); | 82 | FCTL_END_SEQ | FCTL_SI_XFER); |
83 | fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; | 83 | fc_bls_rsp_tmpl.rx_id = FC_RXID_ANY; |
84 | 84 | ||
@@ -129,7 +129,7 @@ fcbuild_init(void) | |||
129 | fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; | 129 | fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD; |
130 | fcp_fchs_tmpl.type = FC_TYPE_FCP; | 130 | fcp_fchs_tmpl.type = FC_TYPE_FCP; |
131 | fcp_fchs_tmpl.f_ctl = | 131 | fcp_fchs_tmpl.f_ctl = |
132 | bfa_os_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); | 132 | bfa_hton3b(FCTL_FS_EXCH | FCTL_END_SEQ | FCTL_SI_XFER); |
133 | fcp_fchs_tmpl.seq_id = 1; | 133 | fcp_fchs_tmpl.seq_id = 1; |
134 | fcp_fchs_tmpl.rx_id = FC_RXID_ANY; | 134 | fcp_fchs_tmpl.rx_id = FC_RXID_ANY; |
135 | } | 135 | } |
@@ -143,7 +143,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) | |||
143 | fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; | 143 | fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; |
144 | fchs->type = FC_TYPE_SERVICES; | 144 | fchs->type = FC_TYPE_SERVICES; |
145 | fchs->f_ctl = | 145 | fchs->f_ctl = |
146 | bfa_os_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | | 146 | bfa_hton3b(FCTL_SEQ_INI | FCTL_FS_EXCH | FCTL_END_SEQ | |
147 | FCTL_SI_XFER); | 147 | FCTL_SI_XFER); |
148 | fchs->rx_id = FC_RXID_ANY; | 148 | fchs->rx_id = FC_RXID_ANY; |
149 | fchs->d_id = (d_id); | 149 | fchs->d_id = (d_id); |
@@ -157,7 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | void | 159 | void |
160 | fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) | 160 | fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) |
161 | { | 161 | { |
162 | memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); | 162 | memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); |
163 | fchs->d_id = (d_id); | 163 | fchs->d_id = (d_id); |
@@ -166,7 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) | |||
166 | } | 166 | } |
167 | 167 | ||
168 | static void | 168 | static void |
169 | fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) | 169 | fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) |
170 | { | 170 | { |
171 | memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); | 171 | memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); |
172 | fchs->d_id = d_id; | 172 | fchs->d_id = d_id; |
@@ -196,7 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len) | |||
196 | } | 196 | } |
197 | 197 | ||
198 | static void | 198 | static void |
199 | fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) | 199 | fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id) |
200 | { | 200 | { |
201 | memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); | 201 | memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); |
202 | fchs->d_id = d_id; | 202 | fchs->d_id = d_id; |
@@ -206,7 +206,7 @@ fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) | |||
206 | 206 | ||
207 | static u16 | 207 | static u16 |
208 | fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | 208 | fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, |
209 | u16 ox_id, wwn_t port_name, wwn_t node_name, | 209 | __be16 ox_id, wwn_t port_name, wwn_t node_name, |
210 | u16 pdu_size, u8 els_code) | 210 | u16 pdu_size, u8 els_code) |
211 | { | 211 | { |
212 | struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); | 212 | struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); |
@@ -232,8 +232,8 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | |||
232 | u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, | 232 | u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size, |
233 | u8 set_npiv, u8 set_auth, u16 local_bb_credits) | 233 | u8 set_npiv, u8 set_auth, u16 local_bb_credits) |
234 | { | 234 | { |
235 | u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); | 235 | u32 d_id = bfa_hton3b(FC_FABRIC_PORT); |
236 | u32 *vvl_info; | 236 | __be32 *vvl_info; |
237 | 237 | ||
238 | memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); | 238 | memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); |
239 | 239 | ||
@@ -267,7 +267,7 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | |||
267 | 267 | ||
268 | u16 | 268 | u16 |
269 | fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | 269 | fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, |
270 | u16 ox_id, wwn_t port_name, wwn_t node_name, | 270 | __be16 ox_id, wwn_t port_name, wwn_t node_name, |
271 | u16 pdu_size, u16 local_bb_credits) | 271 | u16 pdu_size, u16 local_bb_credits) |
272 | { | 272 | { |
273 | u32 d_id = 0; | 273 | u32 d_id = 0; |
@@ -289,7 +289,7 @@ u16 | |||
289 | fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, | 289 | fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, |
290 | u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) | 290 | u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size) |
291 | { | 291 | { |
292 | u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); | 292 | u32 d_id = bfa_hton3b(FC_FABRIC_PORT); |
293 | 293 | ||
294 | memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); | 294 | memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); |
295 | 295 | ||
@@ -392,7 +392,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
392 | 392 | ||
393 | u16 | 393 | u16 |
394 | fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | 394 | fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, |
395 | u16 ox_id, enum bfa_lport_role role) | 395 | __be16 ox_id, enum bfa_lport_role role) |
396 | { | 396 | { |
397 | struct fc_prli_s *prli = (struct fc_prli_s *) (pld); | 397 | struct fc_prli_s *prli = (struct fc_prli_s *) (pld); |
398 | 398 | ||
@@ -456,9 +456,9 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, | |||
456 | return sizeof(struct fc_logo_s); | 456 | return sizeof(struct fc_logo_s); |
457 | } | 457 | } |
458 | 458 | ||
459 | static u16 | 459 | static u16 |
460 | fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, | 460 | fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, |
461 | u32 s_id, u16 ox_id, wwn_t port_name, | 461 | u32 s_id, __be16 ox_id, wwn_t port_name, |
462 | wwn_t node_name, u8 els_code) | 462 | wwn_t node_name, u8 els_code) |
463 | { | 463 | { |
464 | memset(adisc, '\0', sizeof(struct fc_adisc_s)); | 464 | memset(adisc, '\0', sizeof(struct fc_adisc_s)); |
@@ -480,7 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, | |||
480 | 480 | ||
481 | u16 | 481 | u16 |
482 | fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, | 482 | fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, |
483 | u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name) | 483 | u32 s_id, __be16 ox_id, wwn_t port_name, wwn_t node_name) |
484 | { | 484 | { |
485 | return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, | 485 | return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, |
486 | node_name, FC_ELS_ADISC); | 486 | node_name, FC_ELS_ADISC); |
@@ -488,7 +488,7 @@ fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, | |||
488 | 488 | ||
489 | u16 | 489 | u16 |
490 | fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, | 490 | fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, |
491 | u32 s_id, u16 ox_id, wwn_t port_name, | 491 | u32 s_id, __be16 ox_id, wwn_t port_name, |
492 | wwn_t node_name) | 492 | wwn_t node_name) |
493 | { | 493 | { |
494 | return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, | 494 | return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name, |
@@ -592,7 +592,7 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, | |||
592 | 592 | ||
593 | u16 | 593 | u16 |
594 | fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | 594 | fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, |
595 | u16 ox_id) | 595 | __be16 ox_id) |
596 | { | 596 | { |
597 | struct fc_els_cmd_s *acc = pld; | 597 | struct fc_els_cmd_s *acc = pld; |
598 | 598 | ||
@@ -606,7 +606,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, | |||
606 | 606 | ||
607 | u16 | 607 | u16 |
608 | fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, | 608 | fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, |
609 | u32 s_id, u16 ox_id, u8 reason_code, | 609 | u32 s_id, __be16 ox_id, u8 reason_code, |
610 | u8 reason_code_expl) | 610 | u8 reason_code_expl) |
611 | { | 611 | { |
612 | fc_els_rsp_build(fchs, d_id, s_id, ox_id); | 612 | fc_els_rsp_build(fchs, d_id, s_id, ox_id); |
@@ -622,7 +622,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, | |||
622 | 622 | ||
623 | u16 | 623 | u16 |
624 | fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, | 624 | fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, |
625 | u32 s_id, u16 ox_id, u16 rx_id) | 625 | u32 s_id, __be16 ox_id, u16 rx_id) |
626 | { | 626 | { |
627 | fc_bls_rsp_build(fchs, d_id, s_id, ox_id); | 627 | fc_bls_rsp_build(fchs, d_id, s_id, ox_id); |
628 | 628 | ||
@@ -638,7 +638,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, | |||
638 | 638 | ||
639 | u16 | 639 | u16 |
640 | fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, | 640 | fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, |
641 | u32 s_id, u16 ox_id) | 641 | u32 s_id, __be16 ox_id) |
642 | { | 642 | { |
643 | fc_els_rsp_build(fchs, d_id, s_id, ox_id); | 643 | fc_els_rsp_build(fchs, d_id, s_id, ox_id); |
644 | memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); | 644 | memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); |
@@ -666,7 +666,7 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) | |||
666 | 666 | ||
667 | u16 | 667 | u16 |
668 | fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, | 668 | fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, |
669 | u32 d_id, u32 s_id, u16 ox_id, int num_pages) | 669 | u32 d_id, u32 s_id, __be16 ox_id, int num_pages) |
670 | { | 670 | { |
671 | int page; | 671 | int page; |
672 | 672 | ||
@@ -690,7 +690,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, | |||
690 | 690 | ||
691 | u16 | 691 | u16 |
692 | fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, | 692 | fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, |
693 | u32 s_id, u16 ox_id, int num_pages) | 693 | u32 s_id, __be16 ox_id, int num_pages) |
694 | { | 694 | { |
695 | int page; | 695 | int page; |
696 | 696 | ||
@@ -728,7 +728,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, | |||
728 | 728 | ||
729 | u16 | 729 | u16 |
730 | fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, | 730 | fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, |
731 | u32 s_id, u16 ox_id, u32 data_format, | 731 | u32 s_id, __be16 ox_id, u32 data_format, |
732 | struct fc_rnid_common_id_data_s *common_id_data, | 732 | struct fc_rnid_common_id_data_s *common_id_data, |
733 | struct fc_rnid_general_topology_data_s *gen_topo_data) | 733 | struct fc_rnid_general_topology_data_s *gen_topo_data) |
734 | { | 734 | { |
@@ -770,10 +770,10 @@ u16 | |||
770 | fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, | 770 | fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, |
771 | u32 s_id, u32 *pid_list, u16 npids) | 771 | u32 s_id, u32 *pid_list, u16 npids) |
772 | { | 772 | { |
773 | u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id)); | 773 | u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_hton3b(d_id)); |
774 | int i = 0; | 774 | int i = 0; |
775 | 775 | ||
776 | fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); | 776 | fc_els_req_build(fchs, bfa_hton3b(dctlr_id), s_id, 0); |
777 | 777 | ||
778 | memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); | 778 | memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); |
779 | 779 | ||
@@ -788,7 +788,7 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, | |||
788 | 788 | ||
789 | u16 | 789 | u16 |
790 | fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, | 790 | fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, |
791 | u32 d_id, u32 s_id, u16 ox_id, | 791 | u32 d_id, u32 s_id, __be16 ox_id, |
792 | struct fc_rpsc_speed_info_s *oper_speed) | 792 | struct fc_rpsc_speed_info_s *oper_speed) |
793 | { | 793 | { |
794 | memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); | 794 | memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); |
@@ -807,11 +807,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, | |||
807 | return sizeof(struct fc_rpsc_acc_s); | 807 | return sizeof(struct fc_rpsc_acc_s); |
808 | } | 808 | } |
809 | 809 | ||
810 | /* | ||
811 | * TBD - | ||
812 | * . get rid of unnecessary memsets | ||
813 | */ | ||
814 | |||
815 | u16 | 810 | u16 |
816 | fc_logo_rsp_parse(struct fchs_s *fchs, int len) | 811 | fc_logo_rsp_parse(struct fchs_s *fchs, int len) |
817 | { | 812 | { |
@@ -995,7 +990,7 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len) | |||
995 | } | 990 | } |
996 | 991 | ||
997 | u16 | 992 | u16 |
998 | fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, | 993 | fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id, |
999 | u32 reason_code, u32 reason_expl) | 994 | u32 reason_code, u32 reason_expl) |
1000 | { | 995 | { |
1001 | struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); | 996 | struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1); |
@@ -1045,7 +1040,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1045 | { | 1040 | { |
1046 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1041 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1047 | struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); | 1042 | struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1); |
1048 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1043 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1049 | 1044 | ||
1050 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1045 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
1051 | fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); | 1046 | fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); |
@@ -1061,7 +1056,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1061 | { | 1056 | { |
1062 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1057 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1063 | fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); | 1058 | fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1); |
1064 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1059 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1065 | 1060 | ||
1066 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1061 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
1067 | fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); | 1062 | fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); |
@@ -1077,7 +1072,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1077 | { | 1072 | { |
1078 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1073 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1079 | fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); | 1074 | fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1); |
1080 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1075 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1081 | 1076 | ||
1082 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1077 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
1083 | fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); | 1078 | fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); |
@@ -1104,7 +1099,7 @@ u16 | |||
1104 | fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, | 1099 | fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, |
1105 | u8 set_br_reg, u32 s_id, u16 ox_id) | 1100 | u8 set_br_reg, u32 s_id, u16 ox_id) |
1106 | { | 1101 | { |
1107 | u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); | 1102 | u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); |
1108 | 1103 | ||
1109 | fc_els_req_build(fchs, d_id, s_id, ox_id); | 1104 | fc_els_req_build(fchs, d_id, s_id, ox_id); |
1110 | 1105 | ||
@@ -1121,7 +1116,7 @@ u16 | |||
1121 | fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, | 1116 | fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, |
1122 | u32 s_id, u16 ox_id) | 1117 | u32 s_id, u16 ox_id) |
1123 | { | 1118 | { |
1124 | u32 d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER); | 1119 | u32 d_id = bfa_hton3b(FC_FABRIC_CONTROLLER); |
1125 | u16 payldlen; | 1120 | u16 payldlen; |
1126 | 1121 | ||
1127 | fc_els_req_build(fchs, d_id, s_id, ox_id); | 1122 | fc_els_req_build(fchs, d_id, s_id, ox_id); |
@@ -1143,7 +1138,7 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1143 | { | 1138 | { |
1144 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1139 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1145 | struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); | 1140 | struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); |
1146 | u32 type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1141 | u32 type_value, d_id = bfa_hton3b(FC_NAME_SERVER); |
1147 | u8 index; | 1142 | u8 index; |
1148 | 1143 | ||
1149 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1144 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
@@ -1167,7 +1162,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1167 | { | 1162 | { |
1168 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1163 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1169 | struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); | 1164 | struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1); |
1170 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1165 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1171 | 1166 | ||
1172 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1167 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
1173 | fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); | 1168 | fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); |
@@ -1187,7 +1182,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1187 | { | 1182 | { |
1188 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1183 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1189 | struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); | 1184 | struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1); |
1190 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1185 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1191 | 1186 | ||
1192 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1187 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
1193 | fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); | 1188 | fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); |
@@ -1209,7 +1204,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | |||
1209 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1204 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1210 | struct fcgs_rspnid_req_s *rspnid = | 1205 | struct fcgs_rspnid_req_s *rspnid = |
1211 | (struct fcgs_rspnid_req_s *)(cthdr + 1); | 1206 | (struct fcgs_rspnid_req_s *)(cthdr + 1); |
1212 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1207 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1213 | 1208 | ||
1214 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); | 1209 | fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); |
1215 | fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); | 1210 | fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); |
@@ -1229,7 +1224,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) | |||
1229 | 1224 | ||
1230 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1225 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1231 | struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); | 1226 | struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1); |
1232 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1227 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1233 | 1228 | ||
1234 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1229 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1235 | 1230 | ||
@@ -1249,7 +1244,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
1249 | { | 1244 | { |
1250 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1245 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1251 | struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); | 1246 | struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1); |
1252 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1247 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1253 | 1248 | ||
1254 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1249 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1255 | fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); | 1250 | fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); |
@@ -1267,7 +1262,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
1267 | { | 1262 | { |
1268 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1263 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1269 | struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); | 1264 | struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1); |
1270 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1265 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1271 | 1266 | ||
1272 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1267 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1273 | fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); | 1268 | fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); |
@@ -1286,7 +1281,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
1286 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1281 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1287 | struct fcgs_rcsid_req_s *rcsid = | 1282 | struct fcgs_rcsid_req_s *rcsid = |
1288 | (struct fcgs_rcsid_req_s *) (cthdr + 1); | 1283 | (struct fcgs_rcsid_req_s *) (cthdr + 1); |
1289 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1284 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1290 | 1285 | ||
1291 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1286 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1292 | fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); | 1287 | fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); |
@@ -1304,7 +1299,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, | |||
1304 | { | 1299 | { |
1305 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1300 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1306 | struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); | 1301 | struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1); |
1307 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1302 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1308 | 1303 | ||
1309 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1304 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1310 | fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); | 1305 | fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); |
@@ -1321,7 +1316,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) | |||
1321 | { | 1316 | { |
1322 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1317 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1323 | struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); | 1318 | struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1); |
1324 | u32 d_id = bfa_os_hton3b(FC_NAME_SERVER); | 1319 | u32 d_id = bfa_hton3b(FC_NAME_SERVER); |
1325 | 1320 | ||
1326 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1321 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1327 | fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); | 1322 | fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); |
@@ -1341,7 +1336,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, | |||
1341 | { | 1336 | { |
1342 | 1337 | ||
1343 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1338 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1344 | u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); | 1339 | u32 d_id = bfa_hton3b(FC_MGMT_SERVER); |
1345 | 1340 | ||
1346 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1341 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1347 | fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); | 1342 | fc_gs_fdmi_cthdr_build(cthdr, s_id, cmd_code); |
@@ -1356,7 +1351,7 @@ void | |||
1356 | fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) | 1351 | fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) |
1357 | { | 1352 | { |
1358 | u8 index; | 1353 | u8 index; |
1359 | u32 *ptr = (u32 *) bit_mask; | 1354 | __be32 *ptr = (__be32 *) bit_mask; |
1360 | u32 type_value; | 1355 | u32 type_value; |
1361 | 1356 | ||
1362 | /* | 1357 | /* |
@@ -1377,7 +1372,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) | |||
1377 | { | 1372 | { |
1378 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1373 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1379 | fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); | 1374 | fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1); |
1380 | u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); | 1375 | u32 d_id = bfa_hton3b(FC_MGMT_SERVER); |
1381 | 1376 | ||
1382 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1377 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1383 | fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, | 1378 | fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, |
@@ -1397,7 +1392,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) | |||
1397 | { | 1392 | { |
1398 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; | 1393 | struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld; |
1399 | fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); | 1394 | fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1); |
1400 | u32 d_id = bfa_os_hton3b(FC_MGMT_SERVER); | 1395 | u32 d_id = bfa_hton3b(FC_MGMT_SERVER); |
1401 | 1396 | ||
1402 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); | 1397 | fc_gs_fchdr_build(fchs, d_id, s_id, 0); |
1403 | fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, | 1398 | fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, |
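The bfa_fcbuild.c hunks above also rename every bfa_os_hton3b() call to bfa_hton3b() as part of retiring the bfa_os_inc.h OS-wrapper header in favour of bfad_drv.h. The helper byte-swaps a 24-bit FC address (domain/area/port) into wire order; a plausible stand-alone sketch on a little-endian host is shown below (the macro body is an assumption for illustration, not copied from the driver):

    /* hypothetical 3-byte swap for a 24-bit FC_ID, little-endian host assumed */
    #define example_hton3b(x)   ((((x) & 0x0000ffu) << 16) | \
                                  ((x) & 0x00ff00u)        | \
                                 (((x) & 0xff0000u) >> 16))

    /* e.g. example_hton3b(0x010203) == 0x030201, so the three bytes land in
     * big-endian order when stored into a packed 24-bit header field.
     */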
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h index 73abd02e53cc..ece51ec7620b 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.h +++ b/drivers/scsi/bfa/bfa_fcbuild.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #ifndef __FCBUILD_H__ | 21 | #ifndef __FCBUILD_H__ |
22 | #define __FCBUILD_H__ | 22 | #define __FCBUILD_H__ |
23 | 23 | ||
24 | #include "bfa_os_inc.h" | 24 | #include "bfad_drv.h" |
25 | #include "bfa_fc.h" | 25 | #include "bfa_fc.h" |
26 | #include "bfa_defs_fcs.h" | 26 | #include "bfa_defs_fcs.h" |
27 | 27 | ||
@@ -138,7 +138,7 @@ u16 fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id, | |||
138 | u16 pdu_size); | 138 | u16 pdu_size); |
139 | 139 | ||
140 | u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, | 140 | u16 fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, |
141 | u32 s_id, u16 ox_id, | 141 | u32 s_id, __be16 ox_id, |
142 | wwn_t port_name, wwn_t node_name, | 142 | wwn_t port_name, wwn_t node_name, |
143 | u16 pdu_size, | 143 | u16 pdu_size, |
144 | u16 local_bb_credits); | 144 | u16 local_bb_credits); |
@@ -186,7 +186,7 @@ u16 fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, | |||
186 | u16 pdu_size); | 186 | u16 pdu_size); |
187 | 187 | ||
188 | u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, | 188 | u16 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, |
189 | u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, | 189 | u32 d_id, u32 s_id, __be16 ox_id, wwn_t port_name, |
190 | wwn_t node_name); | 190 | wwn_t node_name); |
191 | 191 | ||
192 | enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, | 192 | enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld, |
@@ -196,20 +196,20 @@ enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, | |||
196 | wwn_t port_name, wwn_t node_name); | 196 | wwn_t port_name, wwn_t node_name); |
197 | 197 | ||
198 | u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, | 198 | u16 fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, |
199 | u32 d_id, u32 s_id, u16 ox_id, | 199 | u32 d_id, u32 s_id, __be16 ox_id, |
200 | wwn_t port_name, wwn_t node_name); | 200 | wwn_t port_name, wwn_t node_name); |
201 | u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, | 201 | u16 fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, |
202 | u32 d_id, u32 s_id, u16 ox_id, | 202 | u32 d_id, u32 s_id, __be16 ox_id, |
203 | u8 reason_code, u8 reason_code_expl); | 203 | u8 reason_code, u8 reason_code_expl); |
204 | u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, | 204 | u16 fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, |
205 | u32 d_id, u32 s_id, u16 ox_id); | 205 | u32 d_id, u32 s_id, __be16 ox_id); |
206 | u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, | 206 | u16 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, |
207 | u32 s_id, u16 ox_id); | 207 | u32 s_id, u16 ox_id); |
208 | 208 | ||
209 | enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len); | 209 | enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len); |
210 | 210 | ||
211 | u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, | 211 | u16 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, |
212 | u32 s_id, u16 ox_id, | 212 | u32 s_id, __be16 ox_id, |
213 | enum bfa_lport_role role); | 213 | enum bfa_lport_role role); |
214 | 214 | ||
215 | u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, | 215 | u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, |
@@ -218,7 +218,7 @@ u16 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, | |||
218 | 218 | ||
219 | u16 fc_rnid_acc_build(struct fchs_s *fchs, | 219 | u16 fc_rnid_acc_build(struct fchs_s *fchs, |
220 | struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, | 220 | struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id, |
221 | u16 ox_id, u32 data_format, | 221 | __be16 ox_id, u32 data_format, |
222 | struct fc_rnid_common_id_data_s *common_id_data, | 222 | struct fc_rnid_common_id_data_s *common_id_data, |
223 | struct fc_rnid_general_topology_data_s *gen_topo_data); | 223 | struct fc_rnid_general_topology_data_s *gen_topo_data); |
224 | 224 | ||
@@ -228,7 +228,7 @@ u16 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, | |||
228 | u32 d_id, u32 s_id, u16 ox_id); | 228 | u32 d_id, u32 s_id, u16 ox_id); |
229 | u16 fc_rpsc_acc_build(struct fchs_s *fchs, | 229 | u16 fc_rpsc_acc_build(struct fchs_s *fchs, |
230 | struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, | 230 | struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, |
231 | u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed); | 231 | __be16 ox_id, struct fc_rpsc_speed_info_s *oper_speed); |
232 | u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, | 232 | u16 fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id, |
233 | u8 fc4_type); | 233 | u8 fc4_type); |
234 | 234 | ||
@@ -251,7 +251,7 @@ u16 fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, | |||
251 | u32 s_id, u16 ox_id, wwn_t port_name); | 251 | u32 s_id, u16 ox_id, wwn_t port_name); |
252 | 252 | ||
253 | u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, | 253 | u16 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, |
254 | u32 s_id, u16 ox_id); | 254 | u32 s_id, __be16 ox_id); |
255 | 255 | ||
256 | u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, | 256 | u16 fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id, |
257 | u16 cmd_code); | 257 | u16 cmd_code); |
@@ -261,7 +261,7 @@ u16 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn); | |||
261 | void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); | 261 | void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask); |
262 | 262 | ||
263 | void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | 263 | void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, |
264 | u16 ox_id); | 264 | __be16 ox_id); |
265 | 265 | ||
266 | enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len); | 266 | enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len); |
267 | 267 | ||
@@ -274,15 +274,15 @@ enum fc_parse_status fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, | |||
274 | wwn_t port_name); | 274 | wwn_t port_name); |
275 | 275 | ||
276 | u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, | 276 | u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, |
277 | u32 s_id, u16 ox_id, u16 rx_id); | 277 | u32 s_id, __be16 ox_id, u16 rx_id); |
278 | 278 | ||
279 | int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); | 279 | int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code); |
280 | 280 | ||
281 | u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, | 281 | u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, |
282 | u32 d_id, u32 s_id, u16 ox_id, int num_pages); | 282 | u32 d_id, u32 s_id, __be16 ox_id, int num_pages); |
283 | 283 | ||
284 | u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, | 284 | u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, |
285 | u32 d_id, u32 s_id, u16 ox_id, int num_pages); | 285 | u32 d_id, u32 s_id, __be16 ox_id, int num_pages); |
286 | 286 | ||
287 | u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len); | 287 | u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len); |
288 | 288 | ||
@@ -304,7 +304,7 @@ u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | |||
304 | u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len); | 304 | u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len); |
305 | 305 | ||
306 | u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, | 306 | u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, |
307 | u16 ox_id, u32 reason_code, u32 reason_expl); | 307 | __be16 ox_id, u32 reason_code, u32 reason_expl); |
308 | 308 | ||
309 | u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, | 309 | u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, |
310 | u32 port_id); | 310 | u32 port_id); |
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 135c4427801c..9c410b21db6d 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c | |||
@@ -15,17 +15,12 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_modules.h" | 19 | #include "bfa_modules.h" |
19 | #include "bfa_cb_ioim.h" | ||
20 | 20 | ||
21 | BFA_TRC_FILE(HAL, FCPIM); | 21 | BFA_TRC_FILE(HAL, FCPIM); |
22 | BFA_MODULE(fcpim); | 22 | BFA_MODULE(fcpim); |
23 | 23 | ||
24 | |||
25 | #define bfa_fcpim_add_iostats(__l, __r, __stats) \ | ||
26 | (__l->__stats += __r->__stats) | ||
27 | |||
28 | |||
29 | /* | 24 | /* |
30 | * BFA ITNIM Related definitions | 25 | * BFA ITNIM Related definitions |
31 | */ | 26 | */ |
@@ -37,12 +32,12 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); | |||
37 | #define bfa_fcpim_additn(__itnim) \ | 32 | #define bfa_fcpim_additn(__itnim) \ |
38 | list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) | 33 | list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q) |
39 | #define bfa_fcpim_delitn(__itnim) do { \ | 34 | #define bfa_fcpim_delitn(__itnim) do { \ |
40 | bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ | 35 | WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \ |
41 | bfa_itnim_update_del_itn_stats(__itnim); \ | 36 | bfa_itnim_update_del_itn_stats(__itnim); \ |
42 | list_del(&(__itnim)->qe); \ | 37 | list_del(&(__itnim)->qe); \ |
43 | bfa_assert(list_empty(&(__itnim)->io_q)); \ | 38 | WARN_ON(!list_empty(&(__itnim)->io_q)); \ |
44 | bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \ | 39 | WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \ |
45 | bfa_assert(list_empty(&(__itnim)->pending_q)); \ | 40 | WARN_ON(!list_empty(&(__itnim)->pending_q)); \ |
46 | } while (0) | 41 | } while (0) |
47 | 42 | ||
48 | #define bfa_itnim_online_cb(__itnim) do { \ | 43 | #define bfa_itnim_online_cb(__itnim) do { \ |
@@ -73,10 +68,8 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); | |||
73 | } while (0) | 68 | } while (0) |
74 | 69 | ||
75 | /* | 70 | /* |
76 | * bfa_itnim_sm BFA itnim state machine | 71 | * itnim state machine event |
77 | */ | 72 | */ |
78 | |||
79 | |||
80 | enum bfa_itnim_event { | 73 | enum bfa_itnim_event { |
81 | BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ | 74 | BFA_ITNIM_SM_CREATE = 1, /* itnim is created */ |
82 | BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ | 75 | BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */ |
@@ -107,9 +100,6 @@ enum bfa_itnim_event { | |||
107 | if ((__fcpim)->profile_start) \ | 100 | if ((__fcpim)->profile_start) \ |
108 | (__fcpim)->profile_start(__ioim); \ | 101 | (__fcpim)->profile_start(__ioim); \ |
109 | } while (0) | 102 | } while (0) |
110 | /* | ||
111 | * hal_ioim_sm | ||
112 | */ | ||
113 | 103 | ||
114 | /* | 104 | /* |
115 | * IO state machine events | 105 | * IO state machine events |
@@ -221,8 +211,7 @@ static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, | |||
221 | * forward declaration for BFA IOIM functions | 211 | * forward declaration for BFA IOIM functions |
222 | */ | 212 | */ |
223 | static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); | 213 | static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); |
224 | static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim); | 214 | static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim); |
225 | static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim); | ||
226 | static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); | 215 | static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim); |
227 | static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); | 216 | static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim); |
228 | static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); | 217 | static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete); |
@@ -232,7 +221,6 @@ static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete); | |||
232 | static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); | 221 | static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); |
233 | static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); | 222 | static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); |
234 | 223 | ||
235 | |||
236 | /* | 224 | /* |
237 | * forward declaration of BFA IO state machine | 225 | * forward declaration of BFA IO state machine |
238 | */ | 226 | */ |
@@ -260,14 +248,13 @@ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, | |||
260 | enum bfa_ioim_event event); | 248 | enum bfa_ioim_event event); |
261 | static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, | 249 | static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, |
262 | enum bfa_ioim_event event); | 250 | enum bfa_ioim_event event); |
263 | |||
264 | /* | 251 | /* |
265 | * forward declaration for BFA TSKIM functions | 252 | * forward declaration for BFA TSKIM functions |
266 | */ | 253 | */ |
267 | static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); | 254 | static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); |
268 | static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); | 255 | static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete); |
269 | static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, | 256 | static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim, |
270 | lun_t lun); | 257 | struct scsi_lun lun); |
271 | static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); | 258 | static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim); |
272 | static void bfa_tskim_cleanp_comp(void *tskim_cbarg); | 259 | static void bfa_tskim_cleanp_comp(void *tskim_cbarg); |
273 | static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); | 260 | static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim); |
@@ -275,7 +262,6 @@ static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim); | |||
275 | static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); | 262 | static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); |
276 | static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); | 263 | static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); |
277 | 264 | ||
278 | |||
279 | /* | 265 | /* |
280 | * forward declaration of BFA TSKIM state machine | 266 | * forward declaration of BFA TSKIM state machine |
281 | */ | 267 | */ |
@@ -293,13 +279,12 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, | |||
293 | enum bfa_tskim_event event); | 279 | enum bfa_tskim_event event); |
294 | static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, | 280 | static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, |
295 | enum bfa_tskim_event event); | 281 | enum bfa_tskim_event event); |
296 | |||
297 | /* | 282 | /* |
298 | * hal_fcpim_mod BFA FCP Initiator Mode module | 283 | * BFA FCP Initiator Mode module |
299 | */ | 284 | */ |
300 | 285 | ||
301 | /* | 286 | /* |
302 | * Compute and return memory needed by FCP(im) module. | 287 | * Compute and return memory needed by FCP(im) module. |
303 | */ | 288 | */ |
304 | static void | 289 | static void |
305 | bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, | 290 | bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, |
@@ -357,10 +342,6 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
357 | static void | 342 | static void |
358 | bfa_fcpim_detach(struct bfa_s *bfa) | 343 | bfa_fcpim_detach(struct bfa_s *bfa) |
359 | { | 344 | { |
360 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
361 | |||
362 | bfa_ioim_detach(fcpim); | ||
363 | bfa_tskim_detach(fcpim); | ||
364 | } | 345 | } |
365 | 346 | ||
366 | static void | 347 | static void |
@@ -387,56 +368,6 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa) | |||
387 | } | 368 | } |
388 | 369 | ||
389 | void | 370 | void |
390 | bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats, | ||
391 | struct bfa_itnim_iostats_s *rstats) | ||
392 | { | ||
393 | bfa_fcpim_add_iostats(lstats, rstats, total_ios); | ||
394 | bfa_fcpim_add_iostats(lstats, rstats, qresumes); | ||
395 | bfa_fcpim_add_iostats(lstats, rstats, no_iotags); | ||
396 | bfa_fcpim_add_iostats(lstats, rstats, io_aborts); | ||
397 | bfa_fcpim_add_iostats(lstats, rstats, no_tskims); | ||
398 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok); | ||
399 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun); | ||
400 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun); | ||
401 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted); | ||
402 | bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout); | ||
403 | bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort); | ||
404 | bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err); | ||
405 | bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err); | ||
406 | bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed); | ||
407 | bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free); | ||
408 | bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts); | ||
409 | bfa_fcpim_add_iostats(lstats, rstats, iocom_utags); | ||
410 | bfa_fcpim_add_iostats(lstats, rstats, io_cleanups); | ||
411 | bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts); | ||
412 | bfa_fcpim_add_iostats(lstats, rstats, onlines); | ||
413 | bfa_fcpim_add_iostats(lstats, rstats, offlines); | ||
414 | bfa_fcpim_add_iostats(lstats, rstats, creates); | ||
415 | bfa_fcpim_add_iostats(lstats, rstats, deletes); | ||
416 | bfa_fcpim_add_iostats(lstats, rstats, create_comps); | ||
417 | bfa_fcpim_add_iostats(lstats, rstats, delete_comps); | ||
418 | bfa_fcpim_add_iostats(lstats, rstats, sler_events); | ||
419 | bfa_fcpim_add_iostats(lstats, rstats, fw_create); | ||
420 | bfa_fcpim_add_iostats(lstats, rstats, fw_delete); | ||
421 | bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled); | ||
422 | bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps); | ||
423 | bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds); | ||
424 | bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps); | ||
425 | bfa_fcpim_add_iostats(lstats, rstats, tm_success); | ||
426 | bfa_fcpim_add_iostats(lstats, rstats, tm_failures); | ||
427 | bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps); | ||
428 | bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes); | ||
429 | bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns); | ||
430 | bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups); | ||
431 | bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps); | ||
432 | bfa_fcpim_add_iostats(lstats, rstats, io_comps); | ||
433 | bfa_fcpim_add_iostats(lstats, rstats, input_reqs); | ||
434 | bfa_fcpim_add_iostats(lstats, rstats, output_reqs); | ||
435 | bfa_fcpim_add_iostats(lstats, rstats, rd_throughput); | ||
436 | bfa_fcpim_add_iostats(lstats, rstats, wr_throughput); | ||
437 | } | ||
438 | |||
439 | void | ||
440 | bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) | 371 | bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov) |
441 | { | 372 | { |
442 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | 373 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); |
@@ -454,128 +385,6 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa) | |||
454 | return fcpim->path_tov / 1000; | 385 | return fcpim->path_tov / 1000; |
455 | } | 386 | } |
456 | 387 | ||
457 | bfa_status_t | ||
458 | bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats, | ||
459 | u8 lp_tag) | ||
460 | { | ||
461 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
462 | struct list_head *qe, *qen; | ||
463 | struct bfa_itnim_s *itnim; | ||
464 | |||
465 | /* accumulate IO stats from itnim */ | ||
466 | memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); | ||
467 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { | ||
468 | itnim = (struct bfa_itnim_s *) qe; | ||
469 | if (itnim->rport->rport_info.lp_tag != lp_tag) | ||
470 | continue; | ||
471 | bfa_fcpim_add_stats(stats, &(itnim->stats)); | ||
472 | } | ||
473 | return BFA_STATUS_OK; | ||
474 | } | ||
475 | bfa_status_t | ||
476 | bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats) | ||
477 | { | ||
478 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
479 | struct list_head *qe, *qen; | ||
480 | struct bfa_itnim_s *itnim; | ||
481 | |||
482 | /* accumulate IO stats from itnim */ | ||
483 | memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); | ||
484 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { | ||
485 | itnim = (struct bfa_itnim_s *) qe; | ||
486 | bfa_fcpim_add_stats(modstats, &(itnim->stats)); | ||
487 | } | ||
488 | return BFA_STATUS_OK; | ||
489 | } | ||
490 | |||
491 | bfa_status_t | ||
492 | bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa, | ||
493 | struct bfa_fcpim_del_itn_stats_s *modstats) | ||
494 | { | ||
495 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
496 | |||
497 | *modstats = fcpim->del_itn_stats; | ||
498 | |||
499 | return BFA_STATUS_OK; | ||
500 | } | ||
501 | |||
502 | |||
503 | bfa_status_t | ||
504 | bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time) | ||
505 | { | ||
506 | struct bfa_itnim_s *itnim; | ||
507 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
508 | struct list_head *qe, *qen; | ||
509 | |||
510 | /* accumulate IO stats from itnim */ | ||
511 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { | ||
512 | itnim = (struct bfa_itnim_s *) qe; | ||
513 | bfa_itnim_clear_stats(itnim); | ||
514 | } | ||
515 | fcpim->io_profile = BFA_TRUE; | ||
516 | fcpim->io_profile_start_time = time; | ||
517 | fcpim->profile_comp = bfa_ioim_profile_comp; | ||
518 | fcpim->profile_start = bfa_ioim_profile_start; | ||
519 | |||
520 | return BFA_STATUS_OK; | ||
521 | } | ||
522 | bfa_status_t | ||
523 | bfa_fcpim_profile_off(struct bfa_s *bfa) | ||
524 | { | ||
525 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
526 | fcpim->io_profile = BFA_FALSE; | ||
527 | fcpim->io_profile_start_time = 0; | ||
528 | fcpim->profile_comp = NULL; | ||
529 | fcpim->profile_start = NULL; | ||
530 | return BFA_STATUS_OK; | ||
531 | } | ||
532 | |||
533 | bfa_status_t | ||
534 | bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag) | ||
535 | { | ||
536 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
537 | struct list_head *qe, *qen; | ||
538 | struct bfa_itnim_s *itnim; | ||
539 | |||
540 | /* clear IO stats from all active itnims */ | ||
541 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { | ||
542 | itnim = (struct bfa_itnim_s *) qe; | ||
543 | if (itnim->rport->rport_info.lp_tag != lp_tag) | ||
544 | continue; | ||
545 | bfa_itnim_clear_stats(itnim); | ||
546 | } | ||
547 | return BFA_STATUS_OK; | ||
548 | |||
549 | } | ||
550 | |||
551 | bfa_status_t | ||
552 | bfa_fcpim_clr_modstats(struct bfa_s *bfa) | ||
553 | { | ||
554 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
555 | struct list_head *qe, *qen; | ||
556 | struct bfa_itnim_s *itnim; | ||
557 | |||
558 | /* clear IO stats from all active itnims */ | ||
559 | list_for_each_safe(qe, qen, &fcpim->itnim_q) { | ||
560 | itnim = (struct bfa_itnim_s *) qe; | ||
561 | bfa_itnim_clear_stats(itnim); | ||
562 | } | ||
563 | memset(&fcpim->del_itn_stats, 0, | ||
564 | sizeof(struct bfa_fcpim_del_itn_stats_s)); | ||
565 | |||
566 | return BFA_STATUS_OK; | ||
567 | } | ||
568 | |||
569 | void | ||
570 | bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth) | ||
571 | { | ||
572 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
573 | |||
574 | bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX); | ||
575 | |||
576 | fcpim->q_depth = q_depth; | ||
577 | } | ||
578 | |||
579 | u16 | 388 | u16 |
580 | bfa_fcpim_qdepth_get(struct bfa_s *bfa) | 389 | bfa_fcpim_qdepth_get(struct bfa_s *bfa) |
581 | { | 390 | { |
@@ -584,32 +393,12 @@ bfa_fcpim_qdepth_get(struct bfa_s *bfa) | |||
584 | return fcpim->q_depth; | 393 | return fcpim->q_depth; |
585 | } | 394 | } |
586 | 395 | ||
587 | void | ||
588 | bfa_fcpim_update_ioredirect(struct bfa_s *bfa) | ||
589 | { | ||
590 | bfa_boolean_t ioredirect; | ||
591 | |||
592 | /* | ||
593 | * IO redirection is turned off when QoS is enabled and vice versa | ||
594 | */ | ||
595 | ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE; | ||
596 | } | ||
597 | |||
598 | void | ||
599 | bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state) | ||
600 | { | ||
601 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | ||
602 | fcpim->ioredirect = state; | ||
603 | } | ||
604 | |||
605 | |||
606 | |||
607 | /* | 396 | /* |
608 | * BFA ITNIM module state machine functions | 397 | * BFA ITNIM module state machine functions |
609 | */ | 398 | */ |
610 | 399 | ||
611 | /* | 400 | /* |
612 | * Beginning/unallocated state - no events expected. | 401 | * Beginning/unallocated state - no events expected. |
613 | */ | 402 | */ |
614 | static void | 403 | static void |
615 | bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | 404 | bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
@@ -630,7 +419,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | |||
630 | } | 419 | } |
631 | 420 | ||
632 | /* | 421 | /* |
633 | * Beginning state, only online event expected. | 422 | * Beginning state, only online event expected. |
634 | */ | 423 | */ |
635 | static void | 424 | static void |
636 | bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | 425 | bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
@@ -733,7 +522,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, | |||
733 | } | 522 | } |
734 | 523 | ||
735 | /* | 524 | /* |
736 | * Waiting for itnim create response from firmware, a delete is pending. | 525 | * Waiting for itnim create response from firmware, a delete is pending. |
737 | */ | 526 | */ |
738 | static void | 527 | static void |
739 | bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, | 528 | bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, |
@@ -761,7 +550,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, | |||
761 | } | 550 | } |
762 | 551 | ||
763 | /* | 552 | /* |
764 | * Online state - normal parking state. | 553 | * Online state - normal parking state. |
765 | */ | 554 | */ |
766 | static void | 555 | static void |
767 | bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | 556 | bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
@@ -803,7 +592,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | |||
803 | } | 592 | } |
804 | 593 | ||
805 | /* | 594 | /* |
806 | * Second level error recovery needed. | 595 | * Second level error recovery needed. |
807 | */ | 596 | */ |
808 | static void | 597 | static void |
809 | bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | 598 | bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
@@ -834,7 +623,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | |||
834 | } | 623 | } |
835 | 624 | ||
836 | /* | 625 | /* |
837 | * Going offline. Waiting for active IO cleanup. | 626 | * Going offline. Waiting for active IO cleanup. |
838 | */ | 627 | */ |
839 | static void | 628 | static void |
840 | bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, | 629 | bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, |
@@ -871,7 +660,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, | |||
871 | } | 660 | } |
872 | 661 | ||
873 | /* | 662 | /* |
874 | * Deleting itnim. Waiting for active IO cleanup. | 663 | * Deleting itnim. Waiting for active IO cleanup. |
875 | */ | 664 | */ |
876 | static void | 665 | static void |
877 | bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, | 666 | bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, |
@@ -956,7 +745,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, | |||
956 | } | 745 | } |
957 | 746 | ||
958 | /* | 747 | /* |
959 | * Offline state. | 748 | * Offline state. |
960 | */ | 749 | */ |
961 | static void | 750 | static void |
962 | bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | 751 | bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
@@ -987,9 +776,6 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | |||
987 | } | 776 | } |
988 | } | 777 | } |
989 | 778 | ||
990 | /* | ||
991 | * IOC h/w failed state. | ||
992 | */ | ||
993 | static void | 779 | static void |
994 | bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, | 780 | bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, |
995 | enum bfa_itnim_event event) | 781 | enum bfa_itnim_event event) |
@@ -1024,7 +810,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, | |||
1024 | } | 810 | } |
1025 | 811 | ||
1026 | /* | 812 | /* |
1027 | * Itnim is deleted, waiting for firmware response to delete. | 813 | * Itnim is deleted, waiting for firmware response to delete. |
1028 | */ | 814 | */ |
1029 | static void | 815 | static void |
1030 | bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) | 816 | bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) |
@@ -1069,7 +855,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, | |||
1069 | } | 855 | } |
1070 | 856 | ||
1071 | /* | 857 | /* |
1072 | * Initiate cleanup of all IOs on an IOC failure. | 858 | * Initiate cleanup of all IOs on an IOC failure. |
1073 | */ | 859 | */ |
1074 | static void | 860 | static void |
1075 | bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) | 861 | bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) |
@@ -1103,7 +889,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) | |||
1103 | } | 889 | } |
1104 | 890 | ||
1105 | /* | 891 | /* |
1106 | * IO cleanup completion | 892 | * IO cleanup completion |
1107 | */ | 893 | */ |
1108 | static void | 894 | static void |
1109 | bfa_itnim_cleanp_comp(void *itnim_cbarg) | 895 | bfa_itnim_cleanp_comp(void *itnim_cbarg) |
@@ -1115,7 +901,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg) | |||
1115 | } | 901 | } |
1116 | 902 | ||
1117 | /* | 903 | /* |
1118 | * Initiate cleanup of all IOs. | 904 | * Initiate cleanup of all IOs. |
1119 | */ | 905 | */ |
1120 | static void | 906 | static void |
1121 | bfa_itnim_cleanup(struct bfa_itnim_s *itnim) | 907 | bfa_itnim_cleanup(struct bfa_itnim_s *itnim) |
@@ -1187,9 +973,6 @@ bfa_itnim_qresume(void *cbarg) | |||
1187 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); | 973 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME); |
1188 | } | 974 | } |
1189 | 975 | ||
1190 | |||
1191 | |||
1192 | |||
1193 | /* | 976 | /* |
1194 | * bfa_itnim_public | 977 | * bfa_itnim_public |
1195 | */ | 978 | */ |
@@ -1401,7 +1184,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) | |||
1401 | if (itnim->fcpim->path_tov > 0) { | 1184 | if (itnim->fcpim->path_tov > 0) { |
1402 | 1185 | ||
1403 | itnim->iotov_active = BFA_TRUE; | 1186 | itnim->iotov_active = BFA_TRUE; |
1404 | bfa_assert(bfa_itnim_hold_io(itnim)); | 1187 | WARN_ON(!bfa_itnim_hold_io(itnim)); |
1405 | bfa_timer_start(itnim->bfa, &itnim->timer, | 1188 | bfa_timer_start(itnim->bfa, &itnim->timer, |
1406 | bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); | 1189 | bfa_itnim_iotov, itnim, itnim->fcpim->path_tov); |
1407 | } | 1190 | } |
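This hunk is one of many in the patch that replace the driver's private bfa_assert() with the kernel's WARN_ON(). The two macros fire on opposite senses, so every converted call site negates its condition; a minimal illustration of the equivalence, assuming bfa_assert() complains when its argument is false:

	/* old: complain when the condition does NOT hold */
	bfa_assert(bfa_itnim_hold_io(itnim));

	/* new: WARN_ON() complains when its expression IS true,
	 * so the tested condition is negated in the conversion */
	WARN_ON(!bfa_itnim_hold_io(itnim));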
@@ -1457,14 +1240,12 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) | |||
1457 | fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; | 1240 | fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns; |
1458 | } | 1241 | } |
1459 | 1242 | ||
1460 | |||
1461 | |||
1462 | /* | 1243 | /* |
1463 | * bfa_itnim_public | 1244 | * bfa_itnim_public |
1464 | */ | 1245 | */ |
1465 | 1246 | ||
1466 | /* | 1247 | /* |
1467 | * Itnim interrupt processing. | 1248 | * Itnim interrupt processing. |
1468 | */ | 1249 | */ |
1469 | void | 1250 | void |
1470 | bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | 1251 | bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
@@ -1481,7 +1262,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
1481 | case BFI_ITNIM_I2H_CREATE_RSP: | 1262 | case BFI_ITNIM_I2H_CREATE_RSP: |
1482 | itnim = BFA_ITNIM_FROM_TAG(fcpim, | 1263 | itnim = BFA_ITNIM_FROM_TAG(fcpim, |
1483 | msg.create_rsp->bfa_handle); | 1264 | msg.create_rsp->bfa_handle); |
1484 | bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); | 1265 | WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); |
1485 | bfa_stats(itnim, create_comps); | 1266 | bfa_stats(itnim, create_comps); |
1486 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); | 1267 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); |
1487 | break; | 1268 | break; |
@@ -1489,7 +1270,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
1489 | case BFI_ITNIM_I2H_DELETE_RSP: | 1270 | case BFI_ITNIM_I2H_DELETE_RSP: |
1490 | itnim = BFA_ITNIM_FROM_TAG(fcpim, | 1271 | itnim = BFA_ITNIM_FROM_TAG(fcpim, |
1491 | msg.delete_rsp->bfa_handle); | 1272 | msg.delete_rsp->bfa_handle); |
1492 | bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); | 1273 | WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); |
1493 | bfa_stats(itnim, delete_comps); | 1274 | bfa_stats(itnim, delete_comps); |
1494 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); | 1275 | bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP); |
1495 | break; | 1276 | break; |
@@ -1503,14 +1284,12 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
1503 | 1284 | ||
1504 | default: | 1285 | default: |
1505 | bfa_trc(bfa, m->mhdr.msg_id); | 1286 | bfa_trc(bfa, m->mhdr.msg_id); |
1506 | bfa_assert(0); | 1287 | WARN_ON(1); |
1507 | } | 1288 | } |
1508 | } | 1289 | } |
1509 | 1290 | ||
1510 | |||
1511 | |||
1512 | /* | 1291 | /* |
1513 | * bfa_itnim_api | 1292 | * bfa_itnim_api |
1514 | */ | 1293 | */ |
1515 | 1294 | ||
1516 | struct bfa_itnim_s * | 1295 | struct bfa_itnim_s * |
@@ -1520,7 +1299,7 @@ bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn) | |||
1520 | struct bfa_itnim_s *itnim; | 1299 | struct bfa_itnim_s *itnim; |
1521 | 1300 | ||
1522 | itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); | 1301 | itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag); |
1523 | bfa_assert(itnim->rport == rport); | 1302 | WARN_ON(itnim->rport != rport); |
1524 | 1303 | ||
1525 | itnim->ditn = ditn; | 1304 | itnim->ditn = ditn; |
1526 | 1305 | ||
@@ -1568,31 +1347,6 @@ bfa_itnim_hold_io(struct bfa_itnim_s *itnim) | |||
1568 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); | 1347 | bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable)); |
1569 | } | 1348 | } |
1570 | 1349 | ||
1571 | bfa_status_t | ||
1572 | bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, | ||
1573 | struct bfa_itnim_ioprofile_s *ioprofile) | ||
1574 | { | ||
1575 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa); | ||
1576 | if (!fcpim->io_profile) | ||
1577 | return BFA_STATUS_IOPROFILE_OFF; | ||
1578 | |||
1579 | itnim->ioprofile.index = BFA_IOBUCKET_MAX; | ||
1580 | itnim->ioprofile.io_profile_start_time = | ||
1581 | bfa_io_profile_start_time(itnim->bfa); | ||
1582 | itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul; | ||
1583 | itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div; | ||
1584 | *ioprofile = itnim->ioprofile; | ||
1585 | |||
1586 | return BFA_STATUS_OK; | ||
1587 | } | ||
1588 | |||
1589 | void | ||
1590 | bfa_itnim_get_stats(struct bfa_itnim_s *itnim, | ||
1591 | struct bfa_itnim_iostats_s *stats) | ||
1592 | { | ||
1593 | *stats = itnim->stats; | ||
1594 | } | ||
1595 | |||
1596 | void | 1350 | void |
1597 | bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) | 1351 | bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) |
1598 | { | 1352 | { |
@@ -1608,14 +1362,11 @@ bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) | |||
1608 | */ | 1362 | */ |
1609 | 1363 | ||
1610 | /* | 1364 | /* |
1611 | * IO is not started (unallocated). | 1365 | * IO is not started (unallocated). |
1612 | */ | 1366 | */ |
1613 | static void | 1367 | static void |
1614 | bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1368 | bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1615 | { | 1369 | { |
1616 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
1617 | bfa_trc_fp(ioim->bfa, event); | ||
1618 | |||
1619 | switch (event) { | 1370 | switch (event) { |
1620 | case BFA_IOIM_SM_START: | 1371 | case BFA_IOIM_SM_START: |
1621 | if (!bfa_itnim_is_online(ioim->itnim)) { | 1372 | if (!bfa_itnim_is_online(ioim->itnim)) { |
@@ -1635,7 +1386,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1635 | } | 1386 | } |
1636 | 1387 | ||
1637 | if (ioim->nsges > BFI_SGE_INLINE) { | 1388 | if (ioim->nsges > BFI_SGE_INLINE) { |
1638 | if (!bfa_ioim_sge_setup(ioim)) { | 1389 | if (!bfa_ioim_sgpg_alloc(ioim)) { |
1639 | bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); | 1390 | bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc); |
1640 | return; | 1391 | return; |
1641 | } | 1392 | } |
@@ -1662,7 +1413,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1662 | * requests immediately. | 1413 | * requests immediately. |
1663 | */ | 1414 | */ |
1664 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); | 1415 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
1665 | bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); | 1416 | WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)); |
1666 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, | 1417 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
1667 | __bfa_cb_ioim_abort, ioim); | 1418 | __bfa_cb_ioim_abort, ioim); |
1668 | break; | 1419 | break; |
@@ -1673,7 +1424,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1673 | } | 1424 | } |
1674 | 1425 | ||
1675 | /* | 1426 | /* |
1676 | * IO is waiting for SG pages. | 1427 | * IO is waiting for SG pages. |
1677 | */ | 1428 | */ |
1678 | static void | 1429 | static void |
1679 | bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1430 | bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
@@ -1720,14 +1471,11 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1720 | } | 1471 | } |
1721 | 1472 | ||
1722 | /* | 1473 | /* |
1723 | * IO is active. | 1474 | * IO is active. |
1724 | */ | 1475 | */ |
1725 | static void | 1476 | static void |
1726 | bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1477 | bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1727 | { | 1478 | { |
1728 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
1729 | bfa_trc_fp(ioim->bfa, event); | ||
1730 | |||
1731 | switch (event) { | 1479 | switch (event) { |
1732 | case BFA_IOIM_SM_COMP_GOOD: | 1480 | case BFA_IOIM_SM_COMP_GOOD: |
1733 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); | 1481 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb); |
@@ -1786,8 +1534,8 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1786 | break; | 1534 | break; |
1787 | 1535 | ||
1788 | case BFA_IOIM_SM_SQRETRY: | 1536 | case BFA_IOIM_SM_SQRETRY: |
1789 | if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) { | 1537 | if (bfa_ioim_maxretry_reached(ioim)) { |
1790 | /* max retry completed free IO */ | 1538 | /* max retry reached, free IO */ |
1791 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); | 1539 | bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); |
1792 | bfa_ioim_move_to_comp_q(ioim); | 1540 | bfa_ioim_move_to_comp_q(ioim); |
1793 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, | 1541 | bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, |
@@ -1804,17 +1552,15 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1804 | } | 1552 | } |
1805 | 1553 | ||
1806 | /* | 1554 | /* |
1807 | * IO is retried with new tag. | 1555 | * IO is retried with new tag. |
1808 | */ | 1556 | */ |
1809 | static void | 1557 | static void |
1810 | bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1558 | bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
1811 | { | 1559 | { |
1812 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
1813 | bfa_trc_fp(ioim->bfa, event); | ||
1814 | |||
1815 | switch (event) { | 1560 | switch (event) { |
1816 | case BFA_IOIM_SM_FREE: | 1561 | case BFA_IOIM_SM_FREE: |
1817 | /* abts and rrq done. Now retry the IO with new tag */ | 1562 | /* abts and rrq done. Now retry the IO with new tag */ |
1563 | bfa_ioim_update_iotag(ioim); | ||
1818 | if (!bfa_ioim_send_ioreq(ioim)) { | 1564 | if (!bfa_ioim_send_ioreq(ioim)) { |
1819 | bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); | 1565 | bfa_sm_set_state(ioim, bfa_ioim_sm_qfull); |
1820 | break; | 1566 | break; |
@@ -1858,7 +1604,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1858 | } | 1604 | } |
1859 | 1605 | ||
1860 | /* | 1606 | /* |
1861 | * IO is being aborted, waiting for completion from firmware. | 1607 | * IO is being aborted, waiting for completion from firmware. |
1862 | */ | 1608 | */ |
1863 | static void | 1609 | static void |
1864 | bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1610 | bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
@@ -1894,7 +1640,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1894 | break; | 1640 | break; |
1895 | 1641 | ||
1896 | case BFA_IOIM_SM_CLEANUP: | 1642 | case BFA_IOIM_SM_CLEANUP: |
1897 | bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); | 1643 | WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); |
1898 | ioim->iosp->abort_explicit = BFA_FALSE; | 1644 | ioim->iosp->abort_explicit = BFA_FALSE; |
1899 | 1645 | ||
1900 | if (bfa_ioim_send_abort(ioim)) | 1646 | if (bfa_ioim_send_abort(ioim)) |
@@ -1981,7 +1727,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
1981 | } | 1727 | } |
1982 | 1728 | ||
1983 | /* | 1729 | /* |
1984 | * IO is waiting for room in request CQ | 1730 | * IO is waiting for room in request CQ |
1985 | */ | 1731 | */ |
1986 | static void | 1732 | static void |
1987 | bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1733 | bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
@@ -2025,7 +1771,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
2025 | } | 1771 | } |
2026 | 1772 | ||
2027 | /* | 1773 | /* |
2028 | * Active IO is being aborted, waiting for room in request CQ. | 1774 | * Active IO is being aborted, waiting for room in request CQ. |
2029 | */ | 1775 | */ |
2030 | static void | 1776 | static void |
2031 | bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1777 | bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
@@ -2040,7 +1786,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
2040 | break; | 1786 | break; |
2041 | 1787 | ||
2042 | case BFA_IOIM_SM_CLEANUP: | 1788 | case BFA_IOIM_SM_CLEANUP: |
2043 | bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE); | 1789 | WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE); |
2044 | ioim->iosp->abort_explicit = BFA_FALSE; | 1790 | ioim->iosp->abort_explicit = BFA_FALSE; |
2045 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); | 1791 | bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull); |
2046 | break; | 1792 | break; |
@@ -2076,7 +1822,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
2076 | } | 1822 | } |
2077 | 1823 | ||
2078 | /* | 1824 | /* |
2079 | * Active IO is being cleaned up, waiting for room in request CQ. | 1825 | * Active IO is being cleaned up, waiting for room in request CQ. |
2080 | */ | 1826 | */ |
2081 | static void | 1827 | static void |
2082 | bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1828 | bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
@@ -2131,9 +1877,6 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
2131 | static void | 1877 | static void |
2132 | bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | 1878 | bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) |
2133 | { | 1879 | { |
2134 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
2135 | bfa_trc_fp(ioim->bfa, event); | ||
2136 | |||
2137 | switch (event) { | 1880 | switch (event) { |
2138 | case BFA_IOIM_SM_HCB: | 1881 | case BFA_IOIM_SM_HCB: |
2139 | bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); | 1882 | bfa_sm_set_state(ioim, bfa_ioim_sm_uninit); |
@@ -2213,11 +1956,6 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) | |||
2213 | } | 1956 | } |
2214 | 1957 | ||
2215 | 1958 | ||
2216 | |||
2217 | /* | ||
2218 | * hal_ioim_private | ||
2219 | */ | ||
2220 | |||
2221 | static void | 1959 | static void |
2222 | __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) | 1960 | __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete) |
2223 | { | 1961 | { |
@@ -2323,7 +2061,7 @@ bfa_ioim_sgpg_alloced(void *cbarg) | |||
2323 | 2061 | ||
2324 | ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); | 2062 | ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); |
2325 | list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); | 2063 | list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q); |
2326 | bfa_ioim_sgpg_setup(ioim); | 2064 | ioim->sgpg = bfa_q_first(&ioim->sgpg_q); |
2327 | bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); | 2065 | bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); |
2328 | } | 2066 | } |
2329 | 2067 | ||
@@ -2335,13 +2073,16 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) | |||
2335 | { | 2073 | { |
2336 | struct bfa_itnim_s *itnim = ioim->itnim; | 2074 | struct bfa_itnim_s *itnim = ioim->itnim; |
2337 | struct bfi_ioim_req_s *m; | 2075 | struct bfi_ioim_req_s *m; |
2338 | static struct fcp_cmnd_s cmnd_z0 = { 0 }; | 2076 | static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } }; |
2339 | struct bfi_sge_s *sge; | 2077 | struct bfi_sge_s *sge, *sgpge; |
2340 | u32 pgdlen = 0; | 2078 | u32 pgdlen = 0; |
2341 | u32 fcp_dl; | 2079 | u32 fcp_dl; |
2342 | u64 addr; | 2080 | u64 addr; |
2343 | struct scatterlist *sg; | 2081 | struct scatterlist *sg; |
2082 | struct bfa_sgpg_s *sgpg; | ||
2344 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; | 2083 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; |
2084 | u32 i, sge_id, pgcumsz; | ||
2085 | enum dma_data_direction dmadir; | ||
2345 | 2086 | ||
2346 | /* | 2087 | /* |
2347 | * check for room in queue to send request now | 2088 | * check for room in queue to send request now |
@@ -2359,22 +2100,61 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) | |||
2359 | */ | 2100 | */ |
2360 | m->io_tag = cpu_to_be16(ioim->iotag); | 2101 | m->io_tag = cpu_to_be16(ioim->iotag); |
2361 | m->rport_hdl = ioim->itnim->rport->fw_handle; | 2102 | m->rport_hdl = ioim->itnim->rport->fw_handle; |
2362 | m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); | 2103 | m->io_timeout = 0; |
2363 | 2104 | ||
2364 | /* | ||
2365 | * build inline IO SG element here | ||
2366 | */ | ||
2367 | sge = &m->sges[0]; | 2105 | sge = &m->sges[0]; |
2368 | if (ioim->nsges) { | 2106 | sgpg = ioim->sgpg; |
2369 | sg = (struct scatterlist *)scsi_sglist(cmnd); | 2107 | sge_id = 0; |
2370 | addr = bfa_os_sgaddr(sg_dma_address(sg)); | 2108 | sgpge = NULL; |
2371 | sge->sga = *(union bfi_addr_u *) &addr; | 2109 | pgcumsz = 0; |
2372 | pgdlen = sg_dma_len(sg); | 2110 | scsi_for_each_sg(cmnd, sg, ioim->nsges, i) { |
2373 | sge->sg_len = pgdlen; | 2111 | if (i == 0) { |
2374 | sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? | 2112 | /* build inline IO SG element */ |
2113 | addr = bfa_sgaddr_le(sg_dma_address(sg)); | ||
2114 | sge->sga = *(union bfi_addr_u *) &addr; | ||
2115 | pgdlen = sg_dma_len(sg); | ||
2116 | sge->sg_len = pgdlen; | ||
2117 | sge->flags = (ioim->nsges > BFI_SGE_INLINE) ? | ||
2375 | BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; | 2118 | BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST; |
2376 | bfa_sge_to_be(sge); | 2119 | bfa_sge_to_be(sge); |
2377 | sge++; | 2120 | sge++; |
2121 | } else { | ||
2122 | if (sge_id == 0) | ||
2123 | sgpge = sgpg->sgpg->sges; | ||
2124 | |||
2125 | addr = bfa_sgaddr_le(sg_dma_address(sg)); | ||
2126 | sgpge->sga = *(union bfi_addr_u *) &addr; | ||
2127 | sgpge->sg_len = sg_dma_len(sg); | ||
2128 | pgcumsz += sgpge->sg_len; | ||
2129 | |||
2130 | /* set flags */ | ||
2131 | if (i < (ioim->nsges - 1) && | ||
2132 | sge_id < (BFI_SGPG_DATA_SGES - 1)) | ||
2133 | sgpge->flags = BFI_SGE_DATA; | ||
2134 | else if (i < (ioim->nsges - 1)) | ||
2135 | sgpge->flags = BFI_SGE_DATA_CPL; | ||
2136 | else | ||
2137 | sgpge->flags = BFI_SGE_DATA_LAST; | ||
2138 | |||
2139 | bfa_sge_to_le(sgpge); | ||
2140 | |||
2141 | sgpge++; | ||
2142 | if (i == (ioim->nsges - 1)) { | ||
2143 | sgpge->flags = BFI_SGE_PGDLEN; | ||
2144 | sgpge->sga.a32.addr_lo = 0; | ||
2145 | sgpge->sga.a32.addr_hi = 0; | ||
2146 | sgpge->sg_len = pgcumsz; | ||
2147 | bfa_sge_to_le(sgpge); | ||
2148 | } else if (++sge_id == BFI_SGPG_DATA_SGES) { | ||
2149 | sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); | ||
2150 | sgpge->flags = BFI_SGE_LINK; | ||
2151 | sgpge->sga = sgpg->sgpg_pa; | ||
2152 | sgpge->sg_len = pgcumsz; | ||
2153 | bfa_sge_to_le(sgpge); | ||
2154 | sge_id = 0; | ||
2155 | pgcumsz = 0; | ||
2156 | } | ||
2157 | } | ||
2378 | } | 2158 | } |
2379 | 2159 | ||
2380 | if (ioim->nsges > BFI_SGE_INLINE) { | 2160 | if (ioim->nsges > BFI_SGE_INLINE) { |
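The loop above folds the former two-pass setup (inline SGE built in bfa_ioim_send_ioreq(), remaining SGEs filled in by bfa_ioim_sgpg_setup()) into a single scsi_for_each_sg() walk: scatterlist entry 0 becomes the request's inline SGE, later entries land in pre-allocated SG pages, full pages are chained with a LINK element, and the last page is closed with a PGDLEN element. The per-entry flag choice for the paged SGEs is the subtle part; an illustrative stand-alone restatement (not part of the patch; i is the overall scatterlist index, sge_id the index within the current page, and the BFI_* constants are the ones used above):

	static u32 sgpg_sge_flags(u32 i, u32 nsges, u32 sge_id)
	{
		if (i < nsges - 1 && sge_id < BFI_SGPG_DATA_SGES - 1)
			return BFI_SGE_DATA;		/* more data SGEs follow on this page */
		if (i < nsges - 1)
			return BFI_SGE_DATA_CPL;	/* page is full, chain continues on the next page */
		return BFI_SGE_DATA_LAST;		/* final data SGE of the I/O */
	}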
@@ -2391,10 +2171,17 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) | |||
2391 | * set up I/O command parameters | 2171 | * set up I/O command parameters |
2392 | */ | 2172 | */ |
2393 | m->cmnd = cmnd_z0; | 2173 | m->cmnd = cmnd_z0; |
2394 | m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); | 2174 | int_to_scsilun(cmnd->device->lun, &m->cmnd.lun); |
2395 | m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); | 2175 | dmadir = cmnd->sc_data_direction; |
2396 | m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio); | 2176 | if (dmadir == DMA_TO_DEVICE) |
2397 | fcp_dl = bfa_cb_ioim_get_size(ioim->dio); | 2177 | m->cmnd.iodir = FCP_IODIR_WRITE; |
2178 | else if (dmadir == DMA_FROM_DEVICE) | ||
2179 | m->cmnd.iodir = FCP_IODIR_READ; | ||
2180 | else | ||
2181 | m->cmnd.iodir = FCP_IODIR_NONE; | ||
2182 | |||
2183 | m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd; | ||
2184 | fcp_dl = scsi_bufflen(cmnd); | ||
2398 | m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); | 2185 | m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); |
2399 | 2186 | ||
2400 | /* | 2187 | /* |
@@ -2418,28 +2205,9 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) | |||
2418 | bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); | 2205 | bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); |
2419 | } | 2206 | } |
2420 | if (itnim->seq_rec || | 2207 | if (itnim->seq_rec || |
2421 | (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1))) | 2208 | (scsi_bufflen(cmnd) & (sizeof(u32) - 1))) |
2422 | bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); | 2209 | bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa)); |
2423 | 2210 | ||
2424 | #ifdef IOIM_ADVANCED | ||
2425 | m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio); | ||
2426 | m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); | ||
2427 | m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); | ||
2428 | |||
2429 | /* | ||
2430 | * Handle large CDB (>16 bytes). | ||
2431 | */ | ||
2432 | m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - | ||
2433 | FCP_CMND_CDB_LEN) / sizeof(u32); | ||
2434 | if (m->cmnd.addl_cdb_len) { | ||
2435 | memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) | ||
2436 | bfa_cb_ioim_get_cdb(ioim->dio) + 1, | ||
2437 | m->cmnd.addl_cdb_len * sizeof(u32)); | ||
2438 | fcp_cmnd_fcpdl(&m->cmnd) = | ||
2439 | cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio)); | ||
2440 | } | ||
2441 | #endif | ||
2442 | |||
2443 | /* | 2211 | /* |
2444 | * queue I/O message to firmware | 2212 | * queue I/O message to firmware |
2445 | */ | 2213 | */ |
@@ -2452,11 +2220,11 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) | |||
2452 | * at queuing time. | 2220 | * at queuing time. |
2453 | */ | 2221 | */ |
2454 | static bfa_boolean_t | 2222 | static bfa_boolean_t |
2455 | bfa_ioim_sge_setup(struct bfa_ioim_s *ioim) | 2223 | bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim) |
2456 | { | 2224 | { |
2457 | u16 nsgpgs; | 2225 | u16 nsgpgs; |
2458 | 2226 | ||
2459 | bfa_assert(ioim->nsges > BFI_SGE_INLINE); | 2227 | WARN_ON(ioim->nsges <= BFI_SGE_INLINE); |
2460 | 2228 | ||
2461 | /* | 2229 | /* |
2462 | * allocate SG pages needed | 2230 | * allocate SG pages needed |
@@ -2472,73 +2240,11 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim) | |||
2472 | } | 2240 | } |
2473 | 2241 | ||
2474 | ioim->nsgpgs = nsgpgs; | 2242 | ioim->nsgpgs = nsgpgs; |
2475 | bfa_ioim_sgpg_setup(ioim); | 2243 | ioim->sgpg = bfa_q_first(&ioim->sgpg_q); |
2476 | 2244 | ||
2477 | return BFA_TRUE; | 2245 | return BFA_TRUE; |
2478 | } | 2246 | } |
2479 | 2247 | ||
2480 | static void | ||
2481 | bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim) | ||
2482 | { | ||
2483 | int sgeid, nsges, i; | ||
2484 | struct bfi_sge_s *sge; | ||
2485 | struct bfa_sgpg_s *sgpg; | ||
2486 | u32 pgcumsz; | ||
2487 | u64 addr; | ||
2488 | struct scatterlist *sg; | ||
2489 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; | ||
2490 | |||
2491 | sgeid = BFI_SGE_INLINE; | ||
2492 | ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q); | ||
2493 | |||
2494 | sg = scsi_sglist(cmnd); | ||
2495 | sg = sg_next(sg); | ||
2496 | |||
2497 | do { | ||
2498 | sge = sgpg->sgpg->sges; | ||
2499 | nsges = ioim->nsges - sgeid; | ||
2500 | if (nsges > BFI_SGPG_DATA_SGES) | ||
2501 | nsges = BFI_SGPG_DATA_SGES; | ||
2502 | |||
2503 | pgcumsz = 0; | ||
2504 | for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) { | ||
2505 | addr = bfa_os_sgaddr(sg_dma_address(sg)); | ||
2506 | sge->sga = *(union bfi_addr_u *) &addr; | ||
2507 | sge->sg_len = sg_dma_len(sg); | ||
2508 | pgcumsz += sge->sg_len; | ||
2509 | |||
2510 | /* | ||
2511 | * set flags | ||
2512 | */ | ||
2513 | if (i < (nsges - 1)) | ||
2514 | sge->flags = BFI_SGE_DATA; | ||
2515 | else if (sgeid < (ioim->nsges - 1)) | ||
2516 | sge->flags = BFI_SGE_DATA_CPL; | ||
2517 | else | ||
2518 | sge->flags = BFI_SGE_DATA_LAST; | ||
2519 | |||
2520 | bfa_sge_to_le(sge); | ||
2521 | } | ||
2522 | |||
2523 | sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); | ||
2524 | |||
2525 | /* | ||
2526 | * set the link element of each page | ||
2527 | */ | ||
2528 | if (sgeid == ioim->nsges) { | ||
2529 | sge->flags = BFI_SGE_PGDLEN; | ||
2530 | sge->sga.a32.addr_lo = 0; | ||
2531 | sge->sga.a32.addr_hi = 0; | ||
2532 | } else { | ||
2533 | sge->flags = BFI_SGE_LINK; | ||
2534 | sge->sga = sgpg->sgpg_pa; | ||
2535 | } | ||
2536 | sge->sg_len = pgcumsz; | ||
2537 | |||
2538 | bfa_sge_to_le(sge); | ||
2539 | } while (sgeid < ioim->nsges); | ||
2540 | } | ||
2541 | |||
2542 | /* | 2248 | /* |
2543 | * Send I/O abort request to firmware. | 2249 | * Send I/O abort request to firmware. |
2544 | */ | 2250 | */ |
@@ -2605,7 +2311,7 @@ bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) | |||
2605 | } | 2311 | } |
2606 | bfa_itnim_iodone(ioim->itnim); | 2312 | bfa_itnim_iodone(ioim->itnim); |
2607 | } else | 2313 | } else |
2608 | bfa_tskim_iodone(ioim->iosp->tskim); | 2314 | bfa_wc_down(&ioim->iosp->tskim->wc); |
2609 | } | 2315 | } |
2610 | 2316 | ||
2611 | static bfa_boolean_t | 2317 | static bfa_boolean_t |
@@ -2623,9 +2329,6 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim) | |||
2623 | return BFA_TRUE; | 2329 | return BFA_TRUE; |
2624 | } | 2330 | } |
2625 | 2331 | ||
2626 | /* | ||
2627 | * or after the link comes back. | ||
2628 | */ | ||
2629 | void | 2332 | void |
2630 | bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) | 2333 | bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) |
2631 | { | 2334 | { |
@@ -2653,11 +2356,6 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) | |||
2653 | } | 2356 | } |
2654 | 2357 | ||
2655 | 2358 | ||
2656 | |||
2657 | /* | ||
2658 | * hal_ioim_friend | ||
2659 | */ | ||
2660 | |||
2661 | /* | 2359 | /* |
2662 | * Memory allocation and initialization. | 2360 | * Memory allocation and initialization. |
2663 | */ | 2361 | */ |
@@ -2722,14 +2420,6 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) | |||
2722 | } | 2420 | } |
2723 | } | 2421 | } |
2724 | 2422 | ||
2725 | /* | ||
2726 | * Driver detach time call. | ||
2727 | */ | ||
2728 | void | ||
2729 | bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim) | ||
2730 | { | ||
2731 | } | ||
2732 | |||
2733 | void | 2423 | void |
2734 | bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | 2424 | bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
2735 | { | 2425 | { |
@@ -2742,7 +2432,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
2742 | iotag = be16_to_cpu(rsp->io_tag); | 2432 | iotag = be16_to_cpu(rsp->io_tag); |
2743 | 2433 | ||
2744 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); | 2434 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); |
2745 | bfa_assert(ioim->iotag == iotag); | 2435 | WARN_ON(ioim->iotag != iotag); |
2746 | 2436 | ||
2747 | bfa_trc(ioim->bfa, ioim->iotag); | 2437 | bfa_trc(ioim->bfa, ioim->iotag); |
2748 | bfa_trc(ioim->bfa, rsp->io_status); | 2438 | bfa_trc(ioim->bfa, rsp->io_status); |
@@ -2773,13 +2463,13 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
2773 | 2463 | ||
2774 | case BFI_IOIM_STS_PROTO_ERR: | 2464 | case BFI_IOIM_STS_PROTO_ERR: |
2775 | bfa_stats(ioim->itnim, iocom_proto_err); | 2465 | bfa_stats(ioim->itnim, iocom_proto_err); |
2776 | bfa_assert(rsp->reuse_io_tag); | 2466 | WARN_ON(!rsp->reuse_io_tag); |
2777 | evt = BFA_IOIM_SM_COMP; | 2467 | evt = BFA_IOIM_SM_COMP; |
2778 | break; | 2468 | break; |
2779 | 2469 | ||
2780 | case BFI_IOIM_STS_SQER_NEEDED: | 2470 | case BFI_IOIM_STS_SQER_NEEDED: |
2781 | bfa_stats(ioim->itnim, iocom_sqer_needed); | 2471 | bfa_stats(ioim->itnim, iocom_sqer_needed); |
2782 | bfa_assert(rsp->reuse_io_tag == 0); | 2472 | WARN_ON(rsp->reuse_io_tag != 0); |
2783 | evt = BFA_IOIM_SM_SQRETRY; | 2473 | evt = BFA_IOIM_SM_SQRETRY; |
2784 | break; | 2474 | break; |
2785 | 2475 | ||
@@ -2808,7 +2498,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
2808 | break; | 2498 | break; |
2809 | 2499 | ||
2810 | default: | 2500 | default: |
2811 | bfa_assert(0); | 2501 | WARN_ON(1); |
2812 | } | 2502 | } |
2813 | 2503 | ||
2814 | bfa_sm_send_event(ioim, evt); | 2504 | bfa_sm_send_event(ioim, evt); |
@@ -2825,39 +2515,12 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
2825 | iotag = be16_to_cpu(rsp->io_tag); | 2515 | iotag = be16_to_cpu(rsp->io_tag); |
2826 | 2516 | ||
2827 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); | 2517 | ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); |
2828 | bfa_assert(ioim->iotag == iotag); | 2518 | WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag); |
2829 | 2519 | ||
2830 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
2831 | bfa_ioim_cb_profile_comp(fcpim, ioim); | 2520 | bfa_ioim_cb_profile_comp(fcpim, ioim); |
2832 | |||
2833 | bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); | 2521 | bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD); |
2834 | } | 2522 | } |
2835 | 2523 | ||
2836 | void | ||
2837 | bfa_ioim_profile_start(struct bfa_ioim_s *ioim) | ||
2838 | { | ||
2839 | ioim->start_time = jiffies; | ||
2840 | } | ||
2841 | |||
2842 | void | ||
2843 | bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) | ||
2844 | { | ||
2845 | u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); | ||
2846 | u32 index = bfa_ioim_get_index(fcp_dl); | ||
2847 | u64 end_time = jiffies; | ||
2848 | struct bfa_itnim_latency_s *io_lat = | ||
2849 | &(ioim->itnim->ioprofile.io_latency); | ||
2850 | u32 val = (u32)(end_time - ioim->start_time); | ||
2851 | |||
2852 | bfa_itnim_ioprofile_update(ioim->itnim, index); | ||
2853 | |||
2854 | io_lat->count[index]++; | ||
2855 | io_lat->min[index] = (io_lat->min[index] < val) ? | ||
2856 | io_lat->min[index] : val; | ||
2857 | io_lat->max[index] = (io_lat->max[index] > val) ? | ||
2858 | io_lat->max[index] : val; | ||
2859 | io_lat->avg[index] += val; | ||
2860 | } | ||
2861 | /* | 2524 | /* |
2862 | * Called by itnim to clean up IO while going offline. | 2525 | * Called by itnim to clean up IO while going offline. |
2863 | */ | 2526 | */ |
@@ -2903,11 +2566,6 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim) | |||
2903 | } | 2566 | } |
2904 | 2567 | ||
2905 | 2568 | ||
2906 | |||
2907 | /* | ||
2908 | * hal_ioim_api | ||
2909 | */ | ||
2910 | |||
2911 | /* | 2569 | /* |
2912 | * Allocate IOIM resource for initiator mode I/O request. | 2570 | * Allocate IOIM resource for initiator mode I/O request. |
2913 | */ | 2571 | */ |
@@ -2936,7 +2594,6 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, | |||
2936 | fcpim->ios_active++; | 2594 | fcpim->ios_active++; |
2937 | 2595 | ||
2938 | list_add_tail(&ioim->qe, &itnim->io_q); | 2596 | list_add_tail(&ioim->qe, &itnim->io_q); |
2939 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
2940 | 2597 | ||
2941 | return ioim; | 2598 | return ioim; |
2942 | } | 2599 | } |
@@ -2946,18 +2603,13 @@ bfa_ioim_free(struct bfa_ioim_s *ioim) | |||
2946 | { | 2603 | { |
2947 | struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; | 2604 | struct bfa_fcpim_mod_s *fcpim = ioim->fcpim; |
2948 | 2605 | ||
2949 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
2950 | bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit)); | ||
2951 | |||
2952 | bfa_assert_fp(list_empty(&ioim->sgpg_q) || | ||
2953 | (ioim->nsges > BFI_SGE_INLINE)); | ||
2954 | |||
2955 | if (ioim->nsgpgs > 0) | 2606 | if (ioim->nsgpgs > 0) |
2956 | bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); | 2607 | bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs); |
2957 | 2608 | ||
2958 | bfa_stats(ioim->itnim, io_comps); | 2609 | bfa_stats(ioim->itnim, io_comps); |
2959 | fcpim->ios_active--; | 2610 | fcpim->ios_active--; |
2960 | 2611 | ||
2612 | ioim->iotag &= BFA_IOIM_IOTAG_MASK; | ||
2961 | list_del(&ioim->qe); | 2613 | list_del(&ioim->qe); |
2962 | list_add_tail(&ioim->qe, &fcpim->ioim_free_q); | 2614 | list_add_tail(&ioim->qe, &fcpim->ioim_free_q); |
2963 | } | 2615 | } |
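The added line that masks ioim->iotag with BFA_IOIM_IOTAG_MASK pairs with the bfa_ioim_update_iotag() call in the retry path earlier in this file. Judging from the BFA_IOIM_RETRY_TAG_OFFSET/BFA_IOIM_IOTAG_MASK definitions in the bfa_fcpim.h hunk further down, the low 11 bits appear to index the I/O and the bits above them a retry sequence; a sketch of that presumed layout (BFA_IOIM_TAG_2_ID and bfa_ioim_update_iotag() are not shown in this diff, so their behavior is an assumption):

	/* presumed layout: [ retry count | 11-bit I/O index ] */
	#define BFA_IOIM_RETRY_TAG_OFFSET	11
	#define BFA_IOIM_IOTAG_MASK		0x07ff	/* 2K IOs */

	/* what BFA_IOIM_TAG_2_ID is assumed to extract */
	static inline u16 ioim_tag_to_id(u16 iotag)
	{
		return iotag & BFA_IOIM_IOTAG_MASK;
	}

	/* assumed effect of bfa_ioim_update_iotag(): bump the retry bits,
	 * keep the I/O index; bfa_ioim_free() then strips the retry bits */
	static inline u16 ioim_next_retry_tag(u16 iotag)
	{
		u16 retry = (iotag >> BFA_IOIM_RETRY_TAG_OFFSET) + 1;

		return (retry << BFA_IOIM_RETRY_TAG_OFFSET) |
		       (iotag & BFA_IOIM_IOTAG_MASK);
	}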
@@ -2965,16 +2617,13 @@ bfa_ioim_free(struct bfa_ioim_s *ioim) | |||
2965 | void | 2617 | void |
2966 | bfa_ioim_start(struct bfa_ioim_s *ioim) | 2618 | bfa_ioim_start(struct bfa_ioim_s *ioim) |
2967 | { | 2619 | { |
2968 | bfa_trc_fp(ioim->bfa, ioim->iotag); | ||
2969 | |||
2970 | bfa_ioim_cb_profile_start(ioim->fcpim, ioim); | 2620 | bfa_ioim_cb_profile_start(ioim->fcpim, ioim); |
2971 | 2621 | ||
2972 | /* | 2622 | /* |
2973 | * Obtain the queue over which this request has to be issued | 2623 | * Obtain the queue over which this request has to be issued |
2974 | */ | 2624 | */ |
2975 | ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? | 2625 | ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? |
2976 | bfa_cb_ioim_get_reqq(ioim->dio) : | 2626 | BFA_FALSE : bfa_itnim_get_reqq(ioim); |
2977 | bfa_itnim_get_reqq(ioim); | ||
2978 | 2627 | ||
2979 | bfa_sm_send_event(ioim, BFA_IOIM_SM_START); | 2628 | bfa_sm_send_event(ioim, BFA_IOIM_SM_START); |
2980 | } | 2629 | } |
@@ -2997,13 +2646,12 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim) | |||
2997 | return BFA_STATUS_OK; | 2646 | return BFA_STATUS_OK; |
2998 | } | 2647 | } |
2999 | 2648 | ||
3000 | |||
3001 | /* | 2649 | /* |
3002 | * BFA TSKIM state machine functions | 2650 | * BFA TSKIM state machine functions |
3003 | */ | 2651 | */ |
3004 | 2652 | ||
3005 | /* | 2653 | /* |
3006 | * Task management command beginning state. | 2654 | * Task management command beginning state. |
3007 | */ | 2655 | */ |
3008 | static void | 2656 | static void |
3009 | bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 2657 | bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
@@ -3040,9 +2688,8 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
3040 | } | 2688 | } |
3041 | 2689 | ||
3042 | /* | 2690 | /* |
3043 | * brief | 2691 | * TM command is active, awaiting completion from firmware to |
3044 | * TM command is active, awaiting completion from firmware to | 2692 | * cleanup IO requests in TM scope. |
3045 | * cleanup IO requests in TM scope. | ||
3046 | */ | 2693 | */ |
3047 | static void | 2694 | static void |
3048 | bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 2695 | bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
@@ -3077,8 +2724,8 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
3077 | } | 2724 | } |
3078 | 2725 | ||
3079 | /* | 2726 | /* |
3080 | * An active TM is being cleaned up since ITN is offline. Awaiting cleanup | 2727 | * An active TM is being cleaned up since ITN is offline. Awaiting cleanup |
3081 | * completion event from firmware. | 2728 | * completion event from firmware. |
3082 | */ | 2729 | */ |
3083 | static void | 2730 | static void |
3084 | bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 2731 | bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
@@ -3138,7 +2785,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
3138 | } | 2785 | } |
3139 | 2786 | ||
3140 | /* | 2787 | /* |
3141 | * Task management command is waiting for room in request CQ | 2788 | * Task management command is waiting for room in request CQ |
3142 | */ | 2789 | */ |
3143 | static void | 2790 | static void |
3144 | bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 2791 | bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
@@ -3173,8 +2820,8 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
3173 | } | 2820 | } |
3174 | 2821 | ||
3175 | /* | 2822 | /* |
3176 | * Task management command is active, waiting for room in request CQ | 2823 | * Task management command is active, waiting for room in request CQ |
3177 | * to send clean up request. | 2824 | * to send clean up request. |
3178 | */ | 2825 | */ |
3179 | static void | 2826 | static void |
3180 | bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, | 2827 | bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, |
@@ -3186,10 +2833,8 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, | |||
3186 | case BFA_TSKIM_SM_DONE: | 2833 | case BFA_TSKIM_SM_DONE: |
3187 | bfa_reqq_wcancel(&tskim->reqq_wait); | 2834 | bfa_reqq_wcancel(&tskim->reqq_wait); |
3188 | /* | 2835 | /* |
3189 | * | ||
3190 | * Fall through !!! | 2836 | * Fall through !!! |
3191 | */ | 2837 | */ |
3192 | |||
3193 | case BFA_TSKIM_SM_QRESUME: | 2838 | case BFA_TSKIM_SM_QRESUME: |
3194 | bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); | 2839 | bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup); |
3195 | bfa_tskim_send_abort(tskim); | 2840 | bfa_tskim_send_abort(tskim); |
@@ -3208,7 +2853,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, | |||
3208 | } | 2853 | } |
3209 | 2854 | ||
3210 | /* | 2855 | /* |
3211 | * BFA callback is pending | 2856 | * BFA callback is pending |
3212 | */ | 2857 | */ |
3213 | static void | 2858 | static void |
3214 | bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | 2859 | bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) |
@@ -3233,12 +2878,6 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) | |||
3233 | } | 2878 | } |
3234 | } | 2879 | } |
3235 | 2880 | ||
3236 | |||
3237 | |||
3238 | /* | ||
3239 | * hal_tskim_private | ||
3240 | */ | ||
3241 | |||
3242 | static void | 2881 | static void |
3243 | __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) | 2882 | __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete) |
3244 | { | 2883 | { |
@@ -3268,8 +2907,8 @@ __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete) | |||
3268 | BFI_TSKIM_STS_FAILED); | 2907 | BFI_TSKIM_STS_FAILED); |
3269 | } | 2908 | } |
3270 | 2909 | ||
3271 | static bfa_boolean_t | 2910 | static bfa_boolean_t |
3272 | bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun) | 2911 | bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun) |
3273 | { | 2912 | { |
3274 | switch (tskim->tm_cmnd) { | 2913 | switch (tskim->tm_cmnd) { |
3275 | case FCP_TM_TARGET_RESET: | 2914 | case FCP_TM_TARGET_RESET: |
@@ -3279,24 +2918,26 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun) | |||
3279 | case FCP_TM_CLEAR_TASK_SET: | 2918 | case FCP_TM_CLEAR_TASK_SET: |
3280 | case FCP_TM_LUN_RESET: | 2919 | case FCP_TM_LUN_RESET: |
3281 | case FCP_TM_CLEAR_ACA: | 2920 | case FCP_TM_CLEAR_ACA: |
3282 | return (tskim->lun == lun); | 2921 | return !memcmp(&tskim->lun, &lun, sizeof(lun)); |
3283 | 2922 | ||
3284 | default: | 2923 | default: |
3285 | bfa_assert(0); | 2924 | WARN_ON(1); |
3286 | } | 2925 | } |
3287 | 2926 | ||
3288 | return BFA_FALSE; | 2927 | return BFA_FALSE; |
3289 | } | 2928 | } |
3290 | 2929 | ||
3291 | /* | 2930 | /* |
3292 | * Gather affected IO requests and task management commands. | 2931 | * Gather affected IO requests and task management commands. |
3293 | */ | 2932 | */ |
3294 | static void | 2933 | static void |
3295 | bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) | 2934 | bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) |
3296 | { | 2935 | { |
3297 | struct bfa_itnim_s *itnim = tskim->itnim; | 2936 | struct bfa_itnim_s *itnim = tskim->itnim; |
3298 | struct bfa_ioim_s *ioim; | 2937 | struct bfa_ioim_s *ioim; |
3299 | struct list_head *qe, *qen; | 2938 | struct list_head *qe, *qen; |
2939 | struct scsi_cmnd *cmnd; | ||
2940 | struct scsi_lun scsilun; | ||
3300 | 2941 | ||
3301 | INIT_LIST_HEAD(&tskim->io_q); | 2942 | INIT_LIST_HEAD(&tskim->io_q); |
3302 | 2943 | ||
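Because the LUN is now carried as an 8-byte struct scsi_lun instead of the old lun_t integer, TM scope matching compares the SAM wire format byte-for-byte, and each candidate I/O's LUN is rebuilt from its scsi_cmnd before the comparison, as in the two list walks in the hunks below. The core of the check, extracted for clarity:

	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	struct scsi_lun scsilun;

	/* convert the midlayer's integer LUN to SAM wire format */
	int_to_scsilun(cmnd->device->lun, &scsilun);

	/* LUN-scoped TM commands match on the full 8-byte LUN */
	if (bfa_tskim_match_scope(tskim, scsilun)) {
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &tskim->io_q);
	}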
@@ -3305,8 +2946,9 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) | |||
3305 | */ | 2946 | */ |
3306 | list_for_each_safe(qe, qen, &itnim->io_q) { | 2947 | list_for_each_safe(qe, qen, &itnim->io_q) { |
3307 | ioim = (struct bfa_ioim_s *) qe; | 2948 | ioim = (struct bfa_ioim_s *) qe; |
3308 | if (bfa_tskim_match_scope | 2949 | cmnd = (struct scsi_cmnd *) ioim->dio; |
3309 | (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { | 2950 | int_to_scsilun(cmnd->device->lun, &scsilun); |
2951 | if (bfa_tskim_match_scope(tskim, scsilun)) { | ||
3310 | list_del(&ioim->qe); | 2952 | list_del(&ioim->qe); |
3311 | list_add_tail(&ioim->qe, &tskim->io_q); | 2953 | list_add_tail(&ioim->qe, &tskim->io_q); |
3312 | } | 2954 | } |
@@ -3317,8 +2959,9 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) | |||
3317 | */ | 2959 | */ |
3318 | list_for_each_safe(qe, qen, &itnim->pending_q) { | 2960 | list_for_each_safe(qe, qen, &itnim->pending_q) { |
3319 | ioim = (struct bfa_ioim_s *) qe; | 2961 | ioim = (struct bfa_ioim_s *) qe; |
3320 | if (bfa_tskim_match_scope | 2962 | cmnd = (struct scsi_cmnd *) ioim->dio; |
3321 | (tskim, bfa_cb_ioim_get_lun(ioim->dio))) { | 2963 | int_to_scsilun(cmnd->device->lun, &scsilun); |
2964 | if (bfa_tskim_match_scope(tskim, scsilun)) { | ||
3322 | list_del(&ioim->qe); | 2965 | list_del(&ioim->qe); |
3323 | list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); | 2966 | list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q); |
3324 | bfa_ioim_tov(ioim); | 2967 | bfa_ioim_tov(ioim); |
@@ -3327,7 +2970,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) | |||
3327 | } | 2970 | } |
3328 | 2971 | ||
3329 | /* | 2972 | /* |
3330 | * IO cleanup completion | 2973 | * IO cleanup completion |
3331 | */ | 2974 | */ |
3332 | static void | 2975 | static void |
3333 | bfa_tskim_cleanp_comp(void *tskim_cbarg) | 2976 | bfa_tskim_cleanp_comp(void *tskim_cbarg) |
@@ -3339,7 +2982,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg) | |||
3339 | } | 2982 | } |
3340 | 2983 | ||
3341 | /* | 2984 | /* |
3342 | * Gather affected IO requests and task management commands. | 2985 | * Gather affected IO requests and task management commands. |
3343 | */ | 2986 | */ |
3344 | static void | 2987 | static void |
3345 | bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) | 2988 | bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) |
@@ -3359,7 +3002,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) | |||
3359 | } | 3002 | } |
3360 | 3003 | ||
3361 | /* | 3004 | /* |
3362 | * Send task management request to firmware. | 3005 | * Send task management request to firmware. |
3363 | */ | 3006 | */ |
3364 | static bfa_boolean_t | 3007 | static bfa_boolean_t |
3365 | bfa_tskim_send(struct bfa_tskim_s *tskim) | 3008 | bfa_tskim_send(struct bfa_tskim_s *tskim) |
@@ -3394,7 +3037,7 @@ bfa_tskim_send(struct bfa_tskim_s *tskim) | |||
3394 | } | 3037 | } |
3395 | 3038 | ||
3396 | /* | 3039 | /* |
3397 | * Send abort request to cleanup an active TM to firmware. | 3040 | * Send abort request to cleanup an active TM to firmware. |
3398 | */ | 3041 | */ |
3399 | static bfa_boolean_t | 3042 | static bfa_boolean_t |
3400 | bfa_tskim_send_abort(struct bfa_tskim_s *tskim) | 3043 | bfa_tskim_send_abort(struct bfa_tskim_s *tskim) |
@@ -3425,7 +3068,7 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim) | |||
3425 | } | 3068 | } |
3426 | 3069 | ||
3427 | /* | 3070 | /* |
3428 | * Call to resume task management cmnd waiting for room in request queue. | 3071 | * Call to resume task management cmnd waiting for room in request queue. |
3429 | */ | 3072 | */ |
3430 | static void | 3073 | static void |
3431 | bfa_tskim_qresume(void *cbarg) | 3074 | bfa_tskim_qresume(void *cbarg) |
@@ -3451,12 +3094,6 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) | |||
3451 | } | 3094 | } |
3452 | } | 3095 | } |
3453 | 3096 | ||
3454 | |||
3455 | |||
3456 | /* | ||
3457 | * hal_tskim_friend | ||
3458 | */ | ||
3459 | |||
3460 | /* | 3097 | /* |
3461 | * Notification on completions from related ioim. | 3098 | * Notification on completions from related ioim. |
3462 | */ | 3099 | */ |
@@ -3489,7 +3126,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim) | |||
3489 | } | 3126 | } |
3490 | 3127 | ||
3491 | /* | 3128 | /* |
3492 | * Memory allocation and initialization. | 3129 | * Memory allocation and initialization. |
3493 | */ | 3130 | */ |
3494 | void | 3131 | void |
3495 | bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) | 3132 | bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) |
@@ -3522,14 +3159,6 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) | |||
3522 | } | 3159 | } |
3523 | 3160 | ||
3524 | void | 3161 | void |
3525 | bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) | ||
3526 | { | ||
3527 | /* | ||
3528 | * @todo | ||
3529 | */ | ||
3530 | } | ||
3531 | |||
3532 | void | ||
3533 | bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | 3162 | bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) |
3534 | { | 3163 | { |
3535 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); | 3164 | struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); |
@@ -3538,7 +3167,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
3538 | u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); | 3167 | u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); |
3539 | 3168 | ||
3540 | tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); | 3169 | tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); |
3541 | bfa_assert(tskim->tsk_tag == tsk_tag); | 3170 | WARN_ON(tskim->tsk_tag != tsk_tag); |
3542 | 3171 | ||
3543 | tskim->tsk_status = rsp->tsk_status; | 3172 | tskim->tsk_status = rsp->tsk_status; |
3544 | 3173 | ||
@@ -3556,12 +3185,6 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
3556 | } | 3185 | } |
3557 | 3186 | ||
3558 | 3187 | ||
3559 | |||
3560 | /* | ||
3561 | * hal_tskim_api | ||
3562 | */ | ||
3563 | |||
3564 | |||
3565 | struct bfa_tskim_s * | 3188 | struct bfa_tskim_s * |
3566 | bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) | 3189 | bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) |
3567 | { | 3190 | { |
@@ -3579,13 +3202,13 @@ bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk) | |||
3579 | void | 3202 | void |
3580 | bfa_tskim_free(struct bfa_tskim_s *tskim) | 3203 | bfa_tskim_free(struct bfa_tskim_s *tskim) |
3581 | { | 3204 | { |
3582 | bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); | 3205 | WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe)); |
3583 | list_del(&tskim->qe); | 3206 | list_del(&tskim->qe); |
3584 | list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); | 3207 | list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); |
3585 | } | 3208 | } |
3586 | 3209 | ||
3587 | /* | 3210 | /* |
3588 | * Start a task management command. | 3211 | * Start a task management command. |
3589 | * | 3212 | * |
3590 | * @param[in] tskim BFA task management command instance | 3213 | * @param[in] tskim BFA task management command instance |
3591 | * @param[in] itnim i-t nexus for the task management command | 3214 | * @param[in] itnim i-t nexus for the task management command |
@@ -3596,7 +3219,8 @@ bfa_tskim_free(struct bfa_tskim_s *tskim) | |||
3596 | * @return None. | 3219 | * @return None. |
3597 | */ | 3220 | */ |
3598 | void | 3221 | void |
3599 | bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun, | 3222 | bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, |
3223 | struct scsi_lun lun, | ||
3600 | enum fcp_tm_cmnd tm_cmnd, u8 tsecs) | 3224 | enum fcp_tm_cmnd tm_cmnd, u8 tsecs) |
3601 | { | 3225 | { |
3602 | tskim->itnim = itnim; | 3226 | tskim->itnim = itnim; |
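bfa_tskim_start() now takes the LUN as struct scsi_lun as well, so callers in the driver layer are expected to convert the midlayer LUN before starting the TM command. A hypothetical call site (the surrounding names and the timeout value are illustrative, not part of this patch):

	struct scsi_lun scsilun;

	int_to_scsilun(cmnd->device->lun, &scsilun);
	bfa_tskim_start(tskim, itnim, scsilun, FCP_TM_LUN_RESET,
			60);	/* timeout in seconds, value illustrative */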
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index db53717eeb4b..1e38dade8423 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h | |||
@@ -41,7 +41,7 @@ | |||
41 | (__itnim->ioprofile.iocomps[__index]++) | 41 | (__itnim->ioprofile.iocomps[__index]++) |
42 | 42 | ||
43 | #define BFA_IOIM_RETRY_TAG_OFFSET 11 | 43 | #define BFA_IOIM_RETRY_TAG_OFFSET 11 |
44 | #define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */ | 44 | #define BFA_IOIM_IOTAG_MASK 0x07ff /* 2K IOs */ |
45 | #define BFA_IOIM_RETRY_MAX 7 | 45 | #define BFA_IOIM_RETRY_MAX 7 |
46 | 46 | ||
47 | /* Buckets are 512 bytes to 2MB */ | 47 | /* Buckets are 512 bytes to 2MB */ |
@@ -94,12 +94,12 @@ struct bfa_fcpim_mod_s { | |||
94 | struct list_head ioim_resfree_q; /* IOs waiting for f/w */ | 94 | struct list_head ioim_resfree_q; /* IOs waiting for f/w */ |
95 | struct list_head ioim_comp_q; /* IO global comp Q */ | 95 | struct list_head ioim_comp_q; /* IO global comp Q */ |
96 | struct list_head tskim_free_q; | 96 | struct list_head tskim_free_q; |
97 | u32 ios_active; /* current active IOs */ | 97 | u32 ios_active; /* current active IOs */ |
98 | u32 delay_comp; | 98 | u32 delay_comp; |
99 | struct bfa_fcpim_del_itn_stats_s del_itn_stats; | 99 | struct bfa_fcpim_del_itn_stats_s del_itn_stats; |
100 | bfa_boolean_t ioredirect; | 100 | bfa_boolean_t ioredirect; |
101 | bfa_boolean_t io_profile; | 101 | bfa_boolean_t io_profile; |
102 | u32 io_profile_start_time; | 102 | u32 io_profile_start_time; |
103 | bfa_fcpim_profile_t profile_comp; | 103 | bfa_fcpim_profile_t profile_comp; |
104 | bfa_fcpim_profile_t profile_start; | 104 | bfa_fcpim_profile_t profile_start; |
105 | }; | 105 | }; |
@@ -114,25 +114,24 @@ struct bfa_ioim_s { | |||
114 | struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ | 114 | struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ |
115 | struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ | 115 | struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ |
116 | struct bfad_ioim_s *dio; /* driver IO handle */ | 116 | struct bfad_ioim_s *dio; /* driver IO handle */ |
117 | u16 iotag; /* FWI IO tag */ | 117 | u16 iotag; /* FWI IO tag */ |
118 | u16 abort_tag; /* unique abort request tag */ | 118 | u16 abort_tag; /* unique abort request tag */ |
119 | u16 nsges; /* number of SG elements */ | 119 | u16 nsges; /* number of SG elements */ |
120 | u16 nsgpgs; /* number of SG pages */ | 120 | u16 nsgpgs; /* number of SG pages */ |
121 | struct bfa_sgpg_s *sgpg; /* first SG page */ | 121 | struct bfa_sgpg_s *sgpg; /* first SG page */ |
122 | struct list_head sgpg_q; /* allocated SG pages */ | 122 | struct list_head sgpg_q; /* allocated SG pages */ |
123 | struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ | 123 | struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ |
124 | bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ | 124 | bfa_cb_cbfn_t io_cbfn; /* IO completion handler */ |
125 | struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ | 125 | struct bfa_ioim_sp_s *iosp; /* slow-path IO handling */ |
126 | u8 reqq; /* Request queue for I/O */ | 126 | u8 reqq; /* Request queue for I/O */ |
127 | u64 start_time; /* IO's Profile start val */ | 127 | u64 start_time; /* IO's Profile start val */ |
128 | }; | 128 | }; |
129 | 129 | ||
130 | |||
131 | struct bfa_ioim_sp_s { | 130 | struct bfa_ioim_sp_s { |
132 | struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ | 131 | struct bfi_msg_s comp_rspmsg; /* IO comp f/w response */ |
133 | u8 *snsinfo; /* sense info for this IO */ | 132 | u8 *snsinfo; /* sense info for this IO */ |
134 | struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ | 133 | struct bfa_sgpg_wqe_s sgpg_wqe; /* waitq elem for sgpg */ |
135 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ | 134 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ |
136 | bfa_boolean_t abort_explicit; /* aborted by OS */ | 135 | bfa_boolean_t abort_explicit; /* aborted by OS */ |
137 | struct bfa_tskim_s *tskim; /* Relevant TM cmd */ | 136 | struct bfa_tskim_s *tskim; /* Relevant TM cmd */ |
138 | }; | 137 | }; |
@@ -143,35 +142,34 @@ struct bfa_ioim_sp_s { | |||
143 | struct bfa_tskim_s { | 142 | struct bfa_tskim_s { |
144 | struct list_head qe; | 143 | struct list_head qe; |
145 | bfa_sm_t sm; | 144 | bfa_sm_t sm; |
146 | struct bfa_s *bfa; /* BFA module */ | 145 | struct bfa_s *bfa; /* BFA module */ |
147 | struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ | 146 | struct bfa_fcpim_mod_s *fcpim; /* parent fcpim module */ |
148 | struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ | 147 | struct bfa_itnim_s *itnim; /* i-t-n nexus for this IO */ |
149 | struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ | 148 | struct bfad_tskim_s *dtsk; /* driver task mgmt cmnd */ |
150 | bfa_boolean_t notify; /* notify itnim on TM comp */ | 149 | bfa_boolean_t notify; /* notify itnim on TM comp */ |
151 | lun_t lun; /* lun if applicable */ | 150 | struct scsi_lun lun; /* lun if applicable */ |
152 | enum fcp_tm_cmnd tm_cmnd; /* task management command */ | 151 | enum fcp_tm_cmnd tm_cmnd; /* task management command */ |
153 | u16 tsk_tag; /* FWI IO tag */ | 152 | u16 tsk_tag; /* FWI IO tag */ |
154 | u8 tsecs; /* timeout in seconds */ | 153 | u8 tsecs; /* timeout in seconds */ |
155 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ | 154 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ |
156 | struct list_head io_q; /* queue of affected IOs */ | 155 | struct list_head io_q; /* queue of affected IOs */ |
157 | struct bfa_wc_s wc; /* waiting counter */ | 156 | struct bfa_wc_s wc; /* waiting counter */ |
158 | struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ | 157 | struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ |
159 | enum bfi_tskim_status tsk_status; /* TM status */ | 158 | enum bfi_tskim_status tsk_status; /* TM status */ |
160 | }; | 159 | }; |
161 | 160 | ||
162 | |||
163 | /* | 161 | /* |
164 | * BFA i-t-n (initiator mode) | 162 | * BFA i-t-n (initiator mode) |
165 | */ | 163 | */ |
166 | struct bfa_itnim_s { | 164 | struct bfa_itnim_s { |
167 | struct list_head qe; /* queue element */ | 165 | struct list_head qe; /* queue element */ |
168 | bfa_sm_t sm; /* i-t-n im BFA state machine */ | 166 | bfa_sm_t sm; /* i-t-n im BFA state machine */ |
169 | struct bfa_s *bfa; /* bfa instance */ | 167 | struct bfa_s *bfa; /* bfa instance */ |
170 | struct bfa_rport_s *rport; /* bfa rport */ | 168 | struct bfa_rport_s *rport; /* bfa rport */ |
171 | void *ditn; /* driver i-t-n structure */ | 169 | void *ditn; /* driver i-t-n structure */ |
172 | struct bfi_mhdr_s mhdr; /* pre-built mhdr */ | 170 | struct bfi_mhdr_s mhdr; /* pre-built mhdr */ |
173 | u8 msg_no; /* itnim/rport firmware handle */ | 171 | u8 msg_no; /* itnim/rport firmware handle */ |
174 | u8 reqq; /* CQ for requests */ | 172 | u8 reqq; /* CQ for requests */ |
175 | struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ | 173 | struct bfa_cb_qe_s hcb_qe; /* bfa callback qelem */ |
176 | struct list_head pending_q; /* queue of pending IO requests */ | 174 | struct list_head pending_q; /* queue of pending IO requests */ |
177 | struct list_head io_q; /* queue of active IO requests */ | 175 | struct list_head io_q; /* queue of active IO requests */ |
@@ -181,19 +179,19 @@ struct bfa_itnim_s { | |||
181 | bfa_boolean_t seq_rec; /* SQER supported */ | 179 | bfa_boolean_t seq_rec; /* SQER supported */ |
182 | bfa_boolean_t is_online; /* itnim is ONLINE for IO */ | 180 | bfa_boolean_t is_online; /* itnim is ONLINE for IO */ |
183 | bfa_boolean_t iotov_active; /* IO TOV timer is active */ | 181 | bfa_boolean_t iotov_active; /* IO TOV timer is active */ |
184 | struct bfa_wc_s wc; /* waiting counter */ | 182 | struct bfa_wc_s wc; /* waiting counter */ |
185 | struct bfa_timer_s timer; /* pending IO TOV */ | 183 | struct bfa_timer_s timer; /* pending IO TOV */ |
186 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ | 184 | struct bfa_reqq_wait_s reqq_wait; /* to wait for room in reqq */ |
187 | struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ | 185 | struct bfa_fcpim_mod_s *fcpim; /* fcpim module */ |
188 | struct bfa_itnim_iostats_s stats; | 186 | struct bfa_itnim_iostats_s stats; |
189 | struct bfa_itnim_ioprofile_s ioprofile; | 187 | struct bfa_itnim_ioprofile_s ioprofile; |
190 | }; | 188 | }; |
191 | 189 | ||
192 | |||
193 | #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) | 190 | #define bfa_itnim_is_online(_itnim) ((_itnim)->is_online) |
194 | #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) | 191 | #define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod) |
192 | #define BFA_IOIM_TAG_2_ID(_iotag) ((_iotag) & BFA_IOIM_IOTAG_MASK) | ||
195 | #define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ | 193 | #define BFA_IOIM_FROM_TAG(_fcpim, _iotag) \ |
196 | (&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)]) | 194 | (&fcpim->ioim_arr[(_iotag & BFA_IOIM_IOTAG_MASK)]) |
197 | #define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \ | 195 | #define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag) \ |
198 | (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) | 196 | (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)]) |
199 | 197 | ||
@@ -201,26 +199,26 @@ struct bfa_itnim_s { | |||
201 | (_bfa->modules.fcpim_mod.io_profile_start_time) | 199 | (_bfa->modules.fcpim_mod.io_profile_start_time) |
202 | #define bfa_fcpim_get_io_profile(_bfa) \ | 200 | #define bfa_fcpim_get_io_profile(_bfa) \ |
203 | (_bfa->modules.fcpim_mod.io_profile) | 201 | (_bfa->modules.fcpim_mod.io_profile) |
202 | #define bfa_ioim_update_iotag(__ioim) do { \ | ||
203 | uint16_t k = (__ioim)->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; \ | ||
204 | k++; (__ioim)->iotag &= BFA_IOIM_IOTAG_MASK; \ | ||
205 | (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \ | ||
206 | } while (0) | ||
204 | 207 | ||
205 | static inline bfa_boolean_t | 208 | static inline bfa_boolean_t |
206 | bfa_ioim_get_iotag(struct bfa_ioim_s *ioim) | 209 | bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim) |
207 | { | 210 | { |
208 | u16 k = ioim->iotag; | 211 | uint16_t k = ioim->iotag >> BFA_IOIM_RETRY_TAG_OFFSET; |
209 | 212 | if (k < BFA_IOIM_RETRY_MAX) | |
210 | k >>= BFA_IOIM_RETRY_TAG_OFFSET; k++; | ||
211 | |||
212 | if (k > BFA_IOIM_RETRY_MAX) | ||
213 | return BFA_FALSE; | 213 | return BFA_FALSE; |
214 | ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK; | ||
215 | ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET; | ||
216 | return BFA_TRUE; | 214 | return BFA_TRUE; |
217 | } | 215 | } |
216 | |||
218 | /* | 217 | /* |
219 | * function prototypes | 218 | * function prototypes |
220 | */ | 219 | */ |
221 | void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, | 220 | void bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, |
222 | struct bfa_meminfo_s *minfo); | 221 | struct bfa_meminfo_s *minfo); |
223 | void bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim); | ||
224 | void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); | 222 | void bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); |
225 | void bfa_ioim_good_comp_isr(struct bfa_s *bfa, | 223 | void bfa_ioim_good_comp_isr(struct bfa_s *bfa, |
226 | struct bfi_msg_s *msg); | 224 | struct bfi_msg_s *msg); |
@@ -232,7 +230,6 @@ void bfa_ioim_tov(struct bfa_ioim_s *ioim); | |||
232 | 230 | ||
233 | void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, | 231 | void bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, |
234 | struct bfa_meminfo_s *minfo); | 232 | struct bfa_meminfo_s *minfo); |
235 | void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim); | ||
236 | void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); | 233 | void bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); |
237 | void bfa_tskim_iodone(struct bfa_tskim_s *tskim); | 234 | void bfa_tskim_iodone(struct bfa_tskim_s *tskim); |
238 | void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); | 235 | void bfa_tskim_iocdisable(struct bfa_tskim_s *tskim); |
@@ -248,32 +245,14 @@ void bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); | |||
248 | void bfa_itnim_iodone(struct bfa_itnim_s *itnim); | 245 | void bfa_itnim_iodone(struct bfa_itnim_s *itnim); |
249 | void bfa_itnim_tskdone(struct bfa_itnim_s *itnim); | 246 | void bfa_itnim_tskdone(struct bfa_itnim_s *itnim); |
250 | bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim); | 247 | bfa_boolean_t bfa_itnim_hold_io(struct bfa_itnim_s *itnim); |
251 | void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim); | ||
252 | void bfa_ioim_profile_start(struct bfa_ioim_s *ioim); | ||
253 | |||
254 | 248 | ||
255 | /* | 249 | /* |
256 | * bfa fcpim module API functions | 250 | * bfa fcpim module API functions |
257 | */ | 251 | */ |
258 | void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); | 252 | void bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov); |
259 | u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); | 253 | u16 bfa_fcpim_path_tov_get(struct bfa_s *bfa); |
260 | void bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth); | ||
261 | u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); | 254 | u16 bfa_fcpim_qdepth_get(struct bfa_s *bfa); |
262 | bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa, | 255 | |
263 | struct bfa_itnim_iostats_s *modstats); | ||
264 | bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa, | ||
265 | struct bfa_itnim_iostats_s *stats, u8 lp_tag); | ||
266 | bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa, | ||
267 | struct bfa_fcpim_del_itn_stats_s *modstats); | ||
268 | bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag); | ||
269 | void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats, | ||
270 | struct bfa_itnim_iostats_s *itnim_stats); | ||
271 | bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa); | ||
272 | void bfa_fcpim_set_ioredirect(struct bfa_s *bfa, | ||
273 | bfa_boolean_t state); | ||
274 | void bfa_fcpim_update_ioredirect(struct bfa_s *bfa); | ||
275 | bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time); | ||
276 | bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa); | ||
277 | #define bfa_fcpim_ioredirect_enabled(__bfa) \ | 256 | #define bfa_fcpim_ioredirect_enabled(__bfa) \ |
278 | (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect) | 257 | (((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect) |
279 | 258 | ||
@@ -291,48 +270,33 @@ bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa); | |||
291 | * bfa itnim API functions | 270 | * bfa itnim API functions |
292 | */ | 271 | */ |
293 | struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa, | 272 | struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa, |
294 | struct bfa_rport_s *rport, void *itnim); | 273 | struct bfa_rport_s *rport, void *itnim); |
295 | void bfa_itnim_delete(struct bfa_itnim_s *itnim); | 274 | void bfa_itnim_delete(struct bfa_itnim_s *itnim); |
296 | void bfa_itnim_online(struct bfa_itnim_s *itnim, | 275 | void bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec); |
297 | bfa_boolean_t seq_rec); | 276 | void bfa_itnim_offline(struct bfa_itnim_s *itnim); |
298 | void bfa_itnim_offline(struct bfa_itnim_s *itnim); | 277 | void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); |
299 | void bfa_itnim_get_stats(struct bfa_itnim_s *itnim, | 278 | bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, |
300 | struct bfa_itnim_iostats_s *stats); | 279 | struct bfa_itnim_ioprofile_s *ioprofile); |
301 | void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim); | 280 | |
302 | bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, | ||
303 | struct bfa_itnim_ioprofile_s *ioprofile); | ||
304 | #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) | 281 | #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) |
305 | 282 | ||
306 | /* | 283 | /* |
307 | * BFA completion callback for bfa_itnim_online(). | 284 | * BFA completion callback for bfa_itnim_online(). |
308 | * | ||
309 | * @param[in] itnim FCS or driver itnim instance | ||
310 | * | ||
311 | * return None | ||
312 | */ | 285 | */ |
313 | void bfa_cb_itnim_online(void *itnim); | 286 | void bfa_cb_itnim_online(void *itnim); |
314 | 287 | ||
315 | /* | 288 | /* |
316 | * BFA completion callback for bfa_itnim_offline(). | 289 | * BFA completion callback for bfa_itnim_offline(). |
317 | * | ||
318 | * @param[in] itnim FCS or driver itnim instance | ||
319 | * | ||
320 | * return None | ||
321 | */ | 290 | */ |
322 | void bfa_cb_itnim_offline(void *itnim); | 291 | void bfa_cb_itnim_offline(void *itnim); |
323 | void bfa_cb_itnim_tov_begin(void *itnim); | 292 | void bfa_cb_itnim_tov_begin(void *itnim); |
324 | void bfa_cb_itnim_tov(void *itnim); | 293 | void bfa_cb_itnim_tov(void *itnim); |
325 | 294 | ||
326 | /* | 295 | /* |
327 | * BFA notification to FCS/driver for second level error recovery. | 296 | * BFA notification to FCS/driver for second level error recovery. |
328 | * | ||
329 | * At least one I/O request has timed out and the target is unresponsive to | 297 | * At least one I/O request has timed out and the target is unresponsive to |
330 | * repeated abort requests. Second level error recovery should be initiated | 298 | * repeated abort requests. Second level error recovery should be initiated |
331 | * by starting implicit logout and recovery procedures. | 299 | * by starting implicit logout and recovery procedures. |
332 | * | ||
333 | * @param[in] itnim FCS or driver itnim instance | ||
334 | * | ||
335 | * return None | ||
336 | */ | 300 | */ |
337 | void bfa_cb_itnim_sler(void *itnim); | 301 | void bfa_cb_itnim_sler(void *itnim); |
338 | 302 | ||
@@ -349,10 +313,8 @@ void bfa_ioim_start(struct bfa_ioim_s *ioim); | |||
349 | bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim); | 313 | bfa_status_t bfa_ioim_abort(struct bfa_ioim_s *ioim); |
350 | void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, | 314 | void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, |
351 | bfa_boolean_t iotov); | 315 | bfa_boolean_t iotov); |
352 | |||
353 | |||
354 | /* | 316 | /* |
355 | * I/O completion notification. | 317 | * I/O completion notification. |
356 | * | 318 | * |
357 | * @param[in] dio driver IO structure | 319 | * @param[in] dio driver IO structure |
358 | * @param[in] io_status IO completion status | 320 | * @param[in] io_status IO completion status |
@@ -363,39 +325,31 @@ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, | |||
363 | * | 325 | * |
364 | * @return None | 326 | * @return None |
365 | */ | 327 | */ |
366 | void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, | 328 | void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, |
367 | enum bfi_ioim_status io_status, | 329 | enum bfi_ioim_status io_status, |
368 | u8 scsi_status, int sns_len, | 330 | u8 scsi_status, int sns_len, |
369 | u8 *sns_info, s32 residue); | 331 | u8 *sns_info, s32 residue); |
370 | 332 | ||
371 | /* | 333 | /* |
372 | * I/O good completion notification. | 334 | * I/O good completion notification. |
373 | * | ||
374 | * @param[in] dio driver IO structure | ||
375 | * | ||
376 | * @return None | ||
377 | */ | 335 | */ |
378 | void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); | 336 | void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); |
379 | 337 | ||
380 | /* | 338 | /* |
381 | * I/O abort completion notification | 339 | * I/O abort completion notification |
382 | * | ||
383 | * @param[in] dio driver IO that was aborted | ||
384 | * | ||
385 | * @return None | ||
386 | */ | 340 | */ |
387 | void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio); | 341 | void bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio); |
388 | 342 | ||
389 | /* | 343 | /* |
390 | * bfa tskim API functions | 344 | * bfa tskim API functions |
391 | */ | 345 | */ |
392 | struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa, | 346 | struct bfa_tskim_s *bfa_tskim_alloc(struct bfa_s *bfa, |
393 | struct bfad_tskim_s *dtsk); | 347 | struct bfad_tskim_s *dtsk); |
394 | void bfa_tskim_free(struct bfa_tskim_s *tskim); | 348 | void bfa_tskim_free(struct bfa_tskim_s *tskim); |
395 | void bfa_tskim_start(struct bfa_tskim_s *tskim, | 349 | void bfa_tskim_start(struct bfa_tskim_s *tskim, |
396 | struct bfa_itnim_s *itnim, lun_t lun, | 350 | struct bfa_itnim_s *itnim, struct scsi_lun lun, |
397 | enum fcp_tm_cmnd tm, u8 t_secs); | 351 | enum fcp_tm_cmnd tm, u8 t_secs); |
398 | void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, | 352 | void bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, |
399 | enum bfi_tskim_status tsk_status); | 353 | enum bfi_tskim_status tsk_status); |
400 | 354 | ||
401 | #endif /* __BFA_FCPIM_H__ */ | 355 | #endif /* __BFA_FCPIM_H__ */ |
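The hunk above splits the old bfa_ioim_get_iotag() helper in two: bfa_ioim_update_iotag() bumps a retry counter kept in the bits above BFA_IOIM_IOTAG_MASK, while bfa_ioim_maxretry_reached() only tests that counter against BFA_IOIM_RETRY_MAX, so BFA_IOIM_FROM_TAG() can still recover the array index from the low bits. The stand-alone sketch below mirrors that encoding; it assumes BFA_IOIM_RETRY_TAG_OFFSET is 11 (implied by the 0x07ff mask) and uses local stand-in names rather than the driver's macros.

/*
 * Minimal user-space sketch of the iotag layout used above: the low
 * 11 bits (BFA_IOIM_IOTAG_MASK, 0x07ff) select the IO, the bits above
 * them count retries.  BFA_IOIM_RETRY_TAG_OFFSET is assumed to be 11
 * here; these names are stand-ins, not the driver's.
 */
#include <stdint.h>
#include <stdio.h>

#define IOTAG_MASK       0x07ffu
#define RETRY_TAG_OFFSET 11
#define RETRY_MAX        7

/* like bfa_ioim_update_iotag(): bump the retry count, keep the base tag */
static uint16_t update_iotag(uint16_t iotag)
{
	uint16_t k = (uint16_t)(iotag >> RETRY_TAG_OFFSET);

	k++;
	return (uint16_t)((iotag & IOTAG_MASK) | (k << RETRY_TAG_OFFSET));
}

/* like bfa_ioim_maxretry_reached(): true once the retry budget is spent */
static int maxretry_reached(uint16_t iotag)
{
	return (iotag >> RETRY_TAG_OFFSET) >= RETRY_MAX;
}

int main(void)
{
	uint16_t tag = 0x0123;	/* base IO tag, retry count 0 */
	int retries = 0;

	while (!maxretry_reached(tag)) {
		tag = update_iotag(tag);
		retries++;
	}
	/* the array index is still recoverable through the mask */
	printf("retries=%d base=0x%03x\n", retries, tag & IOTAG_MASK);
	return 0;
}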
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 045d7e87b632..f674f9318629 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c | |||
@@ -19,9 +19,9 @@ | |||
19 | * bfa_fcs.c BFA FCS main | 19 | * bfa_fcs.c BFA FCS main |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include "bfad_drv.h" | ||
22 | #include "bfa_fcs.h" | 23 | #include "bfa_fcs.h" |
23 | #include "bfa_fcbuild.h" | 24 | #include "bfa_fcbuild.h" |
24 | #include "bfad_drv.h" | ||
25 | 25 | ||
26 | BFA_TRC_FILE(FCS, FCS); | 26 | BFA_TRC_FILE(FCS, FCS); |
27 | 27 | ||
@@ -76,7 +76,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, | |||
76 | fcs->bfad = bfad; | 76 | fcs->bfad = bfad; |
77 | fcs->min_cfg = min_cfg; | 77 | fcs->min_cfg = min_cfg; |
78 | 78 | ||
79 | bfa_attach_fcs(bfa); | 79 | bfa->fcs = BFA_TRUE; |
80 | fcbuild_init(); | 80 | fcbuild_init(); |
81 | 81 | ||
82 | for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { | 82 | for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) { |
@@ -110,14 +110,6 @@ bfa_fcs_init(struct bfa_fcs_s *fcs) | |||
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
113 | /* | ||
114 | * Start FCS operations. | ||
115 | */ | ||
116 | void | ||
117 | bfa_fcs_start(struct bfa_fcs_s *fcs) | ||
118 | { | ||
119 | bfa_fcs_fabric_modstart(fcs); | ||
120 | } | ||
121 | 113 | ||
122 | /* | 114 | /* |
123 | * brief | 115 | * brief |
@@ -140,22 +132,6 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, | |||
140 | 132 | ||
141 | /* | 133 | /* |
142 | * brief | 134 | * brief |
143 | * FCS FDMI Driver Parameter Initialization | ||
144 | * | ||
145 | * param[in] fcs FCS instance | ||
146 | * param[in] fdmi_enable TRUE/FALSE | ||
147 | * | ||
148 | * return None | ||
149 | */ | ||
150 | void | ||
151 | bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) | ||
152 | { | ||
153 | |||
154 | fcs->fdmi_enabled = fdmi_enable; | ||
155 | |||
156 | } | ||
157 | /* | ||
158 | * brief | ||
159 | * FCS instance cleanup and exit. | 135 | * FCS instance cleanup and exit. |
160 | * | 136 | * |
161 | * param[in] fcs FCS instance | 137 | * param[in] fcs FCS instance |
@@ -184,18 +160,6 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs) | |||
184 | } | 160 | } |
185 | 161 | ||
186 | 162 | ||
187 | void | ||
188 | bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod) | ||
189 | { | ||
190 | fcs->trcmod = trcmod; | ||
191 | } | ||
192 | |||
193 | void | ||
194 | bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs) | ||
195 | { | ||
196 | bfa_wc_down(&fcs->wc); | ||
197 | } | ||
198 | |||
199 | /* | 163 | /* |
200 | * Fabric module implementation. | 164 | * Fabric module implementation. |
201 | */ | 165 | */ |
@@ -232,31 +196,6 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, | |||
232 | u32 rsp_len, | 196 | u32 rsp_len, |
233 | u32 resid_len, | 197 | u32 resid_len, |
234 | struct fchs_s *rspfchs); | 198 | struct fchs_s *rspfchs); |
235 | /* | ||
236 | * fcs_fabric_sm fabric state machine functions | ||
237 | */ | ||
238 | |||
239 | /* | ||
240 | * Fabric state machine events | ||
241 | */ | ||
242 | enum bfa_fcs_fabric_event { | ||
243 | BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */ | ||
244 | BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */ | ||
245 | BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */ | ||
246 | BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */ | ||
247 | BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */ | ||
248 | BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */ | ||
249 | BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */ | ||
250 | BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */ | ||
251 | BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */ | ||
252 | BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */ | ||
253 | BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */ | ||
254 | BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */ | ||
255 | BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */ | ||
256 | BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */ | ||
257 | BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */ | ||
258 | BFA_FCS_FABRIC_SM_START = 16, /* from driver */ | ||
259 | }; | ||
260 | 199 | ||
261 | static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, | 200 | static void bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, |
262 | enum bfa_fcs_fabric_event event); | 201 | enum bfa_fcs_fabric_event event); |
@@ -270,14 +209,8 @@ static void bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, | |||
270 | enum bfa_fcs_fabric_event event); | 209 | enum bfa_fcs_fabric_event event); |
271 | static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, | 210 | static void bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, |
272 | enum bfa_fcs_fabric_event event); | 211 | enum bfa_fcs_fabric_event event); |
273 | static void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, | ||
274 | enum bfa_fcs_fabric_event event); | ||
275 | static void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, | ||
276 | enum bfa_fcs_fabric_event event); | ||
277 | static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, | 212 | static void bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, |
278 | enum bfa_fcs_fabric_event event); | 213 | enum bfa_fcs_fabric_event event); |
279 | static void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, | ||
280 | enum bfa_fcs_fabric_event event); | ||
281 | static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, | 214 | static void bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, |
282 | enum bfa_fcs_fabric_event event); | 215 | enum bfa_fcs_fabric_event event); |
283 | static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, | 216 | static void bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, |
@@ -337,7 +270,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, | |||
337 | 270 | ||
338 | case BFA_FCS_FABRIC_SM_DELETE: | 271 | case BFA_FCS_FABRIC_SM_DELETE: |
339 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); | 272 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); |
340 | bfa_fcs_modexit_comp(fabric->fcs); | 273 | bfa_wc_down(&fabric->fcs->wc); |
341 | break; | 274 | break; |
342 | 275 | ||
343 | default: | 276 | default: |
@@ -410,7 +343,7 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, | |||
410 | 343 | ||
411 | case BFA_FCS_FABRIC_SM_LOOPBACK: | 344 | case BFA_FCS_FABRIC_SM_LOOPBACK: |
412 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); | 345 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback); |
413 | bfa_lps_discard(fabric->lps); | 346 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
414 | bfa_fcs_fabric_set_opertype(fabric); | 347 | bfa_fcs_fabric_set_opertype(fabric); |
415 | break; | 348 | break; |
416 | 349 | ||
@@ -424,12 +357,12 @@ bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric, | |||
424 | 357 | ||
425 | case BFA_FCS_FABRIC_SM_LINK_DOWN: | 358 | case BFA_FCS_FABRIC_SM_LINK_DOWN: |
426 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | 359 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); |
427 | bfa_lps_discard(fabric->lps); | 360 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
428 | break; | 361 | break; |
429 | 362 | ||
430 | case BFA_FCS_FABRIC_SM_DELETE: | 363 | case BFA_FCS_FABRIC_SM_DELETE: |
431 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); | 364 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting); |
432 | bfa_lps_discard(fabric->lps); | 365 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
433 | bfa_fcs_fabric_delete(fabric); | 366 | bfa_fcs_fabric_delete(fabric); |
434 | break; | 367 | break; |
435 | 368 | ||
@@ -481,7 +414,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, | |||
481 | switch (event) { | 414 | switch (event) { |
482 | case BFA_FCS_FABRIC_SM_AUTH_FAILED: | 415 | case BFA_FCS_FABRIC_SM_AUTH_FAILED: |
483 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); | 416 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); |
484 | bfa_lps_discard(fabric->lps); | 417 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
485 | break; | 418 | break; |
486 | 419 | ||
487 | case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: | 420 | case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: |
@@ -495,7 +428,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, | |||
495 | 428 | ||
496 | case BFA_FCS_FABRIC_SM_LINK_DOWN: | 429 | case BFA_FCS_FABRIC_SM_LINK_DOWN: |
497 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | 430 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); |
498 | bfa_lps_discard(fabric->lps); | 431 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
499 | break; | 432 | break; |
500 | 433 | ||
501 | case BFA_FCS_FABRIC_SM_DELETE: | 434 | case BFA_FCS_FABRIC_SM_DELETE: |
@@ -511,7 +444,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, | |||
511 | /* | 444 | /* |
512 | * Authentication failed | 445 | * Authentication failed |
513 | */ | 446 | */ |
514 | static void | 447 | void |
515 | bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, | 448 | bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, |
516 | enum bfa_fcs_fabric_event event) | 449 | enum bfa_fcs_fabric_event event) |
517 | { | 450 | { |
@@ -537,7 +470,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, | |||
537 | /* | 470 | /* |
538 | * Port is in loopback mode. | 471 | * Port is in loopback mode. |
539 | */ | 472 | */ |
540 | static void | 473 | void |
541 | bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, | 474 | bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, |
542 | enum bfa_fcs_fabric_event event) | 475 | enum bfa_fcs_fabric_event event) |
543 | { | 476 | { |
@@ -573,7 +506,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, | |||
573 | switch (event) { | 506 | switch (event) { |
574 | case BFA_FCS_FABRIC_SM_LINK_DOWN: | 507 | case BFA_FCS_FABRIC_SM_LINK_DOWN: |
575 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | 508 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); |
576 | bfa_lps_discard(fabric->lps); | 509 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
577 | bfa_fcs_fabric_notify_offline(fabric); | 510 | bfa_fcs_fabric_notify_offline(fabric); |
578 | break; | 511 | break; |
579 | 512 | ||
@@ -596,7 +529,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, | |||
596 | /* | 529 | /* |
597 | * Fabric is online - normal operating state. | 530 | * Fabric is online - normal operating state. |
598 | */ | 531 | */ |
599 | static void | 532 | void |
600 | bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, | 533 | bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, |
601 | enum bfa_fcs_fabric_event event) | 534 | enum bfa_fcs_fabric_event event) |
602 | { | 535 | { |
@@ -606,7 +539,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, | |||
606 | switch (event) { | 539 | switch (event) { |
607 | case BFA_FCS_FABRIC_SM_LINK_DOWN: | 540 | case BFA_FCS_FABRIC_SM_LINK_DOWN: |
608 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); | 541 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown); |
609 | bfa_lps_discard(fabric->lps); | 542 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
610 | bfa_fcs_fabric_notify_offline(fabric); | 543 | bfa_fcs_fabric_notify_offline(fabric); |
611 | break; | 544 | break; |
612 | 545 | ||
@@ -617,7 +550,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, | |||
617 | 550 | ||
618 | case BFA_FCS_FABRIC_SM_AUTH_FAILED: | 551 | case BFA_FCS_FABRIC_SM_AUTH_FAILED: |
619 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); | 552 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed); |
620 | bfa_lps_discard(fabric->lps); | 553 | bfa_sm_send_event(fabric->lps, BFA_LPS_SM_OFFLINE); |
621 | break; | 554 | break; |
622 | 555 | ||
623 | case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: | 556 | case BFA_FCS_FABRIC_SM_AUTH_SUCCESS: |
@@ -697,7 +630,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, | |||
697 | switch (event) { | 630 | switch (event) { |
698 | case BFA_FCS_FABRIC_SM_DELCOMP: | 631 | case BFA_FCS_FABRIC_SM_DELCOMP: |
699 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); | 632 | bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit); |
700 | bfa_fcs_modexit_comp(fabric->fcs); | 633 | bfa_wc_down(&fabric->fcs->wc); |
701 | break; | 634 | break; |
702 | 635 | ||
703 | case BFA_FCS_FABRIC_SM_LINK_UP: | 636 | case BFA_FCS_FABRIC_SM_LINK_UP: |
@@ -724,8 +657,8 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) | |||
724 | struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; | 657 | struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg; |
725 | 658 | ||
726 | port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; | 659 | port_cfg->roles = BFA_LPORT_ROLE_FCP_IM; |
727 | port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc); | 660 | port_cfg->nwwn = fabric->fcs->bfa->ioc.attr->nwwn; |
728 | port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); | 661 | port_cfg->pwwn = fabric->fcs->bfa->ioc.attr->pwwn; |
729 | } | 662 | } |
730 | 663 | ||
731 | /* | 664 | /* |
@@ -813,7 +746,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) | |||
813 | return; | 746 | return; |
814 | 747 | ||
815 | case BFA_STATUS_EPROTOCOL: | 748 | case BFA_STATUS_EPROTOCOL: |
816 | switch (bfa_lps_get_extstatus(fabric->lps)) { | 749 | switch (fabric->lps->ext_status) { |
817 | case BFA_EPROTO_BAD_ACCEPT: | 750 | case BFA_EPROTO_BAD_ACCEPT: |
818 | fabric->stats.flogi_acc_err++; | 751 | fabric->stats.flogi_acc_err++; |
819 | break; | 752 | break; |
@@ -840,26 +773,26 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) | |||
840 | return; | 773 | return; |
841 | } | 774 | } |
842 | 775 | ||
843 | fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps); | 776 | fabric->bb_credit = fabric->lps->pr_bbcred; |
844 | bfa_trc(fabric->fcs, fabric->bb_credit); | 777 | bfa_trc(fabric->fcs, fabric->bb_credit); |
845 | 778 | ||
846 | if (!bfa_lps_is_brcd_fabric(fabric->lps)) | 779 | if (!(fabric->lps->brcd_switch)) |
847 | fabric->fabric_name = bfa_lps_get_peer_nwwn(fabric->lps); | 780 | fabric->fabric_name = fabric->lps->pr_nwwn; |
848 | 781 | ||
849 | /* | 782 | /* |
850 | * Check port type. It should be 1 = F-port. | 783 | * Check port type. It should be 1 = F-port. |
851 | */ | 784 | */ |
852 | if (bfa_lps_is_fport(fabric->lps)) { | 785 | if (fabric->lps->fport) { |
853 | fabric->bport.pid = bfa_lps_get_pid(fabric->lps); | 786 | fabric->bport.pid = fabric->lps->lp_pid; |
854 | fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps); | 787 | fabric->is_npiv = fabric->lps->npiv_en; |
855 | fabric->is_auth = bfa_lps_is_authreq(fabric->lps); | 788 | fabric->is_auth = fabric->lps->auth_req; |
856 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); | 789 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP); |
857 | } else { | 790 | } else { |
858 | /* | 791 | /* |
859 | * Nport-2-Nport direct attached | 792 | * Nport-2-Nport direct attached |
860 | */ | 793 | */ |
861 | fabric->bport.port_topo.pn2n.rem_port_wwn = | 794 | fabric->bport.port_topo.pn2n.rem_port_wwn = |
862 | bfa_lps_get_peer_pwwn(fabric->lps); | 795 | fabric->lps->pr_pwwn; |
863 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); | 796 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC); |
864 | } | 797 | } |
865 | 798 | ||
@@ -987,7 +920,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) | |||
987 | INIT_LIST_HEAD(&fabric->vport_q); | 920 | INIT_LIST_HEAD(&fabric->vport_q); |
988 | INIT_LIST_HEAD(&fabric->vf_q); | 921 | INIT_LIST_HEAD(&fabric->vf_q); |
989 | fabric->lps = bfa_lps_alloc(fcs->bfa); | 922 | fabric->lps = bfa_lps_alloc(fcs->bfa); |
990 | bfa_assert(fabric->lps); | 923 | WARN_ON(!fabric->lps); |
991 | 924 | ||
992 | /* | 925 | /* |
993 | * Initialize fabric delete completion handler. Fabric deletion is | 926 | * Initialize fabric delete completion handler. Fabric deletion is |
@@ -1038,31 +971,6 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) | |||
1038 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); | 971 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); |
1039 | } | 972 | } |
1040 | 973 | ||
1041 | /* | ||
1042 | * Suspend fabric activity as part of driver suspend. | ||
1043 | */ | ||
1044 | void | ||
1045 | bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs) | ||
1046 | { | ||
1047 | } | ||
1048 | |||
1049 | bfa_boolean_t | ||
1050 | bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric) | ||
1051 | { | ||
1052 | return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback); | ||
1053 | } | ||
1054 | |||
1055 | bfa_boolean_t | ||
1056 | bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric) | ||
1057 | { | ||
1058 | return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed); | ||
1059 | } | ||
1060 | |||
1061 | enum bfa_port_type | ||
1062 | bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric) | ||
1063 | { | ||
1064 | return fabric->oper_type; | ||
1065 | } | ||
1066 | 974 | ||
1067 | /* | 975 | /* |
1068 | * Link up notification from BFA physical port module. | 976 | * Link up notification from BFA physical port module. |
@@ -1123,40 +1031,6 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, | |||
1123 | bfa_wc_down(&fabric->wc); | 1031 | bfa_wc_down(&fabric->wc); |
1124 | } | 1032 | } |
1125 | 1033 | ||
1126 | /* | ||
1127 | * Base port is deleted. | ||
1128 | */ | ||
1129 | void | ||
1130 | bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric) | ||
1131 | { | ||
1132 | bfa_wc_down(&fabric->wc); | ||
1133 | } | ||
1134 | |||
1135 | |||
1136 | /* | ||
1137 | * Check if fabric is online. | ||
1138 | * | ||
1139 | * param[in] fabric - Fabric instance. This can be a base fabric or vf. | ||
1140 | * | ||
1141 | * @return TRUE/FALSE | ||
1142 | */ | ||
1143 | int | ||
1144 | bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) | ||
1145 | { | ||
1146 | return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); | ||
1147 | } | ||
1148 | |||
1149 | /* | ||
1150 | * brief | ||
1151 | * | ||
1152 | */ | ||
1153 | bfa_status_t | ||
1154 | bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs, | ||
1155 | struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv) | ||
1156 | { | ||
1157 | bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit); | ||
1158 | return BFA_STATUS_OK; | ||
1159 | } | ||
1160 | 1034 | ||
1161 | /* | 1035 | /* |
1162 | * Look up a vport within a fabric given its pwwn | 1036 | * Look up a vport within a fabric given its pwwn |
@@ -1176,18 +1050,6 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) | |||
1176 | return NULL; | 1050 | return NULL; |
1177 | } | 1051 | } |
1178 | 1052 | ||
1179 | /* | ||
1180 | * In a given fabric, return the number of lports. | ||
1181 | * | ||
1182 | * param[in] fabric - Fabric instance. This can be a base fabric or vf. | ||
1183 | * | ||
1184 | * @return : 1 or more. | ||
1185 | */ | ||
1186 | u16 | ||
1187 | bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric) | ||
1188 | { | ||
1189 | return fabric->num_vports; | ||
1190 | } | ||
1191 | 1053 | ||
1192 | /* | 1054 | /* |
1193 | * Get OUI of the attached switch. | 1055 | * Get OUI of the attached switch. |
@@ -1207,7 +1069,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) | |||
1207 | u8 *tmp; | 1069 | u8 *tmp; |
1208 | u16 oui; | 1070 | u16 oui; |
1209 | 1071 | ||
1210 | fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps); | 1072 | fab_nwwn = fabric->lps->pr_nwwn; |
1211 | 1073 | ||
1212 | tmp = (u8 *)&fab_nwwn; | 1074 | tmp = (u8 *)&fab_nwwn; |
1213 | oui = (tmp[3] << 8) | tmp[4]; | 1075 | oui = (tmp[3] << 8) | tmp[4]; |
@@ -1235,7 +1097,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, | |||
1235 | * external loopback cable is in place. Our own FLOGI frames are | 1097 | * external loopback cable is in place. Our own FLOGI frames are |
1236 | * sometimes looped back when switch port gets temporarily bypassed. | 1098 | * sometimes looped back when switch port gets temporarily bypassed. |
1237 | */ | 1099 | */ |
1238 | if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) && | 1100 | if ((pid == bfa_ntoh3b(FC_FABRIC_PORT)) && |
1239 | (els_cmd->els_code == FC_ELS_FLOGI) && | 1101 | (els_cmd->els_code == FC_ELS_FLOGI) && |
1240 | (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { | 1102 | (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) { |
1241 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); | 1103 | bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK); |
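In the loopback check just above, the old bfa_os_ntoh3b()/bfa_os_hton3b() helpers become bfa_ntoh3b()/bfa_hton3b(); both shuffle 24-bit Fibre Channel port IDs between host order and big-endian wire order, and the comparison is against the well-known fabric F_Port address 0xfffffe. The snippet below is a hedged stand-in for that three-byte swap on a little-endian host; the driver's actual macros are defined in other headers and may differ.

/*
 * Assumed little-endian equivalent of the 24-bit swap; not the driver's
 * own bfa_hton3b()/bfa_ntoh3b() definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define FC_FABRIC_PORT 0xfffffeU	/* well-known fabric F_Port address */

static uint32_t swap3b(uint32_t x)
{
	return ((x & 0x0000ffU) << 16) |
	        (x & 0x00ff00U)        |
	       ((x & 0xff0000U) >> 16);
}

int main(void)
{
	/* the value the check above compares the frame's source ID against */
	uint32_t wire_sid = swap3b(FC_FABRIC_PORT);

	printf("host 0x%06x <-> wire 0x%06x\n", FC_FABRIC_PORT, wire_sid);
	return 0;
}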
@@ -1245,7 +1107,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, | |||
1245 | /* | 1107 | /* |
1246 | * FLOGI/EVFP exchanges should be consumed by base fabric. | 1108 | * FLOGI/EVFP exchanges should be consumed by base fabric. |
1247 | */ | 1109 | */ |
1248 | if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { | 1110 | if (fchs->d_id == bfa_hton3b(FC_FABRIC_PORT)) { |
1249 | bfa_trc(fabric->fcs, pid); | 1111 | bfa_trc(fabric->fcs, pid); |
1250 | bfa_fcs_fabric_process_uf(fabric, fchs, len); | 1112 | bfa_fcs_fabric_process_uf(fabric, fchs, len); |
1251 | return; | 1113 | return; |
@@ -1358,13 +1220,13 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) | |||
1358 | return; | 1220 | return; |
1359 | 1221 | ||
1360 | reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 1222 | reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
1361 | bfa_os_hton3b(FC_FABRIC_PORT), | 1223 | bfa_hton3b(FC_FABRIC_PORT), |
1362 | n2n_port->reply_oxid, pcfg->pwwn, | 1224 | n2n_port->reply_oxid, pcfg->pwwn, |
1363 | pcfg->nwwn, | 1225 | pcfg->nwwn, |
1364 | bfa_fcport_get_maxfrsize(bfa), | 1226 | bfa_fcport_get_maxfrsize(bfa), |
1365 | bfa_fcport_get_rx_bbcredit(bfa)); | 1227 | bfa_fcport_get_rx_bbcredit(bfa)); |
1366 | 1228 | ||
1367 | bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps), | 1229 | bfa_fcxp_send(fcxp, NULL, fabric->vf_id, fabric->lps->lp_tag, |
1368 | BFA_FALSE, FC_CLASS_3, | 1230 | BFA_FALSE, FC_CLASS_3, |
1369 | reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, | 1231 | reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric, |
1370 | FC_MAX_PDUSZ, 0); | 1232 | FC_MAX_PDUSZ, 0); |
@@ -1455,7 +1317,7 @@ bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event) | |||
1455 | break; | 1317 | break; |
1456 | 1318 | ||
1457 | default: | 1319 | default: |
1458 | bfa_assert(0); | 1320 | WARN_ON(1); |
1459 | } | 1321 | } |
1460 | } | 1322 | } |
1461 | 1323 | ||
@@ -1502,7 +1364,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) | |||
1502 | * drop frame if vfid is unknown | 1364 | * drop frame if vfid is unknown |
1503 | */ | 1365 | */ |
1504 | if (!fabric) { | 1366 | if (!fabric) { |
1505 | bfa_assert(0); | 1367 | WARN_ON(1); |
1506 | bfa_stats(fcs, uf.vfid_unknown); | 1368 | bfa_stats(fcs, uf.vfid_unknown); |
1507 | bfa_uf_free(uf); | 1369 | bfa_uf_free(uf); |
1508 | return; | 1370 | return; |
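Earlier in this file, bfa_fcs_fabric_get_switch_oui() now reads fabric->lps->pr_nwwn directly and builds the switch OUI from two bytes of that WWN, which callers compare against BFA_FCS_BRCD_SWITCH_OUI (0x051e, defined in bfa_fcs.h below). A small sketch of that extraction, assuming the WWN is held in wire (big-endian) byte order and using a hypothetical WWN value:

/*
 * Sketch of the (tmp[3] << 8) | tmp[4] byte pick used by
 * bfa_fcs_fabric_get_switch_oui(); the WWN value here is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define BRCD_SWITCH_OUI 0x051e

static uint16_t switch_oui(const uint8_t wwn[8])
{
	return (uint16_t)((wwn[3] << 8) | wwn[4]);
}

int main(void)
{
	/* hypothetical node WWN of an attached Brocade switch */
	const uint8_t nwwn[8] = { 0x10, 0x00, 0x00, 0x05, 0x1e, 0xaa, 0xbb, 0xcc };

	if (switch_oui(nwwn) == BRCD_SWITCH_OUI)
		printf("attached switch carries the Brocade OUI\n");
	return 0;
}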
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 9cb6a55977c3..0fd63168573f 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h | |||
@@ -27,6 +27,22 @@ | |||
27 | #define BFA_FCS_OS_STR_LEN 64 | 27 | #define BFA_FCS_OS_STR_LEN 64 |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * lps_pvt BFA LPS private functions | ||
31 | */ | ||
32 | |||
33 | enum bfa_lps_event { | ||
34 | BFA_LPS_SM_LOGIN = 1, /* login request from user */ | ||
35 | BFA_LPS_SM_LOGOUT = 2, /* logout request from user */ | ||
36 | BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */ | ||
37 | BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ | ||
38 | BFA_LPS_SM_DELETE = 5, /* lps delete from user */ | ||
39 | BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ | ||
40 | BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ | ||
41 | BFA_LPS_SM_SET_N2N_PID = 8, /* Set assigned PID for n2n */ | ||
42 | }; | ||
43 | |||
44 | |||
45 | /* | ||
30 | * !!! Only append to the enums defined here to avoid any versioning | 46 | * !!! Only append to the enums defined here to avoid any versioning |
31 | * !!! needed between trace utility and driver version | 47 | * !!! needed between trace utility and driver version |
32 | */ | 48 | */ |
@@ -41,13 +57,12 @@ enum { | |||
41 | struct bfa_fcs_s; | 57 | struct bfa_fcs_s; |
42 | 58 | ||
43 | #define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) | 59 | #define __fcs_min_cfg(__fcs) ((__fcs)->min_cfg) |
44 | void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs); | ||
45 | 60 | ||
46 | #define BFA_FCS_BRCD_SWITCH_OUI 0x051e | 61 | #define BFA_FCS_BRCD_SWITCH_OUI 0x051e |
47 | #define N2N_LOCAL_PID 0x010000 | 62 | #define N2N_LOCAL_PID 0x010000 |
48 | #define N2N_REMOTE_PID 0x020000 | 63 | #define N2N_REMOTE_PID 0x020000 |
49 | #define BFA_FCS_RETRY_TIMEOUT 2000 | 64 | #define BFA_FCS_RETRY_TIMEOUT 2000 |
50 | #define BFA_FCS_PID_IS_WKA(pid) ((bfa_os_ntoh3b(pid) > 0xFFF000) ? 1 : 0) | 65 | #define BFA_FCS_PID_IS_WKA(pid) ((bfa_ntoh3b(pid) > 0xFFF000) ? 1 : 0) |
51 | 66 | ||
52 | 67 | ||
53 | 68 | ||
@@ -109,7 +124,7 @@ struct bfa_fcs_lport_loop_s { | |||
109 | 124 | ||
110 | struct bfa_fcs_lport_n2n_s { | 125 | struct bfa_fcs_lport_n2n_s { |
111 | u32 rsvd; | 126 | u32 rsvd; |
112 | u16 reply_oxid; /* ox_id from the req flogi to be | 127 | __be16 reply_oxid; /* ox_id from the req flogi to be |
113 | *used in flogi acc */ | 128 | *used in flogi acc */ |
114 | wwn_t rem_port_wwn; /* Attached port's wwn */ | 129 | wwn_t rem_port_wwn; /* Attached port's wwn */ |
115 | }; | 130 | }; |
@@ -316,8 +331,6 @@ void bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port, | |||
316 | struct bfa_fcs_rport_s *rport); | 331 | struct bfa_fcs_rport_s *rport); |
317 | void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port, | 332 | void bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port, |
318 | struct bfa_fcs_rport_s *rport); | 333 | struct bfa_fcs_rport_s *rport); |
319 | void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs); | ||
320 | void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs); | ||
321 | void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport); | 334 | void bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport); |
322 | void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport); | 335 | void bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport); |
323 | void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport); | 336 | void bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport); |
@@ -359,9 +372,6 @@ bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport); | |||
359 | bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); | 372 | bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport); |
360 | void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, | 373 | void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, |
361 | struct bfa_vport_attr_s *vport_attr); | 374 | struct bfa_vport_attr_s *vport_attr); |
362 | void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, | ||
363 | struct bfa_vport_stats_s *vport_stats); | ||
364 | void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport); | ||
365 | struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, | 375 | struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, |
366 | u16 vf_id, wwn_t vpwwn); | 376 | u16 vf_id, wwn_t vpwwn); |
367 | void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); | 377 | void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport); |
@@ -406,7 +416,7 @@ struct bfa_fcs_rport_s { | |||
406 | struct bfad_rport_s *rp_drv; /* driver peer instance */ | 416 | struct bfad_rport_s *rp_drv; /* driver peer instance */ |
407 | u32 pid; /* port ID of rport */ | 417 | u32 pid; /* port ID of rport */ |
408 | u16 maxfrsize; /* maximum frame size */ | 418 | u16 maxfrsize; /* maximum frame size */ |
409 | u16 reply_oxid; /* OX_ID of inbound requests */ | 419 | __be16 reply_oxid; /* OX_ID of inbound requests */ |
410 | enum fc_cos fc_cos; /* FC classes of service supp */ | 420 | enum fc_cos fc_cos; /* FC classes of service supp */ |
411 | bfa_boolean_t cisc; /* CISC capable device */ | 421 | bfa_boolean_t cisc; /* CISC capable device */ |
412 | bfa_boolean_t prlo; /* processing prlo or LOGO */ | 422 | bfa_boolean_t prlo; /* processing prlo or LOGO */ |
@@ -437,32 +447,18 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport) | |||
437 | /* | 447 | /* |
438 | * bfa fcs rport API functions | 448 | * bfa fcs rport API functions |
439 | */ | 449 | */ |
440 | bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, | ||
441 | struct bfa_fcs_rport_s *rport, | ||
442 | struct bfad_rport_s *rport_drv); | ||
443 | bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport); | ||
444 | void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, | ||
445 | struct bfa_rport_attr_s *attr); | ||
446 | void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, | ||
447 | struct bfa_rport_stats_s *stats); | ||
448 | void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport); | ||
449 | struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, | 450 | struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, |
450 | wwn_t rpwwn); | 451 | wwn_t rpwwn); |
451 | struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( | 452 | struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn( |
452 | struct bfa_fcs_lport_s *port, wwn_t rnwwn); | 453 | struct bfa_fcs_lport_s *port, wwn_t rnwwn); |
453 | void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); | 454 | void bfa_fcs_rport_set_del_timeout(u8 rport_tmo); |
454 | 455 | ||
455 | void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, | ||
456 | enum bfa_port_speed speed); | ||
457 | void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, | 456 | void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, |
458 | struct fchs_s *fchs, u16 len); | 457 | struct fchs_s *fchs, u16 len); |
459 | void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); | 458 | void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport); |
460 | 459 | ||
461 | struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, | 460 | struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, |
462 | u32 pid); | 461 | u32 pid); |
463 | void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport); | ||
464 | void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport); | ||
465 | void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport); | ||
466 | void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, | 462 | void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, |
467 | struct fc_logi_s *plogi_rsp); | 463 | struct fc_logi_s *plogi_rsp); |
468 | void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, | 464 | void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, |
@@ -470,10 +466,8 @@ void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, | |||
470 | struct fc_logi_s *plogi); | 466 | struct fc_logi_s *plogi); |
471 | void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, | 467 | void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs, |
472 | struct fc_logi_s *plogi); | 468 | struct fc_logi_s *plogi); |
473 | void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport); | 469 | void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id); |
474 | void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id); | ||
475 | 470 | ||
476 | void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport); | ||
477 | void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); | 471 | void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport); |
478 | void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport); | 472 | void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport); |
479 | int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport); | 473 | int bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport); |
@@ -618,7 +612,7 @@ struct bfa_fcs_fdmi_hba_attr_s { | |||
618 | u8 option_rom_ver[BFA_VERSION_LEN]; | 612 | u8 option_rom_ver[BFA_VERSION_LEN]; |
619 | u8 fw_version[8]; | 613 | u8 fw_version[8]; |
620 | u8 os_name[256]; | 614 | u8 os_name[256]; |
621 | u32 max_ct_pyld; | 615 | __be32 max_ct_pyld; |
622 | }; | 616 | }; |
623 | 617 | ||
624 | /* | 618 | /* |
@@ -626,9 +620,9 @@ struct bfa_fcs_fdmi_hba_attr_s { | |||
626 | */ | 620 | */ |
627 | struct bfa_fcs_fdmi_port_attr_s { | 621 | struct bfa_fcs_fdmi_port_attr_s { |
628 | u8 supp_fc4_types[32]; /* supported FC4 types */ | 622 | u8 supp_fc4_types[32]; /* supported FC4 types */ |
629 | u32 supp_speed; /* supported speed */ | 623 | __be32 supp_speed; /* supported speed */ |
630 | u32 curr_speed; /* current Speed */ | 624 | __be32 curr_speed; /* current Speed */ |
631 | u32 max_frm_size; /* max frame size */ | 625 | __be32 max_frm_size; /* max frame size */ |
632 | u8 os_device_name[256]; /* OS device Name */ | 626 | u8 os_device_name[256]; /* OS device Name */ |
633 | u8 host_name[256]; /* host name */ | 627 | u8 host_name[256]; /* host name */ |
634 | }; | 628 | }; |
@@ -664,6 +658,57 @@ struct bfa_fcs_s { | |||
664 | }; | 658 | }; |
665 | 659 | ||
666 | /* | 660 | /* |
661 | * fcs_fabric_sm fabric state machine functions | ||
662 | */ | ||
663 | |||
664 | /* | ||
665 | * Fabric state machine events | ||
666 | */ | ||
667 | enum bfa_fcs_fabric_event { | ||
668 | BFA_FCS_FABRIC_SM_CREATE = 1, /* create from driver */ | ||
669 | BFA_FCS_FABRIC_SM_DELETE = 2, /* delete from driver */ | ||
670 | BFA_FCS_FABRIC_SM_LINK_DOWN = 3, /* link down from port */ | ||
671 | BFA_FCS_FABRIC_SM_LINK_UP = 4, /* link up from port */ | ||
672 | BFA_FCS_FABRIC_SM_CONT_OP = 5, /* flogi/auth continue op */ | ||
673 | BFA_FCS_FABRIC_SM_RETRY_OP = 6, /* flogi/auth retry op */ | ||
674 | BFA_FCS_FABRIC_SM_NO_FABRIC = 7, /* from flogi/auth */ | ||
675 | BFA_FCS_FABRIC_SM_PERF_EVFP = 8, /* from flogi/auth */ | ||
676 | BFA_FCS_FABRIC_SM_ISOLATE = 9, /* from EVFP processing */ | ||
677 | BFA_FCS_FABRIC_SM_NO_TAGGING = 10, /* no VFT tagging from EVFP */ | ||
678 | BFA_FCS_FABRIC_SM_DELAYED = 11, /* timeout delay event */ | ||
679 | BFA_FCS_FABRIC_SM_AUTH_FAILED = 12, /* auth failed */ | ||
680 | BFA_FCS_FABRIC_SM_AUTH_SUCCESS = 13, /* auth successful */ | ||
681 | BFA_FCS_FABRIC_SM_DELCOMP = 14, /* all vports deleted event */ | ||
682 | BFA_FCS_FABRIC_SM_LOOPBACK = 15, /* Received our own FLOGI */ | ||
683 | BFA_FCS_FABRIC_SM_START = 16, /* from driver */ | ||
684 | }; | ||
685 | |||
686 | /* | ||
687 | * fcs_rport_sm FCS rport state machine events | ||
688 | */ | ||
689 | |||
690 | enum rport_event { | ||
691 | RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */ | ||
692 | RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ | ||
693 | RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ | ||
694 | RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ | ||
695 | RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ | ||
696 | RPSM_EVENT_FCXP_SENT = 6, /* Frame has been sent */ | ||
697 | RPSM_EVENT_DELETE = 7, /* RPORT delete request */ | ||
698 | RPSM_EVENT_SCN = 8, /* state change notification */ | ||
699 | RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ | ||
700 | RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ | ||
701 | RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ | ||
702 | RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ | ||
703 | RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ | ||
704 | RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ | ||
705 | RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ | ||
706 | RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ | ||
707 | RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ | ||
708 | RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ | ||
709 | }; | ||
710 | |||
711 | /* | ||
667 | * bfa fcs API functions | 712 | * bfa fcs API functions |
668 | */ | 713 | */ |
669 | void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, | 714 | void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, |
@@ -672,16 +717,12 @@ void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, | |||
672 | void bfa_fcs_init(struct bfa_fcs_s *fcs); | 717 | void bfa_fcs_init(struct bfa_fcs_s *fcs); |
673 | void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, | 718 | void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, |
674 | struct bfa_fcs_driver_info_s *driver_info); | 719 | struct bfa_fcs_driver_info_s *driver_info); |
675 | void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable); | ||
676 | void bfa_fcs_exit(struct bfa_fcs_s *fcs); | 720 | void bfa_fcs_exit(struct bfa_fcs_s *fcs); |
677 | void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); | ||
678 | void bfa_fcs_start(struct bfa_fcs_s *fcs); | ||
679 | 721 | ||
680 | /* | 722 | /* |
681 | * bfa fcs vf public functions | 723 | * bfa fcs vf public functions |
682 | */ | 724 | */ |
683 | bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); | 725 | bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); |
684 | u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); | ||
685 | 726 | ||
686 | /* | 727 | /* |
687 | * fabric protected interface functions | 728 | * fabric protected interface functions |
@@ -689,32 +730,29 @@ u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); | |||
689 | void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); | 730 | void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs); |
690 | void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); | 731 | void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs); |
691 | void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); | 732 | void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs); |
692 | void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs); | ||
693 | void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); | 733 | void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric); |
694 | void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); | 734 | void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric); |
695 | void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, | 735 | void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, |
696 | struct bfa_fcs_vport_s *vport); | 736 | struct bfa_fcs_vport_s *vport); |
697 | void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, | 737 | void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, |
698 | struct bfa_fcs_vport_s *vport); | 738 | struct bfa_fcs_vport_s *vport); |
699 | int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric); | ||
700 | struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup( | 739 | struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup( |
701 | struct bfa_fcs_fabric_s *fabric, wwn_t pwwn); | 740 | struct bfa_fcs_fabric_s *fabric, wwn_t pwwn); |
702 | void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs); | 741 | void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs); |
703 | void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, | 742 | void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, |
704 | struct fchs_s *fchs, u16 len); | 743 | struct fchs_s *fchs, u16 len); |
705 | bfa_boolean_t bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric); | ||
706 | bfa_boolean_t bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric); | ||
707 | enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric); | ||
708 | void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); | 744 | void bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric); |
709 | void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric); | ||
710 | bfa_status_t bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, | ||
711 | struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg, | ||
712 | struct bfad_vf_s *vf_drv); | ||
713 | void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, | 745 | void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, |
714 | wwn_t fabric_name); | 746 | wwn_t fabric_name); |
715 | u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); | 747 | u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); |
716 | void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); | 748 | void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); |
717 | void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); | 749 | void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); |
750 | void bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, | ||
751 | enum bfa_fcs_fabric_event event); | ||
752 | void bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, | ||
753 | enum bfa_fcs_fabric_event event); | ||
754 | void bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, | ||
755 | enum bfa_fcs_fabric_event event); | ||
718 | 756 | ||
719 | /* | 757 | /* |
720 | * BFA FCS callback interfaces | 758 | * BFA FCS callback interfaces |
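The header hunk above drops the bfa_fcs_fabric_is_online(), _is_loopback(), _is_auth_failed() and _port_type() wrapper declarations and instead exports the fabric state-machine handlers (bfa_fcs_fabric_sm_online, _sm_loopback, _sm_auth_failed); later hunks in bfa_fcs_lport.c query the fabric directly with bfa_sm_cmp_state(). A minimal sketch of that convention follows; every name in it is invented for illustration, since the driver's real types and macros are not shown in this patch.

/*
 * Illustrative sketch only, not part of the commit.  Shows the
 * "current state == current handler function" convention that
 * bfa_sm_cmp_state() relies on.
 */
#include <stdbool.h>

struct demo_fabric;
typedef void (*demo_sm_t)(struct demo_fabric *fabric, int event);

struct demo_fabric {
	demo_sm_t sm;			/* current state is the current handler */
};

static void demo_sm_online(struct demo_fabric *fabric, int event)
{
	(void)fabric; (void)event;	/* state handler body elided */
}

/* Equivalent of bfa_sm_cmp_state(): a state query is a pointer compare. */
static bool demo_sm_cmp_state(struct demo_fabric *fabric, demo_sm_t state)
{
	return fabric->sm == state;
}

/* What a removed wrapper such as bfa_fcs_fabric_is_online() boiled down to. */
static bool demo_fabric_is_online(struct demo_fabric *fabric)
{
	return demo_sm_cmp_state(fabric, demo_sm_online);
}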
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index 413b58eef93a..e7b49f4cb51f 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c | |||
@@ -19,9 +19,9 @@ | |||
19 | * fcpim.c - FCP initiator mode i-t nexus state machine | 19 | * fcpim.c - FCP initiator mode i-t nexus state machine |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include "bfad_drv.h" | ||
22 | #include "bfa_fcs.h" | 23 | #include "bfa_fcs.h" |
23 | #include "bfa_fcbuild.h" | 24 | #include "bfa_fcbuild.h" |
24 | #include "bfad_drv.h" | ||
25 | #include "bfad_im.h" | 25 | #include "bfad_im.h" |
26 | 26 | ||
27 | BFA_TRC_FILE(FCS, FCPIM); | 27 | BFA_TRC_FILE(FCS, FCPIM); |
@@ -103,7 +103,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, | |||
103 | break; | 103 | break; |
104 | 104 | ||
105 | case BFA_FCS_ITNIM_SM_OFFLINE: | 105 | case BFA_FCS_ITNIM_SM_OFFLINE: |
106 | bfa_fcs_rport_itnim_ack(itnim->rport); | 106 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
107 | break; | 107 | break; |
108 | 108 | ||
109 | case BFA_FCS_ITNIM_SM_INITIATOR: | 109 | case BFA_FCS_ITNIM_SM_INITIATOR: |
@@ -140,7 +140,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, | |||
140 | case BFA_FCS_ITNIM_SM_OFFLINE: | 140 | case BFA_FCS_ITNIM_SM_OFFLINE: |
141 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 141 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
142 | bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); | 142 | bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); |
143 | bfa_fcs_rport_itnim_ack(itnim->rport); | 143 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
144 | break; | 144 | break; |
145 | 145 | ||
146 | case BFA_FCS_ITNIM_SM_DELETE: | 146 | case BFA_FCS_ITNIM_SM_DELETE: |
@@ -181,7 +181,7 @@ bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, | |||
181 | case BFA_FCS_ITNIM_SM_OFFLINE: | 181 | case BFA_FCS_ITNIM_SM_OFFLINE: |
182 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 182 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
183 | bfa_fcxp_discard(itnim->fcxp); | 183 | bfa_fcxp_discard(itnim->fcxp); |
184 | bfa_fcs_rport_itnim_ack(itnim->rport); | 184 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
185 | break; | 185 | break; |
186 | 186 | ||
187 | case BFA_FCS_ITNIM_SM_INITIATOR: | 187 | case BFA_FCS_ITNIM_SM_INITIATOR: |
@@ -217,7 +217,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, | |||
217 | } else { | 217 | } else { |
218 | /* invoke target offline */ | 218 | /* invoke target offline */ |
219 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 219 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
220 | bfa_fcs_rport_logo_imp(itnim->rport); | 220 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); |
221 | } | 221 | } |
222 | break; | 222 | break; |
223 | 223 | ||
@@ -225,7 +225,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, | |||
225 | case BFA_FCS_ITNIM_SM_OFFLINE: | 225 | case BFA_FCS_ITNIM_SM_OFFLINE: |
226 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 226 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
227 | bfa_timer_stop(&itnim->timer); | 227 | bfa_timer_stop(&itnim->timer); |
228 | bfa_fcs_rport_itnim_ack(itnim->rport); | 228 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
229 | break; | 229 | break; |
230 | 230 | ||
231 | case BFA_FCS_ITNIM_SM_INITIATOR: | 231 | case BFA_FCS_ITNIM_SM_INITIATOR: |
@@ -269,7 +269,7 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, | |||
269 | case BFA_FCS_ITNIM_SM_OFFLINE: | 269 | case BFA_FCS_ITNIM_SM_OFFLINE: |
270 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 270 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
271 | bfa_itnim_offline(itnim->bfa_itnim); | 271 | bfa_itnim_offline(itnim->bfa_itnim); |
272 | bfa_fcs_rport_itnim_ack(itnim->rport); | 272 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
273 | break; | 273 | break; |
274 | 274 | ||
275 | case BFA_FCS_ITNIM_SM_DELETE: | 275 | case BFA_FCS_ITNIM_SM_DELETE: |
@@ -330,7 +330,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, | |||
330 | switch (event) { | 330 | switch (event) { |
331 | case BFA_FCS_ITNIM_SM_HCB_OFFLINE: | 331 | case BFA_FCS_ITNIM_SM_HCB_OFFLINE: |
332 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 332 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
333 | bfa_fcs_rport_itnim_ack(itnim->rport); | 333 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
334 | break; | 334 | break; |
335 | 335 | ||
336 | case BFA_FCS_ITNIM_SM_DELETE: | 336 | case BFA_FCS_ITNIM_SM_DELETE: |
@@ -358,7 +358,7 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, | |||
358 | switch (event) { | 358 | switch (event) { |
359 | case BFA_FCS_ITNIM_SM_OFFLINE: | 359 | case BFA_FCS_ITNIM_SM_OFFLINE: |
360 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); | 360 | bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); |
361 | bfa_fcs_rport_itnim_ack(itnim->rport); | 361 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); |
362 | break; | 362 | break; |
363 | 363 | ||
364 | case BFA_FCS_ITNIM_SM_RSP_ERROR: | 364 | case BFA_FCS_ITNIM_SM_RSP_ERROR: |
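The itnim state-machine hunks above replace the dedicated helpers bfa_fcs_rport_itnim_ack() and bfa_fcs_rport_logo_imp() with bfa_sm_send_event() posts of RPSM_EVENT_FC4_OFFLINE and RPSM_EVENT_LOGO_IMP into the rport state machine. A rough sketch of what such an event post amounts to; the macro body shown is an assumption, not the driver's definition, and the event value 14 simply mirrors the enum removed later in bfa_fcs_rport.c.

/*
 * Illustrative sketch, not the driver's real macros: posting an event
 * into a function-pointer state machine.  All names are invented.
 */
struct demo_rport;
typedef void (*demo_rport_sm_t)(struct demo_rport *rp, int event);

enum { DEMO_RPSM_EVENT_FC4_OFFLINE = 14 };

struct demo_rport {
	demo_rport_sm_t sm;		/* current rport state handler */
};

/* Assumed shape of bfa_sm_send_event(): invoke the current state handler. */
#define demo_sm_send_event(_obj, _event)	((_obj)->sm((_obj), (_event)))

static void demo_rport_sm_online(struct demo_rport *rp, int event)
{
	(void)rp; (void)event;		/* would transition on FC4_OFFLINE, etc. */
}

static void demo_itnim_went_offline(struct demo_rport *rp)
{
	/* Old style: call a dedicated helper such as bfa_fcs_rport_itnim_ack().
	 * New style: post the event and let the rport state machine decide. */
	demo_sm_send_event(rp, DEMO_RPSM_EVENT_FC4_OFFLINE);
}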
@@ -536,7 +536,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) | |||
536 | if (bfa_itnim == NULL) { | 536 | if (bfa_itnim == NULL) { |
537 | bfa_trc(port->fcs, rport->pwwn); | 537 | bfa_trc(port->fcs, rport->pwwn); |
538 | bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv); | 538 | bfa_fcb_itnim_free(port->fcs->bfad, itnim_drv); |
539 | bfa_assert(0); | 539 | WARN_ON(1); |
540 | return NULL; | 540 | return NULL; |
541 | } | 541 | } |
542 | 542 | ||
@@ -688,7 +688,7 @@ bfa_cb_itnim_sler(void *cb_arg) | |||
688 | 688 | ||
689 | itnim->stats.sler++; | 689 | itnim->stats.sler++; |
690 | bfa_trc(itnim->fcs, itnim->rport->pwwn); | 690 | bfa_trc(itnim->fcs, itnim->rport->pwwn); |
691 | bfa_fcs_rport_logo_imp(itnim->rport); | 691 | bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); |
692 | } | 692 | } |
693 | 693 | ||
694 | struct bfa_fcs_itnim_s * | 694 | struct bfa_fcs_itnim_s * |
@@ -700,7 +700,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) | |||
700 | if (!rport) | 700 | if (!rport) |
701 | return NULL; | 701 | return NULL; |
702 | 702 | ||
703 | bfa_assert(rport->itnim != NULL); | 703 | WARN_ON(rport->itnim == NULL); |
704 | return rport->itnim; | 704 | return rport->itnim; |
705 | } | 705 | } |
706 | 706 | ||
@@ -729,7 +729,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, | |||
729 | { | 729 | { |
730 | struct bfa_fcs_itnim_s *itnim = NULL; | 730 | struct bfa_fcs_itnim_s *itnim = NULL; |
731 | 731 | ||
732 | bfa_assert(port != NULL); | 732 | WARN_ON(port == NULL); |
733 | 733 | ||
734 | itnim = bfa_fcs_itnim_lookup(port, rpwwn); | 734 | itnim = bfa_fcs_itnim_lookup(port, rpwwn); |
735 | 735 | ||
@@ -746,7 +746,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) | |||
746 | { | 746 | { |
747 | struct bfa_fcs_itnim_s *itnim = NULL; | 747 | struct bfa_fcs_itnim_s *itnim = NULL; |
748 | 748 | ||
749 | bfa_assert(port != NULL); | 749 | WARN_ON(port == NULL); |
750 | 750 | ||
751 | itnim = bfa_fcs_itnim_lookup(port, rpwwn); | 751 | itnim = bfa_fcs_itnim_lookup(port, rpwwn); |
752 | 752 | ||
@@ -778,6 +778,6 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, | |||
778 | break; | 778 | break; |
779 | 779 | ||
780 | default: | 780 | default: |
781 | bfa_assert(0); | 781 | WARN_ON(1); |
782 | } | 782 | } |
783 | } | 783 | } |
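Throughout this file the driver-private bfa_assert() is replaced by the kernel's WARN_ON(), and the sense of the argument inverts: bfa_assert(cond) complained when cond was false, while WARN_ON(cond) warns when cond is true. That is why bfa_assert(rport->itnim != NULL) becomes WARN_ON(rport->itnim == NULL) and bfa_assert(0) becomes WARN_ON(1). A self-contained sketch of the same invariant under both spellings; demo_warn_on() is only a stand-in for WARN_ON() with assumed semantics.

/*
 * Illustrative only; demo_warn_on() stands in for the kernel's WARN_ON()
 * and demo_rport stands in for the driver's rport structure.
 */
#include <stdio.h>
#include <stddef.h>

#define demo_warn_on(cond) \
	do { if (cond) fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__); } while (0)

struct demo_rport {
	void *itnim;
};

static void *demo_itnim_lookup(struct demo_rport *rport)
{
	/* Old: bfa_assert(rport->itnim != NULL)  -- fired when the condition was FALSE.
	 * New: WARN_ON(rport->itnim == NULL)     -- fires when the condition is TRUE.
	 * Same invariant, opposite sense of the argument. */
	demo_warn_on(rport->itnim == NULL);
	return rport->itnim;
}

int main(void)
{
	struct demo_rport r = { .itnim = NULL };
	return demo_itnim_lookup(&r) ? 0 : 1;	/* warns but keeps running */
}

Unlike a hard assert, WARN_ON() logs and continues, which fits the return-NULL fallback kept after WARN_ON(1) in bfa_fcs_itnim_create().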
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 8d651309302b..4e2eb92ba028 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c | |||
@@ -15,10 +15,10 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_fcs.h" | 19 | #include "bfa_fcs.h" |
19 | #include "bfa_fcbuild.h" | 20 | #include "bfa_fcbuild.h" |
20 | #include "bfa_fc.h" | 21 | #include "bfa_fc.h" |
21 | #include "bfad_drv.h" | ||
22 | 22 | ||
23 | BFA_TRC_FILE(FCS, PORT); | 23 | BFA_TRC_FILE(FCS, PORT); |
24 | 24 | ||
@@ -159,7 +159,7 @@ bfa_fcs_lport_sm_online( | |||
159 | bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); | 159 | bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); |
160 | list_for_each_safe(qe, qen, &port->rport_q) { | 160 | list_for_each_safe(qe, qen, &port->rport_q) { |
161 | rport = (struct bfa_fcs_rport_s *) qe; | 161 | rport = (struct bfa_fcs_rport_s *) qe; |
162 | bfa_fcs_rport_delete(rport); | 162 | bfa_sm_send_event(rport, RPSM_EVENT_DELETE); |
163 | } | 163 | } |
164 | } | 164 | } |
165 | break; | 165 | break; |
@@ -197,7 +197,7 @@ bfa_fcs_lport_sm_offline( | |||
197 | bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); | 197 | bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting); |
198 | list_for_each_safe(qe, qen, &port->rport_q) { | 198 | list_for_each_safe(qe, qen, &port->rport_q) { |
199 | rport = (struct bfa_fcs_rport_s *) qe; | 199 | rport = (struct bfa_fcs_rport_s *) qe; |
200 | bfa_fcs_rport_delete(rport); | 200 | bfa_sm_send_event(rport, RPSM_EVENT_DELETE); |
201 | } | 201 | } |
202 | } | 202 | } |
203 | break; | 203 | break; |
@@ -309,6 +309,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, | |||
309 | return; | 309 | return; |
310 | } | 310 | } |
311 | port->pid = rx_fchs->d_id; | 311 | port->pid = rx_fchs->d_id; |
312 | bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id); | ||
312 | } | 313 | } |
313 | 314 | ||
314 | /* | 315 | /* |
@@ -323,6 +324,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, | |||
323 | (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), | 324 | (memcmp((void *)&bfa_fcs_lport_get_pwwn(port), |
324 | (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { | 325 | (void *)&plogi->port_name, sizeof(wwn_t)) < 0)) { |
325 | port->pid = rx_fchs->d_id; | 326 | port->pid = rx_fchs->d_id; |
327 | bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id); | ||
326 | rport->pid = rx_fchs->s_id; | 328 | rport->pid = rx_fchs->s_id; |
327 | } | 329 | } |
328 | bfa_fcs_rport_plogi(rport, rx_fchs, plogi); | 330 | bfa_fcs_rport_plogi(rport, rx_fchs, plogi); |
@@ -349,8 +351,8 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, | |||
349 | * This is a different device with the same pid. Old device | 351 | * This is a different device with the same pid. Old device |
350 | * disappeared. Send implicit LOGO to old device. | 352 | * disappeared. Send implicit LOGO to old device. |
351 | */ | 353 | */ |
352 | bfa_assert(rport->pwwn != plogi->port_name); | 354 | WARN_ON(rport->pwwn == plogi->port_name); |
353 | bfa_fcs_rport_logo_imp(rport); | 355 | bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); |
354 | 356 | ||
355 | /* | 357 | /* |
356 | * Inbound PLOGI from a new device (with old PID). | 358 | * Inbound PLOGI from a new device (with old PID). |
@@ -362,7 +364,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, | |||
362 | /* | 364 | /* |
363 | * PLOGI crossing each other. | 365 | * PLOGI crossing each other. |
364 | */ | 366 | */ |
365 | bfa_assert(rport->pwwn == WWN_NULL); | 367 | WARN_ON(rport->pwwn != WWN_NULL); |
366 | bfa_fcs_rport_plogi(rport, rx_fchs, plogi); | 368 | bfa_fcs_rport_plogi(rport, rx_fchs, plogi); |
367 | } | 369 | } |
368 | 370 | ||
@@ -511,7 +513,8 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port) | |||
511 | __port_action[port->fabric->fab_type].offline(port); | 513 | __port_action[port->fabric->fab_type].offline(port); |
512 | 514 | ||
513 | wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); | 515 | wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port)); |
514 | if (bfa_fcs_fabric_is_online(port->fabric) == BFA_TRUE) | 516 | if (bfa_sm_cmp_state(port->fabric, |
517 | bfa_fcs_fabric_sm_online) == BFA_TRUE) | ||
515 | BFA_LOG(KERN_ERR, bfad, bfa_log_level, | 518 | BFA_LOG(KERN_ERR, bfad, bfa_log_level, |
516 | "Logical port lost fabric connectivity: WWN = %s Role = %s\n", | 519 | "Logical port lost fabric connectivity: WWN = %s Role = %s\n", |
517 | lpwwn_buf, "Initiator"); | 520 | lpwwn_buf, "Initiator"); |
@@ -522,26 +525,26 @@ bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port) | |||
522 | 525 | ||
523 | list_for_each_safe(qe, qen, &port->rport_q) { | 526 | list_for_each_safe(qe, qen, &port->rport_q) { |
524 | rport = (struct bfa_fcs_rport_s *) qe; | 527 | rport = (struct bfa_fcs_rport_s *) qe; |
525 | bfa_fcs_rport_offline(rport); | 528 | bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); |
526 | } | 529 | } |
527 | } | 530 | } |
528 | 531 | ||
529 | static void | 532 | static void |
530 | bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port) | 533 | bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port) |
531 | { | 534 | { |
532 | bfa_assert(0); | 535 | WARN_ON(1); |
533 | } | 536 | } |
534 | 537 | ||
535 | static void | 538 | static void |
536 | bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port) | 539 | bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port) |
537 | { | 540 | { |
538 | bfa_assert(0); | 541 | WARN_ON(1); |
539 | } | 542 | } |
540 | 543 | ||
541 | static void | 544 | static void |
542 | bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port) | 545 | bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port) |
543 | { | 546 | { |
544 | bfa_assert(0); | 547 | WARN_ON(1); |
545 | } | 548 | } |
546 | 549 | ||
547 | static void | 550 | static void |
@@ -584,33 +587,11 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port) | |||
584 | port->vport ? port->vport->vport_drv : NULL); | 587 | port->vport ? port->vport->vport_drv : NULL); |
585 | bfa_fcs_vport_delete_comp(port->vport); | 588 | bfa_fcs_vport_delete_comp(port->vport); |
586 | } else { | 589 | } else { |
587 | bfa_fcs_fabric_port_delete_comp(port->fabric); | 590 | bfa_wc_down(&port->fabric->wc); |
588 | } | 591 | } |
589 | } | 592 | } |
590 | 593 | ||
591 | 594 | ||
592 | |||
593 | /* | ||
594 | * fcs_lport_api BFA FCS port API | ||
595 | */ | ||
596 | /* | ||
597 | * Module initialization | ||
598 | */ | ||
599 | void | ||
600 | bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs) | ||
601 | { | ||
602 | |||
603 | } | ||
604 | |||
605 | /* | ||
606 | * Module cleanup | ||
607 | */ | ||
608 | void | ||
609 | bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs) | ||
610 | { | ||
611 | bfa_fcs_modexit_comp(fcs); | ||
612 | } | ||
613 | |||
614 | /* | 595 | /* |
615 | * Unsolicited frame receive handling. | 596 | * Unsolicited frame receive handling. |
616 | */ | 597 | */ |
@@ -623,6 +604,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, | |||
623 | struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); | 604 | struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1); |
624 | 605 | ||
625 | bfa_stats(lport, uf_recvs); | 606 | bfa_stats(lport, uf_recvs); |
607 | bfa_trc(lport->fcs, fchs->type); | ||
626 | 608 | ||
627 | if (!bfa_fcs_lport_is_online(lport)) { | 609 | if (!bfa_fcs_lport_is_online(lport)) { |
628 | bfa_stats(lport, uf_recv_drops); | 610 | bfa_stats(lport, uf_recv_drops); |
@@ -682,8 +664,11 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, | |||
682 | * Only handles ELS frames for now. | 664 | * Only handles ELS frames for now. |
683 | */ | 665 | */ |
684 | if (fchs->type != FC_TYPE_ELS) { | 666 | if (fchs->type != FC_TYPE_ELS) { |
685 | bfa_trc(lport->fcs, fchs->type); | 667 | bfa_trc(lport->fcs, fchs->s_id); |
686 | bfa_assert(0); | 668 | bfa_trc(lport->fcs, fchs->d_id); |
669 | /* ignore type FC_TYPE_FC_FSS */ | ||
670 | if (fchs->type != FC_TYPE_FC_FSS) | ||
671 | bfa_sm_fault(lport->fcs, fchs->type); | ||
687 | return; | 672 | return; |
688 | } | 673 | } |
689 | 674 | ||
@@ -792,7 +777,7 @@ bfa_fcs_lport_del_rport( | |||
792 | struct bfa_fcs_lport_s *port, | 777 | struct bfa_fcs_lport_s *port, |
793 | struct bfa_fcs_rport_s *rport) | 778 | struct bfa_fcs_rport_s *rport) |
794 | { | 779 | { |
795 | bfa_assert(bfa_q_is_on_q(&port->rport_q, rport)); | 780 | WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport)); |
796 | list_del(&rport->qe); | 781 | list_del(&rport->qe); |
797 | port->num_rports--; | 782 | port->num_rports--; |
798 | 783 | ||
@@ -850,8 +835,8 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, | |||
850 | lport->fcs = fcs; | 835 | lport->fcs = fcs; |
851 | lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); | 836 | lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id); |
852 | lport->vport = vport; | 837 | lport->vport = vport; |
853 | lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) : | 838 | lport->lp_tag = (vport) ? vport->lps->lp_tag : |
854 | bfa_lps_get_tag(lport->fabric->lps); | 839 | lport->fabric->lps->lp_tag; |
855 | 840 | ||
856 | INIT_LIST_HEAD(&lport->rport_q); | 841 | INIT_LIST_HEAD(&lport->rport_q); |
857 | lport->num_rports = 0; | 842 | lport->num_rports = 0; |
@@ -903,10 +888,12 @@ bfa_fcs_lport_get_attr( | |||
903 | port_attr->port_cfg = port->port_cfg; | 888 | port_attr->port_cfg = port->port_cfg; |
904 | 889 | ||
905 | if (port->fabric) { | 890 | if (port->fabric) { |
906 | port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric); | 891 | port_attr->port_type = port->fabric->oper_type; |
907 | port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric); | 892 | port_attr->loopback = bfa_sm_cmp_state(port->fabric, |
893 | bfa_fcs_fabric_sm_loopback); | ||
908 | port_attr->authfail = | 894 | port_attr->authfail = |
909 | bfa_fcs_fabric_is_auth_failed(port->fabric); | 895 | bfa_sm_cmp_state(port->fabric, |
896 | bfa_fcs_fabric_sm_auth_failed); | ||
910 | port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port); | 897 | port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port); |
911 | memcpy(port_attr->fabric_ip_addr, | 898 | memcpy(port_attr->fabric_ip_addr, |
912 | bfa_fcs_lport_get_fabric_ipaddr(port), | 899 | bfa_fcs_lport_get_fabric_ipaddr(port), |
@@ -915,10 +902,10 @@ bfa_fcs_lport_get_attr( | |||
915 | if (port->vport != NULL) { | 902 | if (port->vport != NULL) { |
916 | port_attr->port_type = BFA_PORT_TYPE_VPORT; | 903 | port_attr->port_type = BFA_PORT_TYPE_VPORT; |
917 | port_attr->fpma_mac = | 904 | port_attr->fpma_mac = |
918 | bfa_lps_get_lp_mac(port->vport->lps); | 905 | port->vport->lps->lp_mac; |
919 | } else { | 906 | } else { |
920 | port_attr->fpma_mac = | 907 | port_attr->fpma_mac = |
921 | bfa_lps_get_lp_mac(port->fabric->lps); | 908 | port->fabric->lps->lp_mac; |
922 | } | 909 | } |
923 | } else { | 910 | } else { |
924 | port_attr->port_type = BFA_PORT_TYPE_UNKNOWN; | 911 | port_attr->port_type = BFA_PORT_TYPE_UNKNOWN; |
@@ -998,6 +985,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) | |||
998 | ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, | 985 | ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, |
999 | sizeof(wwn_t)) > 0) { | 986 | sizeof(wwn_t)) > 0) { |
1000 | port->pid = N2N_LOCAL_PID; | 987 | port->pid = N2N_LOCAL_PID; |
988 | bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID); | ||
1001 | /* | 989 | /* |
1002 | * First, check if we know the device by pwwn. | 990 | * First, check if we know the device by pwwn. |
1003 | */ | 991 | */ |
@@ -1007,7 +995,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) | |||
1007 | bfa_trc(port->fcs, rport->pid); | 995 | bfa_trc(port->fcs, rport->pid); |
1008 | bfa_trc(port->fcs, rport->pwwn); | 996 | bfa_trc(port->fcs, rport->pwwn); |
1009 | rport->pid = N2N_REMOTE_PID; | 997 | rport->pid = N2N_REMOTE_PID; |
1010 | bfa_fcs_rport_online(rport); | 998 | bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); |
1011 | return; | 999 | return; |
1012 | } | 1000 | } |
1013 | 1001 | ||
@@ -1017,10 +1005,10 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) | |||
1017 | */ | 1005 | */ |
1018 | if (port->num_rports > 0) { | 1006 | if (port->num_rports > 0) { |
1019 | rport = bfa_fcs_lport_get_rport_by_pid(port, 0); | 1007 | rport = bfa_fcs_lport_get_rport_by_pid(port, 0); |
1020 | bfa_assert(rport != NULL); | 1008 | WARN_ON(rport == NULL); |
1021 | if (rport) { | 1009 | if (rport) { |
1022 | bfa_trc(port->fcs, rport->pwwn); | 1010 | bfa_trc(port->fcs, rport->pwwn); |
1023 | bfa_fcs_rport_delete(rport); | 1011 | bfa_sm_send_event(rport, RPSM_EVENT_DELETE); |
1024 | } | 1012 | } |
1025 | } | 1013 | } |
1026 | bfa_fcs_rport_create(port, N2N_REMOTE_PID); | 1014 | bfa_fcs_rport_create(port, N2N_REMOTE_PID); |
@@ -1569,6 +1557,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1569 | struct fdmi_attr_s *attr; | 1557 | struct fdmi_attr_s *attr; |
1570 | u8 *curr_ptr; | 1558 | u8 *curr_ptr; |
1571 | u16 len, count; | 1559 | u16 len, count; |
1560 | u16 templen; | ||
1572 | 1561 | ||
1573 | /* | 1562 | /* |
1574 | * get hba attributes | 1563 | * get hba attributes |
@@ -1594,69 +1583,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1594 | */ | 1583 | */ |
1595 | attr = (struct fdmi_attr_s *) curr_ptr; | 1584 | attr = (struct fdmi_attr_s *) curr_ptr; |
1596 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); | 1585 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); |
1597 | attr->len = sizeof(wwn_t); | 1586 | templen = sizeof(wwn_t); |
1598 | memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len); | 1587 | memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen); |
1599 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1588 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1600 | len += attr->len; | 1589 | len += templen; |
1601 | count++; | 1590 | count++; |
1602 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1591 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1603 | sizeof(attr->len)); | 1592 | sizeof(templen)); |
1604 | 1593 | ||
1605 | /* | 1594 | /* |
1606 | * Manufacturer | 1595 | * Manufacturer |
1607 | */ | 1596 | */ |
1608 | attr = (struct fdmi_attr_s *) curr_ptr; | 1597 | attr = (struct fdmi_attr_s *) curr_ptr; |
1609 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); | 1598 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); |
1610 | attr->len = (u16) strlen(fcs_hba_attr->manufacturer); | 1599 | templen = (u16) strlen(fcs_hba_attr->manufacturer); |
1611 | memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); | 1600 | memcpy(attr->value, fcs_hba_attr->manufacturer, templen); |
1612 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1601 | templen = fc_roundup(templen, sizeof(u32)); |
1613 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1602 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1614 | len += attr->len; | 1603 | len += templen; |
1615 | count++; | 1604 | count++; |
1616 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1605 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1617 | sizeof(attr->len)); | 1606 | sizeof(templen)); |
1618 | 1607 | ||
1619 | /* | 1608 | /* |
1620 | * Serial Number | 1609 | * Serial Number |
1621 | */ | 1610 | */ |
1622 | attr = (struct fdmi_attr_s *) curr_ptr; | 1611 | attr = (struct fdmi_attr_s *) curr_ptr; |
1623 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); | 1612 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); |
1624 | attr->len = (u16) strlen(fcs_hba_attr->serial_num); | 1613 | templen = (u16) strlen(fcs_hba_attr->serial_num); |
1625 | memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); | 1614 | memcpy(attr->value, fcs_hba_attr->serial_num, templen); |
1626 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1615 | templen = fc_roundup(templen, sizeof(u32)); |
1627 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1616 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1628 | len += attr->len; | 1617 | len += templen; |
1629 | count++; | 1618 | count++; |
1630 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1619 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1631 | sizeof(attr->len)); | 1620 | sizeof(templen)); |
1632 | 1621 | ||
1633 | /* | 1622 | /* |
1634 | * Model | 1623 | * Model |
1635 | */ | 1624 | */ |
1636 | attr = (struct fdmi_attr_s *) curr_ptr; | 1625 | attr = (struct fdmi_attr_s *) curr_ptr; |
1637 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); | 1626 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); |
1638 | attr->len = (u16) strlen(fcs_hba_attr->model); | 1627 | templen = (u16) strlen(fcs_hba_attr->model); |
1639 | memcpy(attr->value, fcs_hba_attr->model, attr->len); | 1628 | memcpy(attr->value, fcs_hba_attr->model, templen); |
1640 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1629 | templen = fc_roundup(templen, sizeof(u32)); |
1641 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1630 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1642 | len += attr->len; | 1631 | len += templen; |
1643 | count++; | 1632 | count++; |
1644 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1633 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1645 | sizeof(attr->len)); | 1634 | sizeof(templen)); |
1646 | 1635 | ||
1647 | /* | 1636 | /* |
1648 | * Model Desc | 1637 | * Model Desc |
1649 | */ | 1638 | */ |
1650 | attr = (struct fdmi_attr_s *) curr_ptr; | 1639 | attr = (struct fdmi_attr_s *) curr_ptr; |
1651 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); | 1640 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); |
1652 | attr->len = (u16) strlen(fcs_hba_attr->model_desc); | 1641 | templen = (u16) strlen(fcs_hba_attr->model_desc); |
1653 | memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); | 1642 | memcpy(attr->value, fcs_hba_attr->model_desc, templen); |
1654 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1643 | templen = fc_roundup(templen, sizeof(u32)); |
1655 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1644 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1656 | len += attr->len; | 1645 | len += templen; |
1657 | count++; | 1646 | count++; |
1658 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1647 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1659 | sizeof(attr->len)); | 1648 | sizeof(templen)); |
1660 | 1649 | ||
1661 | /* | 1650 | /* |
1662 | * H/W Version | 1651 | * H/W Version |
@@ -1664,14 +1653,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1664 | if (fcs_hba_attr->hw_version[0] != '\0') { | 1653 | if (fcs_hba_attr->hw_version[0] != '\0') { |
1665 | attr = (struct fdmi_attr_s *) curr_ptr; | 1654 | attr = (struct fdmi_attr_s *) curr_ptr; |
1666 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); | 1655 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); |
1667 | attr->len = (u16) strlen(fcs_hba_attr->hw_version); | 1656 | templen = (u16) strlen(fcs_hba_attr->hw_version); |
1668 | memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); | 1657 | memcpy(attr->value, fcs_hba_attr->hw_version, templen); |
1669 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1658 | templen = fc_roundup(templen, sizeof(u32)); |
1670 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1659 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1671 | len += attr->len; | 1660 | len += templen; |
1672 | count++; | 1661 | count++; |
1673 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1662 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1674 | sizeof(attr->len)); | 1663 | sizeof(templen)); |
1675 | } | 1664 | } |
1676 | 1665 | ||
1677 | /* | 1666 | /* |
@@ -1679,14 +1668,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1679 | */ | 1668 | */ |
1680 | attr = (struct fdmi_attr_s *) curr_ptr; | 1669 | attr = (struct fdmi_attr_s *) curr_ptr; |
1681 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); | 1670 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); |
1682 | attr->len = (u16) strlen(fcs_hba_attr->driver_version); | 1671 | templen = (u16) strlen(fcs_hba_attr->driver_version); |
1683 | memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); | 1672 | memcpy(attr->value, fcs_hba_attr->driver_version, templen); |
1684 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1673 | templen = fc_roundup(templen, sizeof(u32)); |
1685 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1674 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1686 | len += attr->len; | 1675 | len += templen; |
1687 | count++; | 1676 | count++; |
1688 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1677 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1689 | sizeof(attr->len)); | 1678 | sizeof(templen)); |
1690 | 1679 | ||
1691 | /* | 1680 | /* |
1692 | * Option Rom Version | 1681 | * Option Rom Version |
@@ -1694,14 +1683,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1694 | if (fcs_hba_attr->option_rom_ver[0] != '\0') { | 1683 | if (fcs_hba_attr->option_rom_ver[0] != '\0') { |
1695 | attr = (struct fdmi_attr_s *) curr_ptr; | 1684 | attr = (struct fdmi_attr_s *) curr_ptr; |
1696 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); | 1685 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); |
1697 | attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); | 1686 | templen = (u16) strlen(fcs_hba_attr->option_rom_ver); |
1698 | memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); | 1687 | memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen); |
1699 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1688 | templen = fc_roundup(templen, sizeof(u32)); |
1700 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1689 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1701 | len += attr->len; | 1690 | len += templen; |
1702 | count++; | 1691 | count++; |
1703 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1692 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1704 | sizeof(attr->len)); | 1693 | sizeof(templen)); |
1705 | } | 1694 | } |
1706 | 1695 | ||
1707 | /* | 1696 | /* |
@@ -1709,14 +1698,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1709 | */ | 1698 | */ |
1710 | attr = (struct fdmi_attr_s *) curr_ptr; | 1699 | attr = (struct fdmi_attr_s *) curr_ptr; |
1711 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); | 1700 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); |
1712 | attr->len = (u16) strlen(fcs_hba_attr->driver_version); | 1701 | templen = (u16) strlen(fcs_hba_attr->driver_version); |
1713 | memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); | 1702 | memcpy(attr->value, fcs_hba_attr->driver_version, templen); |
1714 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1703 | templen = fc_roundup(templen, sizeof(u32)); |
1715 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1704 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1716 | len += attr->len; | 1705 | len += templen; |
1717 | count++; | 1706 | count++; |
1718 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1707 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1719 | sizeof(attr->len)); | 1708 | sizeof(templen)); |
1720 | 1709 | ||
1721 | /* | 1710 | /* |
1722 | * OS Name | 1711 | * OS Name |
@@ -1724,14 +1713,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1724 | if (fcs_hba_attr->os_name[0] != '\0') { | 1713 | if (fcs_hba_attr->os_name[0] != '\0') { |
1725 | attr = (struct fdmi_attr_s *) curr_ptr; | 1714 | attr = (struct fdmi_attr_s *) curr_ptr; |
1726 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); | 1715 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); |
1727 | attr->len = (u16) strlen(fcs_hba_attr->os_name); | 1716 | templen = (u16) strlen(fcs_hba_attr->os_name); |
1728 | memcpy(attr->value, fcs_hba_attr->os_name, attr->len); | 1717 | memcpy(attr->value, fcs_hba_attr->os_name, templen); |
1729 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1718 | templen = fc_roundup(templen, sizeof(u32)); |
1730 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1719 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1731 | len += attr->len; | 1720 | len += templen; |
1732 | count++; | 1721 | count++; |
1733 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1722 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1734 | sizeof(attr->len)); | 1723 | sizeof(templen)); |
1735 | } | 1724 | } |
1736 | 1725 | ||
1737 | /* | 1726 | /* |
@@ -1739,12 +1728,12 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) | |||
1739 | */ | 1728 | */ |
1740 | attr = (struct fdmi_attr_s *) curr_ptr; | 1729 | attr = (struct fdmi_attr_s *) curr_ptr; |
1741 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); | 1730 | attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); |
1742 | attr->len = sizeof(fcs_hba_attr->max_ct_pyld); | 1731 | templen = sizeof(fcs_hba_attr->max_ct_pyld); |
1743 | memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); | 1732 | memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen); |
1744 | len += attr->len; | 1733 | len += templen; |
1745 | count++; | 1734 | count++; |
1746 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1735 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1747 | sizeof(attr->len)); | 1736 | sizeof(templen)); |
1748 | 1737 | ||
1749 | /* | 1738 | /* |
1750 | * Update size of payload | 1739 | * Update size of payload |
@@ -1845,6 +1834,7 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
1845 | u8 *curr_ptr; | 1834 | u8 *curr_ptr; |
1846 | u16 len; | 1835 | u16 len; |
1847 | u8 count = 0; | 1836 | u8 count = 0; |
1837 | u16 templen; | ||
1848 | 1838 | ||
1849 | /* | 1839 | /* |
1850 | * get port attributes | 1840 | * get port attributes |
@@ -1863,54 +1853,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
1863 | */ | 1853 | */ |
1864 | attr = (struct fdmi_attr_s *) curr_ptr; | 1854 | attr = (struct fdmi_attr_s *) curr_ptr; |
1865 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); | 1855 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); |
1866 | attr->len = sizeof(fcs_port_attr.supp_fc4_types); | 1856 | templen = sizeof(fcs_port_attr.supp_fc4_types); |
1867 | memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); | 1857 | memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen); |
1868 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1858 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1869 | len += attr->len; | 1859 | len += templen; |
1870 | ++count; | 1860 | ++count; |
1871 | attr->len = | 1861 | attr->len = |
1872 | cpu_to_be16(attr->len + sizeof(attr->type) + | 1862 | cpu_to_be16(templen + sizeof(attr->type) + |
1873 | sizeof(attr->len)); | 1863 | sizeof(templen)); |
1874 | 1864 | ||
1875 | /* | 1865 | /* |
1876 | * Supported Speed | 1866 | * Supported Speed |
1877 | */ | 1867 | */ |
1878 | attr = (struct fdmi_attr_s *) curr_ptr; | 1868 | attr = (struct fdmi_attr_s *) curr_ptr; |
1879 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); | 1869 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); |
1880 | attr->len = sizeof(fcs_port_attr.supp_speed); | 1870 | templen = sizeof(fcs_port_attr.supp_speed); |
1881 | memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); | 1871 | memcpy(attr->value, &fcs_port_attr.supp_speed, templen); |
1882 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1872 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1883 | len += attr->len; | 1873 | len += templen; |
1884 | ++count; | 1874 | ++count; |
1885 | attr->len = | 1875 | attr->len = |
1886 | cpu_to_be16(attr->len + sizeof(attr->type) + | 1876 | cpu_to_be16(templen + sizeof(attr->type) + |
1887 | sizeof(attr->len)); | 1877 | sizeof(templen)); |
1888 | 1878 | ||
1889 | /* | 1879 | /* |
1890 | * current Port Speed | 1880 | * current Port Speed |
1891 | */ | 1881 | */ |
1892 | attr = (struct fdmi_attr_s *) curr_ptr; | 1882 | attr = (struct fdmi_attr_s *) curr_ptr; |
1893 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); | 1883 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); |
1894 | attr->len = sizeof(fcs_port_attr.curr_speed); | 1884 | templen = sizeof(fcs_port_attr.curr_speed); |
1895 | memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); | 1885 | memcpy(attr->value, &fcs_port_attr.curr_speed, templen); |
1896 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1886 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1897 | len += attr->len; | 1887 | len += templen; |
1898 | ++count; | 1888 | ++count; |
1899 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1889 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1900 | sizeof(attr->len)); | 1890 | sizeof(templen)); |
1901 | 1891 | ||
1902 | /* | 1892 | /* |
1903 | * max frame size | 1893 | * max frame size |
1904 | */ | 1894 | */ |
1905 | attr = (struct fdmi_attr_s *) curr_ptr; | 1895 | attr = (struct fdmi_attr_s *) curr_ptr; |
1906 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); | 1896 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); |
1907 | attr->len = sizeof(fcs_port_attr.max_frm_size); | 1897 | templen = sizeof(fcs_port_attr.max_frm_size); |
1908 | memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); | 1898 | memcpy(attr->value, &fcs_port_attr.max_frm_size, templen); |
1909 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1899 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1910 | len += attr->len; | 1900 | len += templen; |
1911 | ++count; | 1901 | ++count; |
1912 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1902 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1913 | sizeof(attr->len)); | 1903 | sizeof(templen)); |
1914 | 1904 | ||
1915 | /* | 1905 | /* |
1916 | * OS Device Name | 1906 | * OS Device Name |
@@ -1918,14 +1908,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
1918 | if (fcs_port_attr.os_device_name[0] != '\0') { | 1908 | if (fcs_port_attr.os_device_name[0] != '\0') { |
1919 | attr = (struct fdmi_attr_s *) curr_ptr; | 1909 | attr = (struct fdmi_attr_s *) curr_ptr; |
1920 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); | 1910 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); |
1921 | attr->len = (u16) strlen(fcs_port_attr.os_device_name); | 1911 | templen = (u16) strlen(fcs_port_attr.os_device_name); |
1922 | memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); | 1912 | memcpy(attr->value, fcs_port_attr.os_device_name, templen); |
1923 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1913 | templen = fc_roundup(templen, sizeof(u32)); |
1924 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1914 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1925 | len += attr->len; | 1915 | len += templen; |
1926 | ++count; | 1916 | ++count; |
1927 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1917 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1928 | sizeof(attr->len)); | 1918 | sizeof(templen)); |
1929 | } | 1919 | } |
1930 | /* | 1920 | /* |
1931 | * Host Name | 1921 | * Host Name |
@@ -1933,14 +1923,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
1933 | if (fcs_port_attr.host_name[0] != '\0') { | 1923 | if (fcs_port_attr.host_name[0] != '\0') { |
1934 | attr = (struct fdmi_attr_s *) curr_ptr; | 1924 | attr = (struct fdmi_attr_s *) curr_ptr; |
1935 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); | 1925 | attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); |
1936 | attr->len = (u16) strlen(fcs_port_attr.host_name); | 1926 | templen = (u16) strlen(fcs_port_attr.host_name); |
1937 | memcpy(attr->value, fcs_port_attr.host_name, attr->len); | 1927 | memcpy(attr->value, fcs_port_attr.host_name, templen); |
1938 | attr->len = fc_roundup(attr->len, sizeof(u32)); | 1928 | templen = fc_roundup(templen, sizeof(u32)); |
1939 | curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; | 1929 | curr_ptr += sizeof(attr->type) + sizeof(templen) + templen; |
1940 | len += attr->len; | 1930 | len += templen; |
1941 | ++count; | 1931 | ++count; |
1942 | attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + | 1932 | attr->len = cpu_to_be16(templen + sizeof(attr->type) + |
1943 | sizeof(attr->len)); | 1933 | sizeof(templen)); |
1944 | } | 1934 | } |
1945 | 1935 | ||
1946 | /* | 1936 | /* |
@@ -2103,7 +2093,7 @@ bfa_fcs_lport_fdmi_timeout(void *arg) | |||
2103 | bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); | 2093 | bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT); |
2104 | } | 2094 | } |
2105 | 2095 | ||
2106 | void | 2096 | static void |
2107 | bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, | 2097 | bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, |
2108 | struct bfa_fcs_fdmi_hba_attr_s *hba_attr) | 2098 | struct bfa_fcs_fdmi_hba_attr_s *hba_attr) |
2109 | { | 2099 | { |
@@ -2147,7 +2137,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, | |||
2147 | hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); | 2137 | hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); |
2148 | } | 2138 | } |
2149 | 2139 | ||
2150 | void | 2140 | static void |
2151 | bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, | 2141 | bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, |
2152 | struct bfa_fcs_fdmi_port_attr_s *port_attr) | 2142 | struct bfa_fcs_fdmi_port_attr_s *port_attr) |
2153 | { | 2143 | { |
@@ -2560,7 +2550,7 @@ bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) | |||
2560 | 2550 | ||
2561 | len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 2551 | len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
2562 | bfa_fcs_lport_get_fcid(port), | 2552 | bfa_fcs_lport_get_fcid(port), |
2563 | bfa_lps_get_peer_nwwn(port->fabric->lps)); | 2553 | port->fabric->lps->pr_nwwn); |
2564 | 2554 | ||
2565 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, | 2555 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, |
2566 | FC_CLASS_3, len, &fchs, | 2556 | FC_CLASS_3, len, &fchs, |
@@ -2760,7 +2750,7 @@ bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) | |||
2760 | 2750 | ||
2761 | len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 2751 | len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
2762 | bfa_fcs_lport_get_fcid(port), | 2752 | bfa_fcs_lport_get_fcid(port), |
2763 | bfa_lps_get_peer_nwwn(port->fabric->lps)); | 2753 | port->fabric->lps->pr_nwwn); |
2764 | 2754 | ||
2765 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, | 2755 | bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, |
2766 | FC_CLASS_3, len, &fchs, | 2756 | FC_CLASS_3, len, &fchs, |
@@ -2836,7 +2826,7 @@ bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced) | |||
2836 | ms->fcxp = fcxp; | 2826 | ms->fcxp = fcxp; |
2837 | 2827 | ||
2838 | len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 2828 | len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
2839 | bfa_os_hton3b(FC_MGMT_SERVER), | 2829 | bfa_hton3b(FC_MGMT_SERVER), |
2840 | bfa_fcs_lport_get_fcid(port), 0, | 2830 | bfa_fcs_lport_get_fcid(port), 0, |
2841 | port->port_cfg.pwwn, port->port_cfg.nwwn, | 2831 | port->port_cfg.pwwn, port->port_cfg.nwwn, |
2842 | bfa_fcport_get_maxfrsize(port->fcs->bfa)); | 2832 | bfa_fcport_get_maxfrsize(port->fcs->bfa)); |
@@ -3593,7 +3583,7 @@ fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs); | |||
3593 | ns->fcxp = fcxp; | 3583 | ns->fcxp = fcxp; |
3594 | 3584 | ||
3595 | len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 3585 | len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
3596 | bfa_os_hton3b(FC_NAME_SERVER), | 3586 | bfa_hton3b(FC_NAME_SERVER), |
3597 | bfa_fcs_lport_get_fcid(port), 0, | 3587 | bfa_fcs_lport_get_fcid(port), 0, |
3598 | port->port_cfg.pwwn, port->port_cfg.nwwn, | 3588 | port->port_cfg.pwwn, port->port_cfg.nwwn, |
3599 | bfa_fcport_get_maxfrsize(port->fcs->bfa)); | 3589 | bfa_fcport_get_maxfrsize(port->fcs->bfa)); |
@@ -4150,7 +4140,7 @@ bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port) | |||
4150 | bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); | 4140 | bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY); |
4151 | } | 4141 | } |
4152 | 4142 | ||
4153 | void | 4143 | static void |
4154 | bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) | 4144 | bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) |
4155 | { | 4145 | { |
4156 | 4146 | ||
@@ -4163,7 +4153,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) | |||
4163 | 4153 | ||
4164 | for (ii = 0 ; ii < nwwns; ++ii) { | 4154 | for (ii = 0 ; ii < nwwns; ++ii) { |
4165 | rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); | 4155 | rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]); |
4166 | bfa_assert(rport); | 4156 | WARN_ON(!rport); |
4167 | } | 4157 | } |
4168 | } | 4158 | } |
4169 | 4159 | ||
@@ -4352,8 +4342,8 @@ bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced) | |||
4352 | /* Handle VU registrations for Base port only */ | 4342 | /* Handle VU registrations for Base port only */ |
4353 | if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { | 4343 | if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) { |
4354 | len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 4344 | len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
4355 | bfa_lps_is_brcd_fabric(port->fabric->lps), | 4345 | port->fabric->lps->brcd_switch, |
4356 | port->pid, 0); | 4346 | port->pid, 0); |
4357 | } else { | 4347 | } else { |
4358 | len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), | 4348 | len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), |
4359 | BFA_FALSE, | 4349 | BFA_FALSE, |
@@ -4626,7 +4616,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, | |||
4626 | 4616 | ||
4627 | 4617 | ||
4628 | default: | 4618 | default: |
4629 | bfa_assert(0); | 4619 | WARN_ON(1); |
4630 | nsquery = BFA_TRUE; | 4620 | nsquery = BFA_TRUE; |
4631 | } | 4621 | } |
4632 | } | 4622 | } |
@@ -4672,7 +4662,7 @@ bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index, | |||
4672 | 4662 | ||
4673 | while ((qe != qh) && (i < nrports)) { | 4663 | while ((qe != qh) && (i < nrports)) { |
4674 | rport = (struct bfa_fcs_rport_s *) qe; | 4664 | rport = (struct bfa_fcs_rport_s *) qe; |
4675 | if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { | 4665 | if (bfa_ntoh3b(rport->pid) > 0xFFF000) { |
4676 | qe = bfa_q_next(qe); | 4666 | qe = bfa_q_next(qe); |
4677 | bfa_trc(fcs, (u32) rport->pwwn); | 4667 | bfa_trc(fcs, (u32) rport->pwwn); |
4678 | bfa_trc(fcs, rport->pid); | 4668 | bfa_trc(fcs, rport->pid); |
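This hunk and the two that follow only rename bfa_os_ntoh3b() to bfa_ntoh3b(), but the surrounding test deserves a note: comparing the 24-bit port ID against 0xFFF000 appears to filter out fabric-service rports (domain controllers and well-known addresses) so they are not counted or reported as ordinary remote ports. A small worked example with the standard well-known addresses; the helper name is invented.

/*
 * Illustrative only: why "bfa_ntoh3b(rport->pid) > 0xFFF000" skips
 * fabric services.  The two addresses below are standard FC well-known
 * addresses.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_FC_MGMT_SERVER	0xFFFFFAu	/* management server */
#define DEMO_FC_NAME_SERVER	0xFFFFFCu	/* directory (name) server */

static bool demo_is_fabric_service(uint32_t pid)	/* pid already host order */
{
	return pid > 0xFFF000u;	/* domain controllers and well-known addresses */
}

/*
 * demo_is_fabric_service(DEMO_FC_NAME_SERVER) and
 * demo_is_fabric_service(DEMO_FC_MGMT_SERVER) are both true, so those
 * rports are skipped by the walk above.
 */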
@@ -4720,7 +4710,7 @@ bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port, | |||
4720 | 4710 | ||
4721 | while ((qe != qh) && (i < *nrports)) { | 4711 | while ((qe != qh) && (i < *nrports)) { |
4722 | rport = (struct bfa_fcs_rport_s *) qe; | 4712 | rport = (struct bfa_fcs_rport_s *) qe; |
4723 | if (bfa_os_ntoh3b(rport->pid) > 0xFFF000) { | 4713 | if (bfa_ntoh3b(rport->pid) > 0xFFF000) { |
4724 | qe = bfa_q_next(qe); | 4714 | qe = bfa_q_next(qe); |
4725 | bfa_trc(fcs, (u32) rport->pwwn); | 4715 | bfa_trc(fcs, (u32) rport->pwwn); |
4726 | bfa_trc(fcs, rport->pid); | 4716 | bfa_trc(fcs, rport->pid); |
@@ -4771,7 +4761,7 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port) | |||
4771 | 4761 | ||
4772 | while (qe != qh) { | 4762 | while (qe != qh) { |
4773 | rport = (struct bfa_fcs_rport_s *) qe; | 4763 | rport = (struct bfa_fcs_rport_s *) qe; |
4774 | if ((bfa_os_ntoh3b(rport->pid) > 0xFFF000) || | 4764 | if ((bfa_ntoh3b(rport->pid) > 0xFFF000) || |
4775 | (bfa_fcs_rport_get_state(rport) == | 4765 | (bfa_fcs_rport_get_state(rport) == |
4776 | BFA_RPORT_OFFLINE)) { | 4766 | BFA_RPORT_OFFLINE)) { |
4777 | qe = bfa_q_next(qe); | 4767 | qe = bfa_q_next(qe); |
@@ -4807,7 +4797,7 @@ bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn) | |||
4807 | struct bfa_fcs_vport_s *vport; | 4797 | struct bfa_fcs_vport_s *vport; |
4808 | bfa_fcs_vf_t *vf; | 4798 | bfa_fcs_vf_t *vf; |
4809 | 4799 | ||
4810 | bfa_assert(fcs != NULL); | 4800 | WARN_ON(fcs == NULL); |
4811 | 4801 | ||
4812 | vf = bfa_fcs_vf_lookup(fcs, vf_id); | 4802 | vf = bfa_fcs_vf_lookup(fcs, vf_id); |
4813 | if (vf == NULL) { | 4803 | if (vf == NULL) { |
@@ -4853,7 +4843,7 @@ bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port, | |||
4853 | port_info->max_vports_supp = | 4843 | port_info->max_vports_supp = |
4854 | bfa_lps_get_max_vport(port->fcs->bfa); | 4844 | bfa_lps_get_max_vport(port->fcs->bfa); |
4855 | port_info->num_vports_inuse = | 4845 | port_info->num_vports_inuse = |
4856 | bfa_fcs_fabric_vport_count(port->fabric); | 4846 | port->fabric->num_vports; |
4857 | port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; | 4847 | port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP; |
4858 | port_info->num_rports_inuse = port->num_rports; | 4848 | port_info->num_rports_inuse = port->num_rports; |
4859 | } else { | 4849 | } else { |
@@ -4997,7 +4987,8 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, | |||
4997 | 4987 | ||
4998 | switch (event) { | 4988 | switch (event) { |
4999 | case BFA_FCS_VPORT_SM_START: | 4989 | case BFA_FCS_VPORT_SM_START: |
5000 | if (bfa_fcs_fabric_is_online(__vport_fabric(vport)) | 4990 | if (bfa_sm_cmp_state(__vport_fabric(vport), |
4991 | bfa_fcs_fabric_sm_online) | ||
5001 | && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) { | 4992 | && bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) { |
5002 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); | 4993 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); |
5003 | bfa_fcs_vport_do_fdisc(vport); | 4994 | bfa_fcs_vport_do_fdisc(vport); |
@@ -5080,13 +5071,13 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, | |||
5080 | switch (event) { | 5071 | switch (event) { |
5081 | case BFA_FCS_VPORT_SM_DELETE: | 5072 | case BFA_FCS_VPORT_SM_DELETE: |
5082 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); | 5073 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup); |
5083 | bfa_lps_discard(vport->lps); | 5074 | bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); |
5084 | bfa_fcs_lport_delete(&vport->lport); | 5075 | bfa_fcs_lport_delete(&vport->lport); |
5085 | break; | 5076 | break; |
5086 | 5077 | ||
5087 | case BFA_FCS_VPORT_SM_OFFLINE: | 5078 | case BFA_FCS_VPORT_SM_OFFLINE: |
5088 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); | 5079 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); |
5089 | bfa_lps_discard(vport->lps); | 5080 | bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); |
5090 | break; | 5081 | break; |
5091 | 5082 | ||
5092 | case BFA_FCS_VPORT_SM_RSP_OK: | 5083 | case BFA_FCS_VPORT_SM_RSP_OK: |
@@ -5166,7 +5157,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, | |||
5166 | 5157 | ||
5167 | case BFA_FCS_VPORT_SM_OFFLINE: | 5158 | case BFA_FCS_VPORT_SM_OFFLINE: |
5168 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); | 5159 | bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline); |
5169 | bfa_lps_discard(vport->lps); | 5160 | bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); |
5170 | bfa_fcs_lport_offline(&vport->lport); | 5161 | bfa_fcs_lport_offline(&vport->lport); |
5171 | break; | 5162 | break; |
5172 | 5163 | ||
@@ -5266,7 +5257,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, | |||
5266 | 5257 | ||
5267 | switch (event) { | 5258 | switch (event) { |
5268 | case BFA_FCS_VPORT_SM_OFFLINE: | 5259 | case BFA_FCS_VPORT_SM_OFFLINE: |
5269 | bfa_lps_discard(vport->lps); | 5260 | bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); |
5270 | /* | 5261 | /* |
5271 | * !!! fall through !!! | 5262 | * !!! fall through !!! |
5272 | */ | 5263 | */ |
@@ -5305,14 +5296,14 @@ bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport) | |||
5305 | static void | 5296 | static void |
5306 | bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) | 5297 | bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) |
5307 | { | 5298 | { |
5308 | u8 lsrjt_rsn = bfa_lps_get_lsrjt_rsn(vport->lps); | 5299 | u8 lsrjt_rsn = vport->lps->lsrjt_rsn; |
5309 | u8 lsrjt_expl = bfa_lps_get_lsrjt_expl(vport->lps); | 5300 | u8 lsrjt_expl = vport->lps->lsrjt_expl; |
5310 | 5301 | ||
5311 | bfa_trc(__vport_fcs(vport), lsrjt_rsn); | 5302 | bfa_trc(__vport_fcs(vport), lsrjt_rsn); |
5312 | bfa_trc(__vport_fcs(vport), lsrjt_expl); | 5303 | bfa_trc(__vport_fcs(vport), lsrjt_expl); |
5313 | 5304 | ||
5314 | /* For certain reason codes, we don't want to retry. */ | 5305 | /* For certain reason codes, we don't want to retry. */ |
5315 | switch (bfa_lps_get_lsrjt_expl(vport->lps)) { | 5306 | switch (vport->lps->lsrjt_expl) { |
5316 | case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ | 5307 | case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */ |
5317 | case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ | 5308 | case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */ |
5318 | if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) | 5309 | if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES) |
@@ -5476,7 +5467,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, | |||
5476 | if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL) | 5467 | if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL) |
5477 | return BFA_STATUS_VPORT_EXISTS; | 5468 | return BFA_STATUS_VPORT_EXISTS; |
5478 | 5469 | ||
5479 | if (bfa_fcs_fabric_vport_count(&fcs->fabric) == | 5470 | if (fcs->fabric.num_vports == |
5480 | bfa_lps_get_max_vport(fcs->bfa)) | 5471 | bfa_lps_get_max_vport(fcs->bfa)) |
5481 | return BFA_STATUS_VPORT_MAX; | 5472 | return BFA_STATUS_VPORT_MAX; |
5482 | 5473 | ||
@@ -5618,33 +5609,6 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, | |||
5618 | attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); | 5609 | attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); |
5619 | } | 5610 | } |
5620 | 5611 | ||
5621 | /* | ||
5622 | * Use this function to get vport's statistics. | ||
5623 | * | ||
5624 | * param[in] vport pointer to bfa_fcs_vport_t. | ||
5625 | * param[out] stats pointer to return vport statistics in | ||
5626 | * | ||
5627 | * return None | ||
5628 | */ | ||
5629 | void | ||
5630 | bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, | ||
5631 | struct bfa_vport_stats_s *stats) | ||
5632 | { | ||
5633 | *stats = vport->vport_stats; | ||
5634 | } | ||
5635 | |||
5636 | /* | ||
5637 | * Use this function to clear vport's statistics. | ||
5638 | * | ||
5639 | * param[in] vport pointer to bfa_fcs_vport_t. | ||
5640 | * | ||
5641 | * return None | ||
5642 | */ | ||
5643 | void | ||
5644 | bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport) | ||
5645 | { | ||
5646 | memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); | ||
5647 | } | ||
5648 | 5612 | ||
5649 | /* | 5613 | /* |
5650 | * Lookup a virtual port. Excludes base port from lookup. | 5614 | * Lookup a virtual port. Excludes base port from lookup. |
@@ -5684,7 +5648,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) | |||
5684 | /* | 5648 | /* |
5685 | * Initialiaze the V-Port fields | 5649 | * Initialiaze the V-Port fields |
5686 | */ | 5650 | */ |
5687 | __vport_fcid(vport) = bfa_lps_get_pid(vport->lps); | 5651 | __vport_fcid(vport) = vport->lps->lp_pid; |
5688 | vport->vport_stats.fdisc_accepts++; | 5652 | vport->vport_stats.fdisc_accepts++; |
5689 | bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); | 5653 | bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); |
5690 | break; | 5654 | break; |
@@ -5697,7 +5661,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) | |||
5697 | break; | 5661 | break; |
5698 | 5662 | ||
5699 | case BFA_STATUS_EPROTOCOL: | 5663 | case BFA_STATUS_EPROTOCOL: |
5700 | switch (bfa_lps_get_extstatus(vport->lps)) { | 5664 | switch (vport->lps->ext_status) { |
5701 | case BFA_EPROTO_BAD_ACCEPT: | 5665 | case BFA_EPROTO_BAD_ACCEPT: |
5702 | vport->vport_stats.fdisc_acc_bad++; | 5666 | vport->vport_stats.fdisc_acc_bad++; |
5703 | break; | 5667 | break; |
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index cf4a6e73e60d..caaee6f06937 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c | |||
@@ -19,9 +19,9 @@ | |||
19 | * rport.c Remote port implementation. | 19 | * rport.c Remote port implementation. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include "bfad_drv.h" | ||
22 | #include "bfa_fcs.h" | 23 | #include "bfa_fcs.h" |
23 | #include "bfa_fcbuild.h" | 24 | #include "bfa_fcbuild.h" |
24 | #include "bfad_drv.h" | ||
25 | 25 | ||
26 | BFA_TRC_FILE(FCS, RPORT); | 26 | BFA_TRC_FILE(FCS, RPORT); |
27 | 27 | ||
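bfad_drv.h is now included ahead of the other bfa headers here (and in the bfa_hw_cb.c, bfa_hw_ct.c and bfa_ioc.c hunks below), presumably because those headers rely on definitions it provides. The resulting include order, for reference:

	#include "bfad_drv.h"	/* first: driver-wide definitions */
	#include "bfa_fcs.h"
	#include "bfa_fcbuild.h"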
@@ -75,30 +75,6 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, | |||
75 | static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, | 75 | static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, |
76 | struct fchs_s *rx_fchs, u16 len); | 76 | struct fchs_s *rx_fchs, u16 len); |
77 | static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); | 77 | static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); |
78 | /* | ||
79 | * fcs_rport_sm FCS rport state machine events | ||
80 | */ | ||
81 | |||
82 | enum rport_event { | ||
83 | RPSM_EVENT_PLOGI_SEND = 1, /* new rport; start with PLOGI */ | ||
84 | RPSM_EVENT_PLOGI_RCVD = 2, /* Inbound PLOGI from remote port */ | ||
85 | RPSM_EVENT_PLOGI_COMP = 3, /* PLOGI completed to rport */ | ||
86 | RPSM_EVENT_LOGO_RCVD = 4, /* LOGO from remote device */ | ||
87 | RPSM_EVENT_LOGO_IMP = 5, /* implicit logo for SLER */ | ||
88 | RPSM_EVENT_FCXP_SENT = 6, /* Frame has been sent */ | ||
89 | RPSM_EVENT_DELETE = 7, /* RPORT delete request */ | ||
90 | RPSM_EVENT_SCN = 8, /* state change notification */ | ||
91 | RPSM_EVENT_ACCEPTED = 9, /* Good response from remote device */ | ||
92 | RPSM_EVENT_FAILED = 10, /* Request to rport failed. */ | ||
93 | RPSM_EVENT_TIMEOUT = 11, /* Rport SM timeout event */ | ||
94 | RPSM_EVENT_HCB_ONLINE = 12, /* BFA rport online callback */ | ||
95 | RPSM_EVENT_HCB_OFFLINE = 13, /* BFA rport offline callback */ | ||
96 | RPSM_EVENT_FC4_OFFLINE = 14, /* FC-4 offline complete */ | ||
97 | RPSM_EVENT_ADDRESS_CHANGE = 15, /* Rport's PID has changed */ | ||
98 | RPSM_EVENT_ADDRESS_DISC = 16, /* Need to Discover rport's PID */ | ||
99 | RPSM_EVENT_PRLO_RCVD = 17, /* PRLO from remote device */ | ||
100 | RPSM_EVENT_PLOGI_RETRY = 18, /* Retry PLOGI continuously */ | ||
101 | }; | ||
102 | 78 | ||
103 | static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, | 79 | static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, |
104 | enum rport_event event); | 80 | enum rport_event event); |
@@ -498,24 +474,24 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, | |||
498 | 474 | ||
499 | case RPSM_EVENT_LOGO_RCVD: | 475 | case RPSM_EVENT_LOGO_RCVD: |
500 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); | 476 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); |
501 | bfa_rport_offline(rport->bfa_rport); | 477 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
502 | break; | 478 | break; |
503 | 479 | ||
504 | case RPSM_EVENT_LOGO_IMP: | 480 | case RPSM_EVENT_LOGO_IMP: |
505 | case RPSM_EVENT_ADDRESS_CHANGE: | 481 | case RPSM_EVENT_ADDRESS_CHANGE: |
506 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); | 482 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); |
507 | bfa_rport_offline(rport->bfa_rport); | 483 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
508 | break; | 484 | break; |
509 | 485 | ||
510 | case RPSM_EVENT_PLOGI_RCVD: | 486 | case RPSM_EVENT_PLOGI_RCVD: |
511 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); | 487 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending); |
512 | bfa_rport_offline(rport->bfa_rport); | 488 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
513 | bfa_fcs_rport_send_plogiacc(rport, NULL); | 489 | bfa_fcs_rport_send_plogiacc(rport, NULL); |
514 | break; | 490 | break; |
515 | 491 | ||
516 | case RPSM_EVENT_DELETE: | 492 | case RPSM_EVENT_DELETE: |
517 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); | 493 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); |
518 | bfa_rport_offline(rport->bfa_rport); | 494 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
519 | break; | 495 | break; |
520 | 496 | ||
521 | case RPSM_EVENT_SCN: | 497 | case RPSM_EVENT_SCN: |
@@ -824,7 +800,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, | |||
824 | switch (event) { | 800 | switch (event) { |
825 | case RPSM_EVENT_FC4_OFFLINE: | 801 | case RPSM_EVENT_FC4_OFFLINE: |
826 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); | 802 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv); |
827 | bfa_rport_offline(rport->bfa_rport); | 803 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
828 | break; | 804 | break; |
829 | 805 | ||
830 | case RPSM_EVENT_DELETE: | 806 | case RPSM_EVENT_DELETE: |
@@ -856,7 +832,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, | |||
856 | switch (event) { | 832 | switch (event) { |
857 | case RPSM_EVENT_FC4_OFFLINE: | 833 | case RPSM_EVENT_FC4_OFFLINE: |
858 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); | 834 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend); |
859 | bfa_rport_offline(rport->bfa_rport); | 835 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
860 | break; | 836 | break; |
861 | 837 | ||
862 | default: | 838 | default: |
@@ -878,7 +854,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, | |||
878 | switch (event) { | 854 | switch (event) { |
879 | case RPSM_EVENT_FC4_OFFLINE: | 855 | case RPSM_EVENT_FC4_OFFLINE: |
880 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); | 856 | bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline); |
881 | bfa_rport_offline(rport->bfa_rport); | 857 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE); |
882 | break; | 858 | break; |
883 | 859 | ||
884 | case RPSM_EVENT_SCN: | 860 | case RPSM_EVENT_SCN: |
@@ -1459,7 +1435,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, | |||
1459 | twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; | 1435 | twin->stats.plogi_rcvd += rport->stats.plogi_rcvd; |
1460 | twin->stats.plogi_accs++; | 1436 | twin->stats.plogi_accs++; |
1461 | 1437 | ||
1462 | bfa_fcs_rport_delete(rport); | 1438 | bfa_sm_send_event(rport, RPSM_EVENT_DELETE); |
1463 | 1439 | ||
1464 | bfa_fcs_rport_update(twin, plogi_rsp); | 1440 | bfa_fcs_rport_update(twin, plogi_rsp); |
1465 | twin->pid = rsp_fchs->s_id; | 1441 | twin->pid = rsp_fchs->s_id; |
@@ -1992,13 +1968,14 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) | |||
1992 | /* | 1968 | /* |
1993 | * allocate FC-4s | 1969 | * allocate FC-4s |
1994 | */ | 1970 | */ |
1995 | bfa_assert(bfa_fcs_lport_is_initiator(port)); | 1971 | WARN_ON(!bfa_fcs_lport_is_initiator(port)); |
1996 | 1972 | ||
1997 | if (bfa_fcs_lport_is_initiator(port)) { | 1973 | if (bfa_fcs_lport_is_initiator(port)) { |
1998 | rport->itnim = bfa_fcs_itnim_create(rport); | 1974 | rport->itnim = bfa_fcs_itnim_create(rport); |
1999 | if (!rport->itnim) { | 1975 | if (!rport->itnim) { |
2000 | bfa_trc(fcs, rpid); | 1976 | bfa_trc(fcs, rpid); |
2001 | bfa_rport_delete(rport->bfa_rport); | 1977 | bfa_sm_send_event(rport->bfa_rport, |
1978 | BFA_RPORT_SM_DELETE); | ||
2002 | kfree(rport_drv); | 1979 | kfree(rport_drv); |
2003 | return NULL; | 1980 | return NULL; |
2004 | } | 1981 | } |
@@ -2032,7 +2009,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) | |||
2032 | bfa_fcs_rpf_rport_offline(rport); | 2009 | bfa_fcs_rpf_rport_offline(rport); |
2033 | } | 2010 | } |
2034 | 2011 | ||
2035 | bfa_rport_delete(rport->bfa_rport); | 2012 | bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE); |
2036 | bfa_fcs_lport_del_rport(port, rport); | 2013 | bfa_fcs_lport_del_rport(port, rport); |
2037 | kfree(rport->rp_drv); | 2014 | kfree(rport->rp_drv); |
2038 | } | 2015 | } |
@@ -2307,40 +2284,8 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, | |||
2307 | bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); | 2284 | bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); |
2308 | } | 2285 | } |
2309 | 2286 | ||
2310 | /* | ||
2311 | * Called by bport/vport to delete a remote port instance. | ||
2312 | * | ||
2313 | * Rport delete is called under the following conditions: | ||
2314 | * - vport is deleted | ||
2315 | * - vf is deleted | ||
2316 | * - explicit request from OS to delete rport | ||
2317 | */ | ||
2318 | void | ||
2319 | bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport) | ||
2320 | { | ||
2321 | bfa_sm_send_event(rport, RPSM_EVENT_DELETE); | ||
2322 | } | ||
2323 | 2287 | ||
2324 | /* | 2288 | /* |
2325 | * Called by bport/vport when a target goes offline. | ||
2326 | * | ||
2327 | */ | ||
2328 | void | ||
2329 | bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport) | ||
2330 | { | ||
2331 | bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); | ||
2332 | } | ||
2333 | |||
2334 | /* | ||
2335 | * Called by bport in n2n when a target (attached port) becomes online. | ||
2336 | * | ||
2337 | */ | ||
2338 | void | ||
2339 | bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport) | ||
2340 | { | ||
2341 | bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); | ||
2342 | } | ||
2343 | /* | ||
2344 | * Called by bport/vport to notify SCN for the remote port | 2289 | * Called by bport/vport to notify SCN for the remote port |
2345 | */ | 2290 | */ |
2346 | void | 2291 | void |
@@ -2350,23 +2295,6 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) | |||
2350 | bfa_sm_send_event(rport, RPSM_EVENT_SCN); | 2295 | bfa_sm_send_event(rport, RPSM_EVENT_SCN); |
2351 | } | 2296 | } |
2352 | 2297 | ||
2353 | /* | ||
2354 | * Called by fcpim to notify that the ITN cleanup is done. | ||
2355 | */ | ||
2356 | void | ||
2357 | bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport) | ||
2358 | { | ||
2359 | bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); | ||
2360 | } | ||
2361 | |||
2362 | /* | ||
2363 | * Called by fcptm to notify that the ITN cleanup is done. | ||
2364 | */ | ||
2365 | void | ||
2366 | bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport) | ||
2367 | { | ||
2368 | bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); | ||
2369 | } | ||
2370 | 2298 | ||
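The helpers removed above were one-line wrappers around bfa_sm_send_event(); callers now raise the rport state-machine event themselves, as the earlier hunk in bfa_fcs_rport_plogi_response() already does. The call-site conversion implied by the removals, assembled from the removed wrapper bodies (illustrative, not a literal hunk):

	bfa_sm_send_event(rport, RPSM_EVENT_DELETE);		/* was bfa_fcs_rport_delete()                 */
	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);		/* was bfa_fcs_rport_offline() / _logo_imp()  */
	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);	/* was bfa_fcs_rport_online()                 */
	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);	/* was bfa_fcs_rport_itnim_ack() / _tin_ack() */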
2371 | /* | 2299 | /* |
2372 | * brief | 2300 | * brief |
@@ -2465,15 +2393,6 @@ bfa_cb_rport_qos_scn_prio(void *cbarg, | |||
2465 | * Called to process any unsolicited frames from this remote port | 2393 | * Called to process any unsolicited frames from this remote port |
2466 | */ | 2394 | */ |
2467 | void | 2395 | void |
2468 | bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport) | ||
2469 | { | ||
2470 | bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); | ||
2471 | } | ||
2472 | |||
2473 | /* | ||
2474 | * Called to process any unsolicited frames from this remote port | ||
2475 | */ | ||
2476 | void | ||
2477 | bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, | 2396 | bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, |
2478 | struct fchs_s *fchs, u16 len) | 2397 | struct fchs_s *fchs, u16 len) |
2479 | { | 2398 | { |
@@ -2586,6 +2505,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) | |||
2586 | return bfa_sm_to_state(rport_sm_table, rport->sm); | 2505 | return bfa_sm_to_state(rport_sm_table, rport->sm); |
2587 | } | 2506 | } |
2588 | 2507 | ||
2508 | |||
2589 | /* | 2509 | /* |
2590 | * brief | 2510 | * brief |
2591 | * Called by the Driver to set rport delete/ageout timeout | 2511 | * Called by the Driver to set rport delete/ageout timeout |
@@ -2602,7 +2522,7 @@ bfa_fcs_rport_set_del_timeout(u8 rport_tmo) | |||
2602 | bfa_fcs_rport_del_timeout = rport_tmo * 1000; | 2522 | bfa_fcs_rport_del_timeout = rport_tmo * 1000; |
2603 | } | 2523 | } |
2604 | void | 2524 | void |
2605 | bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id) | 2525 | bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id) |
2606 | { | 2526 | { |
2607 | bfa_trc(rport->fcs, rport->pid); | 2527 | bfa_trc(rport->fcs, rport->pid); |
2608 | 2528 | ||
@@ -2621,106 +2541,6 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id) | |||
2621 | * fcs_rport_api FCS rport API. | 2541 | * fcs_rport_api FCS rport API. |
2622 | */ | 2542 | */ |
2623 | 2543 | ||
2624 | /* | ||
2625 | * Direct API to add a target by port wwn. This interface is used, for | ||
2626 | * example, by bios when target pwwn is known from boot lun configuration. | ||
2627 | */ | ||
2628 | bfa_status_t | ||
2629 | bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, | ||
2630 | struct bfa_fcs_rport_s *rport, struct bfad_rport_s *rport_drv) | ||
2631 | { | ||
2632 | bfa_trc(port->fcs, *pwwn); | ||
2633 | |||
2634 | return BFA_STATUS_OK; | ||
2635 | } | ||
2636 | |||
2637 | /* | ||
2638 | * Direct API to remove a target and its associated resources. This | ||
2639 | * interface is used, for example, by driver to remove target | ||
2640 | * ports from the target list for a VM. | ||
2641 | */ | ||
2642 | bfa_status_t | ||
2643 | bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in) | ||
2644 | { | ||
2645 | |||
2646 | struct bfa_fcs_rport_s *rport; | ||
2647 | |||
2648 | bfa_trc(rport_in->fcs, rport_in->pwwn); | ||
2649 | |||
2650 | rport = bfa_fcs_lport_get_rport_by_pwwn(rport_in->port, rport_in->pwwn); | ||
2651 | if (rport == NULL) { | ||
2652 | /* | ||
2653 | * TBD Error handling | ||
2654 | */ | ||
2655 | bfa_trc(rport_in->fcs, rport_in->pid); | ||
2656 | return BFA_STATUS_UNKNOWN_RWWN; | ||
2657 | } | ||
2658 | |||
2659 | /* | ||
2660 | * TBD if this remote port is online, send a logo | ||
2661 | */ | ||
2662 | return BFA_STATUS_OK; | ||
2663 | |||
2664 | } | ||
2665 | |||
2666 | /* | ||
2667 | * Remote device status for display/debug. | ||
2668 | */ | ||
2669 | void | ||
2670 | bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, | ||
2671 | struct bfa_rport_attr_s *rport_attr) | ||
2672 | { | ||
2673 | struct bfa_rport_qos_attr_s qos_attr; | ||
2674 | bfa_fcs_lport_t *port = rport->port; | ||
2675 | bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; | ||
2676 | |||
2677 | memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); | ||
2678 | |||
2679 | rport_attr->pid = rport->pid; | ||
2680 | rport_attr->pwwn = rport->pwwn; | ||
2681 | rport_attr->nwwn = rport->nwwn; | ||
2682 | rport_attr->cos_supported = rport->fc_cos; | ||
2683 | rport_attr->df_sz = rport->maxfrsize; | ||
2684 | rport_attr->state = bfa_fcs_rport_get_state(rport); | ||
2685 | rport_attr->fc_cos = rport->fc_cos; | ||
2686 | rport_attr->cisc = rport->cisc; | ||
2687 | rport_attr->scsi_function = rport->scsi_function; | ||
2688 | rport_attr->curr_speed = rport->rpf.rpsc_speed; | ||
2689 | rport_attr->assigned_speed = rport->rpf.assigned_speed; | ||
2690 | |||
2691 | bfa_rport_get_qos_attr(rport->bfa_rport, &qos_attr); | ||
2692 | rport_attr->qos_attr = qos_attr; | ||
2693 | |||
2694 | rport_attr->trl_enforced = BFA_FALSE; | ||
2695 | if (bfa_fcport_is_ratelim(port->fcs->bfa)) { | ||
2696 | if (rport_speed == BFA_PORT_SPEED_UNKNOWN) { | ||
2697 | /* Use default ratelim speed setting */ | ||
2698 | rport_speed = | ||
2699 | bfa_fcport_get_ratelim_speed(rport->fcs->bfa); | ||
2700 | } | ||
2701 | |||
2702 | if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) | ||
2703 | rport_attr->trl_enforced = BFA_TRUE; | ||
2704 | } | ||
2705 | } | ||
2706 | |||
2707 | /* | ||
2708 | * Per remote device statistics. | ||
2709 | */ | ||
2710 | void | ||
2711 | bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, | ||
2712 | struct bfa_rport_stats_s *stats) | ||
2713 | { | ||
2714 | *stats = rport->stats; | ||
2715 | } | ||
2716 | |||
2717 | void | ||
2718 | bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport) | ||
2719 | { | ||
2720 | memset((char *)&rport->stats, 0, | ||
2721 | sizeof(struct bfa_rport_stats_s)); | ||
2722 | } | ||
2723 | |||
2724 | struct bfa_fcs_rport_s * | 2544 | struct bfa_fcs_rport_s * |
2725 | bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) | 2545 | bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) |
2726 | { | 2546 | { |
@@ -2752,22 +2572,6 @@ bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn) | |||
2752 | } | 2572 | } |
2753 | 2573 | ||
2754 | /* | 2574 | /* |
2755 | * This API is to set the Rport's speed. Should be used when RPSC is not | ||
2756 | * supported by the rport. | ||
2757 | */ | ||
2758 | void | ||
2759 | bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed) | ||
2760 | { | ||
2761 | rport->rpf.assigned_speed = speed; | ||
2762 | |||
2763 | /* Set this speed in f/w only if the RPSC speed is not available */ | ||
2764 | if (rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN) | ||
2765 | bfa_rport_speed(rport->bfa_rport, speed); | ||
2766 | } | ||
2767 | |||
2768 | |||
2769 | |||
2770 | /* | ||
2771 | * Remote port features (RPF) implementation. | 2575 | * Remote port features (RPF) implementation. |
2772 | */ | 2576 | */ |
2773 | 2577 | ||
@@ -2827,7 +2631,7 @@ bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) | |||
2827 | case RPFSM_EVENT_RPORT_ONLINE: | 2631 | case RPFSM_EVENT_RPORT_ONLINE: |
2828 | /* Send RPSC2 to a Brocade fabric only. */ | 2632 | /* Send RPSC2 to a Brocade fabric only. */ |
2829 | if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && | 2633 | if ((!BFA_FCS_PID_IS_WKA(rport->pid)) && |
2830 | ((bfa_lps_is_brcd_fabric(rport->port->fabric->lps)) || | 2634 | ((rport->port->fabric->lps->brcd_switch) || |
2831 | (bfa_fcs_fabric_get_switch_oui(fabric) == | 2635 | (bfa_fcs_fabric_get_switch_oui(fabric) == |
2832 | BFA_FCS_BRCD_SWITCH_OUI))) { | 2636 | BFA_FCS_BRCD_SWITCH_OUI))) { |
2833 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); | 2637 | bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending); |
@@ -3093,7 +2897,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, | |||
3093 | num_ents = be16_to_cpu(rpsc2_acc->num_pids); | 2897 | num_ents = be16_to_cpu(rpsc2_acc->num_pids); |
3094 | bfa_trc(rport->fcs, num_ents); | 2898 | bfa_trc(rport->fcs, num_ents); |
3095 | if (num_ents > 0) { | 2899 | if (num_ents > 0) { |
3096 | bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); | 2900 | WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid); |
3097 | bfa_trc(rport->fcs, | 2901 | bfa_trc(rport->fcs, |
3098 | be16_to_cpu(rpsc2_acc->port_info[0].pid)); | 2902 | be16_to_cpu(rpsc2_acc->port_info[0].pid)); |
3099 | bfa_trc(rport->fcs, | 2903 | bfa_trc(rport->fcs, |
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index d8464ae60070..977e681ec803 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_modules.h" | 19 | #include "bfa_modules.h" |
19 | #include "bfi_cbreg.h" | 20 | #include "bfi_cbreg.h" |
20 | 21 | ||
@@ -110,7 +111,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) | |||
110 | { | 111 | { |
111 | int i; | 112 | int i; |
112 | 113 | ||
113 | bfa_assert((nvecs == 1) || (nvecs == __HFN_NUMINTS)); | 114 | WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS)); |
114 | 115 | ||
115 | bfa->msix.nvecs = nvecs; | 116 | bfa->msix.nvecs = nvecs; |
116 | if (nvecs == 1) { | 117 | if (nvecs == 1) { |
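Each bfa_assert()-to-WARN_ON() conversion in these files negates the condition, since bfa_assert() complained when its argument was false while WARN_ON() fires when its argument is true; for compound checks that is just De Morgan's law. A stand-alone user-space sketch that exercises the equivalence (33 stands in for __HFN_NUMINTS purely for illustration; the real value is not implied):

	#include <stdio.h>

	/* Stand-ins that only model when each macro would complain. */
	#define OLD_ASSERT_FIRES(cond)	(!(cond))
	#define NEW_WARN_FIRES(cond)	(!!(cond))

	int main(void)
	{
		int nvecs, mismatches = 0;

		for (nvecs = 0; nvecs < 64; nvecs++) {
			int old = OLD_ASSERT_FIRES((nvecs == 1) || (nvecs == 33));
			int new = NEW_WARN_FIRES((nvecs != 1) && (nvecs != 33));

			if (old != new)
				mismatches++;
		}
		printf("mismatches: %d\n", mismatches);	/* prints 0 */
		return 0;
	}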
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index b0efbc713ffe..21018d98a07b 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_modules.h" | 19 | #include "bfa_modules.h" |
19 | #include "bfi_ctreg.h" | 20 | #include "bfi_ctreg.h" |
20 | 21 | ||
@@ -116,7 +117,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, | |||
116 | void | 117 | void |
117 | bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) | 118 | bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs) |
118 | { | 119 | { |
119 | bfa_assert((nvecs == 1) || (nvecs == BFA_MSIX_CT_MAX)); | 120 | WARN_ON((nvecs != 1) && (nvecs != BFA_MSIX_CT_MAX)); |
120 | bfa_trc(bfa, nvecs); | 121 | bfa_trc(bfa, nvecs); |
121 | 122 | ||
122 | bfa->msix.nvecs = nvecs; | 123 | bfa->msix.nvecs = nvecs; |
@@ -143,7 +144,7 @@ bfa_hwct_msix_install(struct bfa_s *bfa) | |||
143 | for (; i <= BFA_MSIX_RME_Q3; i++) | 144 | for (; i <= BFA_MSIX_RME_Q3; i++) |
144 | bfa->msix.handler[i] = bfa_msix_rspq; | 145 | bfa->msix.handler[i] = bfa_msix_rspq; |
145 | 146 | ||
146 | bfa_assert(i == BFA_MSIX_LPU_ERR); | 147 | WARN_ON(i != BFA_MSIX_LPU_ERR); |
147 | bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err; | 148 | bfa->msix.handler[BFA_MSIX_LPU_ERR] = bfa_msix_lpu_err; |
148 | } | 149 | } |
149 | 150 | ||
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 9f4aa391ea9d..c1f72c49196f 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c | |||
@@ -15,11 +15,11 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_ioc.h" | 19 | #include "bfa_ioc.h" |
19 | #include "bfi_ctreg.h" | 20 | #include "bfi_ctreg.h" |
20 | #include "bfa_defs.h" | 21 | #include "bfa_defs.h" |
21 | #include "bfa_defs_svc.h" | 22 | #include "bfa_defs_svc.h" |
22 | #include "bfad_drv.h" | ||
23 | 23 | ||
24 | BFA_TRC_FILE(CNA, IOC); | 24 | BFA_TRC_FILE(CNA, IOC); |
25 | 25 | ||
@@ -29,7 +29,7 @@ BFA_TRC_FILE(CNA, IOC); | |||
29 | #define BFA_IOC_TOV 3000 /* msecs */ | 29 | #define BFA_IOC_TOV 3000 /* msecs */ |
30 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ | 30 | #define BFA_IOC_HWSEM_TOV 500 /* msecs */ |
31 | #define BFA_IOC_HB_TOV 500 /* msecs */ | 31 | #define BFA_IOC_HB_TOV 500 /* msecs */ |
32 | #define BFA_IOC_HWINIT_MAX 2 | 32 | #define BFA_IOC_HWINIT_MAX 5 |
33 | #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV | 33 | #define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV |
34 | 34 | ||
35 | #define bfa_ioc_timer_start(__ioc) \ | 35 | #define bfa_ioc_timer_start(__ioc) \ |
@@ -42,11 +42,6 @@ BFA_TRC_FILE(CNA, IOC); | |||
42 | bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) | 42 | bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV) |
43 | #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) | 43 | #define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer) |
44 | 44 | ||
45 | #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) | ||
46 | #define BFA_DBG_FWTRC_LEN \ | ||
47 | (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ | ||
48 | (sizeof(struct bfa_trc_mod_s) - \ | ||
49 | BFA_TRC_MAX * sizeof(struct bfa_trc_s))) | ||
50 | #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) | 45 | #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) |
51 | 46 | ||
52 | /* | 47 | /* |
@@ -59,17 +54,16 @@ BFA_TRC_FILE(CNA, IOC); | |||
59 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) | 54 | ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) |
60 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) | 55 | #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) |
61 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) | 56 | #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) |
62 | #define bfa_ioc_notify_hbfail(__ioc) \ | 57 | #define bfa_ioc_notify_fail(__ioc) \ |
63 | ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) | 58 | ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) |
64 | 59 | #define bfa_ioc_sync_join(__ioc) \ | |
65 | #ifdef BFA_IOC_IS_UEFI | 60 | ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) |
66 | #define bfa_ioc_is_bios_optrom(__ioc) (0) | 61 | #define bfa_ioc_sync_leave(__ioc) \ |
67 | #define bfa_ioc_is_uefi(__ioc) BFA_IOC_IS_UEFI | 62 | ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) |
68 | #else | 63 | #define bfa_ioc_sync_ack(__ioc) \ |
69 | #define bfa_ioc_is_bios_optrom(__ioc) \ | 64 | ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) |
70 | (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) | 65 | #define bfa_ioc_sync_complete(__ioc) \ |
71 | #define bfa_ioc_is_uefi(__ioc) (0) | 66 | ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) |
72 | #endif | ||
73 | 67 | ||
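The new bfa_ioc_sync_*() macros dispatch through the per-ASIC hwif vector, next to the renamed ioc_notify_fail hook, so each chip family supplies its own join/leave/ack/complete handling for the two ports that share an IOC. A sketch of how a hardware-specific module would be expected to populate the hooks (function names here are hypothetical; the real assignments are in the bfa_ioc_cb.c/bfa_ioc_ct.c parts of this series, not shown):

	hwif->ioc_notify_fail	= bfa_ioc_ct_notify_fail;
	hwif->ioc_sync_join	= bfa_ioc_ct_sync_join;
	hwif->ioc_sync_leave	= bfa_ioc_ct_sync_leave;
	hwif->ioc_sync_ack	= bfa_ioc_ct_sync_ack;
	hwif->ioc_sync_complete	= bfa_ioc_ct_sync_complete;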
74 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ | 68 | #define bfa_ioc_mbox_cmd_pending(__ioc) \ |
75 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ | 69 | (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ |
@@ -81,29 +75,22 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE; | |||
81 | * forward declarations | 75 | * forward declarations |
82 | */ | 76 | */ |
83 | static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); | 77 | static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc); |
84 | static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc); | ||
85 | static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); | 78 | static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force); |
86 | static void bfa_ioc_timeout(void *ioc); | 79 | static void bfa_ioc_timeout(void *ioc); |
87 | static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); | 80 | static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc); |
88 | static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); | 81 | static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc); |
89 | static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); | 82 | static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc); |
90 | static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); | 83 | static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc); |
91 | static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc); | ||
92 | static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force); | ||
93 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); | 84 | static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc); |
94 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); | 85 | static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc); |
95 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); | 86 | static void bfa_ioc_recover(struct bfa_ioc_s *ioc); |
96 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); | 87 | static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc); |
97 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); | 88 | static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc); |
98 | static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); | 89 | static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc); |
99 | static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc); | 90 | static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc); |
100 | static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc); | 91 | static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc); |
101 | static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); | ||
102 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); | 92 | static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); |
103 | 93 | ||
104 | /* | ||
105 | * hal_ioc_sm | ||
106 | */ | ||
107 | 94 | ||
108 | /* | 95 | /* |
109 | * IOC state machine definitions/declarations | 96 | * IOC state machine definitions/declarations |
@@ -116,10 +103,11 @@ enum ioc_event { | |||
116 | IOC_E_ENABLED = 5, /* f/w enabled */ | 103 | IOC_E_ENABLED = 5, /* f/w enabled */ |
117 | IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ | 104 | IOC_E_FWRSP_GETATTR = 6, /* IOC get attribute response */ |
118 | IOC_E_DISABLED = 7, /* f/w disabled */ | 105 | IOC_E_DISABLED = 7, /* f/w disabled */ |
119 | IOC_E_FAILED = 8, /* failure notice by iocpf sm */ | 106 | IOC_E_INITFAILED = 8, /* failure notice by iocpf sm */ |
120 | IOC_E_HBFAIL = 9, /* heartbeat failure */ | 107 | IOC_E_PFFAILED = 9, /* failure notice by iocpf sm */ |
121 | IOC_E_HWERROR = 10, /* hardware error interrupt */ | 108 | IOC_E_HBFAIL = 10, /* heartbeat failure */ |
122 | IOC_E_TIMEOUT = 11, /* timeout */ | 109 | IOC_E_HWERROR = 11, /* hardware error interrupt */ |
110 | IOC_E_TIMEOUT = 12, /* timeout */ | ||
123 | }; | 111 | }; |
124 | 112 | ||
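The single IOC_E_FAILED event is split in two, and the later events shift down by one. A summary of the renumbering from the hunk above:

	/* IOC_E_FAILED  (8) -> IOC_E_INITFAILED (8): init retries exhausted
	 *                   -> IOC_E_PFFAILED   (9): failure reported by the iocpf SM
	 * IOC_E_HBFAIL  (9) -> 10,  IOC_E_HWERROR (10) -> 11,  IOC_E_TIMEOUT (11) -> 12
	 */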
125 | bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); | 113 | bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event); |
@@ -127,7 +115,7 @@ bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event); | |||
127 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); | 115 | bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event); |
128 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); | 116 | bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event); |
129 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); | 117 | bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event); |
130 | bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event); | 118 | bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event); |
131 | bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); | 119 | bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event); |
132 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); | 120 | bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event); |
133 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); | 121 | bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event); |
@@ -138,7 +126,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = { | |||
138 | {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, | 126 | {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, |
139 | {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, | 127 | {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, |
140 | {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, | 128 | {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, |
141 | {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, | 129 | {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, |
142 | {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, | 130 | {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, |
143 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, | 131 | {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, |
144 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, | 132 | {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, |
@@ -165,12 +153,6 @@ static struct bfa_sm_table_s ioc_sm_table[] = { | |||
165 | /* | 153 | /* |
166 | * Forward declarations for iocpf state machine | 154 | * Forward declarations for iocpf state machine |
167 | */ | 155 | */ |
168 | static void bfa_iocpf_enable(struct bfa_ioc_s *ioc); | ||
169 | static void bfa_iocpf_disable(struct bfa_ioc_s *ioc); | ||
170 | static void bfa_iocpf_fail(struct bfa_ioc_s *ioc); | ||
171 | static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc); | ||
172 | static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc); | ||
173 | static void bfa_iocpf_stop(struct bfa_ioc_s *ioc); | ||
174 | static void bfa_iocpf_timeout(void *ioc_arg); | 156 | static void bfa_iocpf_timeout(void *ioc_arg); |
175 | static void bfa_iocpf_sem_timeout(void *ioc_arg); | 157 | static void bfa_iocpf_sem_timeout(void *ioc_arg); |
176 | 158 | ||
@@ -213,9 +195,14 @@ bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event); | |||
213 | bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); | 195 | bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event); |
214 | bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); | 196 | bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event); |
215 | bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); | 197 | bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event); |
198 | bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s, | ||
199 | enum iocpf_event); | ||
216 | bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); | 200 | bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event); |
201 | bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event); | ||
217 | bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event); | 202 | bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event); |
218 | bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); | 203 | bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event); |
204 | bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s, | ||
205 | enum iocpf_event); | ||
219 | bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); | 206 | bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event); |
220 | 207 | ||
221 | static struct bfa_sm_table_s iocpf_sm_table[] = { | 208 | static struct bfa_sm_table_s iocpf_sm_table[] = { |
@@ -226,9 +213,12 @@ static struct bfa_sm_table_s iocpf_sm_table[] = { | |||
226 | {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, | 213 | {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, |
227 | {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, | 214 | {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, |
228 | {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, | 215 | {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, |
216 | {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, | ||
229 | {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, | 217 | {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, |
218 | {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, | ||
230 | {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, | 219 | {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, |
231 | {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, | 220 | {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, |
221 | {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, | ||
232 | {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, | 222 | {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, |
233 | }; | 223 | }; |
234 | 224 | ||
@@ -301,7 +291,7 @@ bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
301 | static void | 291 | static void |
302 | bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) | 292 | bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) |
303 | { | 293 | { |
304 | bfa_iocpf_enable(ioc); | 294 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); |
305 | } | 295 | } |
306 | 296 | ||
307 | /* | 297 | /* |
@@ -318,13 +308,13 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
318 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | 308 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); |
319 | break; | 309 | break; |
320 | 310 | ||
321 | case IOC_E_FAILED: | 311 | case IOC_E_PFFAILED: |
322 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 312 | /* !!! fall through !!! */ |
323 | break; | ||
324 | |||
325 | case IOC_E_HWERROR: | 313 | case IOC_E_HWERROR: |
326 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 314 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
327 | bfa_iocpf_initfail(ioc); | 315 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); |
316 | if (event != IOC_E_PFFAILED) | ||
317 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); | ||
328 | break; | 318 | break; |
329 | 319 | ||
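The event != IOC_E_PFFAILED guard keeps the two state machines from ping-ponging: IOC_E_PFFAILED already originated in the iocpf state machine, so only a genuine hardware error needs to push IOCPF_E_INITFAIL back down. The same two lines, restated with a descriptive comment (the code itself is unchanged from the hunk above):

	/*
	 * IOC_E_PFFAILED came from the iocpf SM itself; echoing
	 * IOCPF_E_INITFAIL back would only bounce the failure around.
	 * Only IOC_E_HWERROR needs to start iocpf failure handling here.
	 */
	if (event != IOC_E_PFFAILED)
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);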
330 | case IOC_E_DISABLE: | 320 | case IOC_E_DISABLE: |
@@ -333,7 +323,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
333 | 323 | ||
334 | case IOC_E_DETACH: | 324 | case IOC_E_DETACH: |
335 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | 325 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); |
336 | bfa_iocpf_stop(ioc); | 326 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); |
337 | break; | 327 | break; |
338 | 328 | ||
339 | case IOC_E_ENABLE: | 329 | case IOC_E_ENABLE: |
@@ -367,18 +357,16 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
367 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); | 357 | bfa_fsm_set_state(ioc, bfa_ioc_sm_op); |
368 | break; | 358 | break; |
369 | 359 | ||
370 | case IOC_E_FAILED: | ||
371 | bfa_ioc_timer_stop(ioc); | ||
372 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | ||
373 | break; | 360 | break; |
374 | 361 | case IOC_E_PFFAILED: | |
375 | case IOC_E_HWERROR: | 362 | case IOC_E_HWERROR: |
376 | bfa_ioc_timer_stop(ioc); | 363 | bfa_ioc_timer_stop(ioc); |
377 | /* fall through */ | 364 | /* !!! fall through !!! */ |
378 | |||
379 | case IOC_E_TIMEOUT: | 365 | case IOC_E_TIMEOUT: |
380 | bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); | 366 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
381 | bfa_iocpf_getattrfail(ioc); | 367 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); |
368 | if (event != IOC_E_PFFAILED) | ||
369 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); | ||
382 | break; | 370 | break; |
383 | 371 | ||
384 | case IOC_E_DISABLE: | 372 | case IOC_E_DISABLE: |
@@ -415,22 +403,24 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
415 | break; | 403 | break; |
416 | 404 | ||
417 | case IOC_E_DISABLE: | 405 | case IOC_E_DISABLE: |
418 | bfa_ioc_hb_stop(ioc); | 406 | bfa_hb_timer_stop(ioc); |
419 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | 407 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); |
420 | break; | 408 | break; |
421 | 409 | ||
422 | case IOC_E_FAILED: | 410 | case IOC_E_PFFAILED: |
423 | bfa_ioc_hb_stop(ioc); | ||
424 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
425 | break; | ||
426 | |||
427 | case IOC_E_HWERROR: | 411 | case IOC_E_HWERROR: |
428 | bfa_ioc_hb_stop(ioc); | 412 | bfa_hb_timer_stop(ioc); |
429 | /* !!! fall through !!! */ | 413 | /* !!! fall through !!! */ |
430 | |||
431 | case IOC_E_HBFAIL: | 414 | case IOC_E_HBFAIL: |
432 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | 415 | bfa_ioc_fail_notify(ioc); |
433 | bfa_iocpf_fail(ioc); | 416 | |
417 | if (ioc->iocpf.auto_recover) | ||
418 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); | ||
419 | else | ||
420 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
421 | |||
422 | if (event != IOC_E_PFFAILED) | ||
423 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); | ||
434 | break; | 424 | break; |
435 | 425 | ||
436 | default: | 426 | default: |
@@ -443,7 +433,7 @@ static void | |||
443 | bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) | 433 | bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) |
444 | { | 434 | { |
445 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | 435 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; |
446 | bfa_iocpf_disable(ioc); | 436 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); |
447 | BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); | 437 | BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n"); |
448 | } | 438 | } |
449 | 439 | ||
@@ -466,7 +456,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
466 | * after iocpf sm completes failure processing and | 456 | * after iocpf sm completes failure processing and |
467 | * moves to disabled state. | 457 | * moves to disabled state. |
468 | */ | 458 | */ |
469 | bfa_iocpf_fail(ioc); | 459 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); |
470 | break; | 460 | break; |
471 | 461 | ||
472 | default: | 462 | default: |
@@ -499,7 +489,7 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
499 | 489 | ||
500 | case IOC_E_DETACH: | 490 | case IOC_E_DETACH: |
501 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | 491 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); |
502 | bfa_iocpf_stop(ioc); | 492 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); |
503 | break; | 493 | break; |
504 | 494 | ||
505 | default: | 495 | default: |
@@ -509,16 +499,16 @@ bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
509 | 499 | ||
510 | 500 | ||
511 | static void | 501 | static void |
512 | bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) | 502 | bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc) |
513 | { | 503 | { |
514 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 504 | bfa_trc(ioc, 0); |
515 | } | 505 | } |
516 | 506 | ||
517 | /* | 507 | /* |
518 | * Hardware initialization failed. | 508 | * Hardware initialization retry. |
519 | */ | 509 | */ |
520 | static void | 510 | static void |
521 | bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) | 511 | bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event) |
522 | { | 512 | { |
523 | bfa_trc(ioc, event); | 513 | bfa_trc(ioc, event); |
524 | 514 | ||
@@ -527,11 +517,21 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
527 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | 517 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); |
528 | break; | 518 | break; |
529 | 519 | ||
530 | case IOC_E_FAILED: | 520 | case IOC_E_PFFAILED: |
521 | case IOC_E_HWERROR: | ||
531 | /* | 522 | /* |
532 | * Initialization failure during iocpf init retry. | 523 | * Initialization retry failed. |
533 | */ | 524 | */ |
534 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 525 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
526 | if (event != IOC_E_PFFAILED) | ||
527 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); | ||
528 | break; | ||
529 | |||
530 | case IOC_E_INITFAILED: | ||
531 | bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); | ||
532 | break; | ||
533 | |||
534 | case IOC_E_ENABLE: | ||
535 | break; | 535 | break; |
536 | 536 | ||
537 | case IOC_E_DISABLE: | 537 | case IOC_E_DISABLE: |
@@ -540,7 +540,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
540 | 540 | ||
541 | case IOC_E_DETACH: | 541 | case IOC_E_DETACH: |
542 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | 542 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); |
543 | bfa_iocpf_stop(ioc); | 543 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); |
544 | break; | 544 | break; |
545 | 545 | ||
546 | default: | 546 | default: |
@@ -552,21 +552,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
552 | static void | 552 | static void |
553 | bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) | 553 | bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) |
554 | { | 554 | { |
555 | struct list_head *qe; | 555 | bfa_trc(ioc, 0); |
556 | struct bfa_ioc_hbfail_notify_s *notify; | ||
557 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | ||
558 | |||
559 | /* | ||
560 | * Notify driver and common modules registered for notification. | ||
561 | */ | ||
562 | ioc->cbfn->hbfail_cbfn(ioc->bfa); | ||
563 | list_for_each(qe, &ioc->hb_notify_q) { | ||
564 | notify = (struct bfa_ioc_hbfail_notify_s *) qe; | ||
565 | notify->cbfn(notify->cbarg); | ||
566 | } | ||
567 | |||
568 | BFA_LOG(KERN_CRIT, bfad, bfa_log_level, | ||
569 | "Heart Beat of IOC has failed\n"); | ||
570 | } | 556 | } |
571 | 557 | ||
572 | /* | 558 | /* |
@@ -579,23 +565,19 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
579 | 565 | ||
580 | switch (event) { | 566 | switch (event) { |
581 | 567 | ||
582 | case IOC_E_FAILED: | ||
583 | /* | ||
584 | * Initialization failure during iocpf recovery. | ||
585 | * !!! Fall through !!! | ||
586 | */ | ||
587 | case IOC_E_ENABLE: | 568 | case IOC_E_ENABLE: |
588 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); | 569 | ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); |
589 | break; | 570 | break; |
590 | 571 | ||
591 | case IOC_E_ENABLED: | ||
592 | bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); | ||
593 | break; | ||
594 | |||
595 | case IOC_E_DISABLE: | 572 | case IOC_E_DISABLE: |
596 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); | 573 | bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); |
597 | break; | 574 | break; |
598 | 575 | ||
576 | case IOC_E_DETACH: | ||
577 | bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); | ||
578 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); | ||
579 | break; | ||
580 | |||
599 | case IOC_E_HWERROR: | 581 | case IOC_E_HWERROR: |
600 | /* | 582 | /* |
601 | * HB failure notification, ignore. | 583 | * HB failure notification, ignore. |
@@ -606,13 +588,10 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) | |||
606 | } | 588 | } |
607 | } | 589 | } |
608 | 590 | ||
609 | |||
610 | |||
611 | /* | 591 | /* |
612 | * IOCPF State Machine | 592 | * IOCPF State Machine |
613 | */ | 593 | */ |
614 | 594 | ||
615 | |||
616 | /* | 595 | /* |
617 | * Reset entry actions -- initialize state machine | 596 | * Reset entry actions -- initialize state machine |
618 | */ | 597 | */ |
@@ -668,22 +647,29 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
668 | switch (event) { | 647 | switch (event) { |
669 | case IOCPF_E_SEMLOCKED: | 648 | case IOCPF_E_SEMLOCKED: |
670 | if (bfa_ioc_firmware_lock(ioc)) { | 649 | if (bfa_ioc_firmware_lock(ioc)) { |
671 | iocpf->retry_count = 0; | 650 | if (bfa_ioc_sync_complete(ioc)) { |
672 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | 651 | iocpf->retry_count = 0; |
652 | bfa_ioc_sync_join(ioc); | ||
653 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
654 | } else { | ||
655 | bfa_ioc_firmware_unlock(ioc); | ||
656 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
657 | bfa_sem_timer_start(ioc); | ||
658 | } | ||
673 | } else { | 659 | } else { |
674 | bfa_ioc_hw_sem_release(ioc); | 660 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
675 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); | 661 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); |
676 | } | 662 | } |
677 | break; | 663 | break; |
678 | 664 | ||
679 | case IOCPF_E_DISABLE: | 665 | case IOCPF_E_DISABLE: |
680 | bfa_ioc_hw_sem_get_cancel(ioc); | 666 | bfa_sem_timer_stop(ioc); |
681 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | 667 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
682 | bfa_ioc_pf_disabled(ioc); | 668 | bfa_fsm_send_event(ioc, IOC_E_DISABLED); |
683 | break; | 669 | break; |
684 | 670 | ||
685 | case IOCPF_E_STOP: | 671 | case IOCPF_E_STOP: |
686 | bfa_ioc_hw_sem_get_cancel(ioc); | 672 | bfa_sem_timer_stop(ioc); |
687 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | 673 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
688 | break; | 674 | break; |
689 | 675 | ||
@@ -726,7 +712,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
726 | case IOCPF_E_DISABLE: | 712 | case IOCPF_E_DISABLE: |
727 | bfa_iocpf_timer_stop(ioc); | 713 | bfa_iocpf_timer_stop(ioc); |
728 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | 714 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
729 | bfa_ioc_pf_disabled(ioc); | 715 | bfa_fsm_send_event(ioc, IOC_E_DISABLED); |
730 | break; | 716 | break; |
731 | 717 | ||
732 | case IOCPF_E_STOP: | 718 | case IOCPF_E_STOP: |
@@ -760,13 +746,18 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
760 | 746 | ||
761 | switch (event) { | 747 | switch (event) { |
762 | case IOCPF_E_SEMLOCKED: | 748 | case IOCPF_E_SEMLOCKED: |
763 | iocpf->retry_count = 0; | 749 | if (bfa_ioc_sync_complete(ioc)) { |
764 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | 750 | bfa_ioc_sync_join(ioc); |
751 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
752 | } else { | ||
753 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
754 | bfa_sem_timer_start(ioc); | ||
755 | } | ||
765 | break; | 756 | break; |
766 | 757 | ||
767 | case IOCPF_E_DISABLE: | 758 | case IOCPF_E_DISABLE: |
768 | bfa_ioc_hw_sem_get_cancel(ioc); | 759 | bfa_sem_timer_stop(ioc); |
769 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | 760 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); |
770 | break; | 761 | break; |
771 | 762 | ||
772 | default: | 763 | default: |
@@ -774,12 +765,11 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
774 | } | 765 | } |
775 | } | 766 | } |
776 | 767 | ||
777 | |||
778 | static void | 768 | static void |
779 | bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) | 769 | bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) |
780 | { | 770 | { |
781 | bfa_iocpf_timer_start(iocpf->ioc); | 771 | bfa_iocpf_timer_start(iocpf->ioc); |
782 | bfa_ioc_reset(iocpf->ioc, BFA_FALSE); | 772 | bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE); |
783 | } | 773 | } |
784 | 774 | ||
785 | /* | 775 | /* |
@@ -806,23 +796,16 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
806 | */ | 796 | */ |
807 | 797 | ||
808 | case IOCPF_E_TIMEOUT: | 798 | case IOCPF_E_TIMEOUT: |
809 | iocpf->retry_count++; | 799 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
810 | if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { | ||
811 | bfa_iocpf_timer_start(ioc); | ||
812 | bfa_ioc_reset(ioc, BFA_TRUE); | ||
813 | break; | ||
814 | } | ||
815 | |||
816 | bfa_ioc_hw_sem_release(ioc); | ||
817 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | ||
818 | |||
819 | if (event == IOCPF_E_TIMEOUT) | 800 | if (event == IOCPF_E_TIMEOUT) |
820 | bfa_ioc_pf_failed(ioc); | 801 | bfa_fsm_send_event(ioc, IOC_E_PFFAILED); |
802 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); | ||
821 | break; | 803 | break; |
822 | 804 | ||
823 | case IOCPF_E_DISABLE: | 805 | case IOCPF_E_DISABLE: |
824 | bfa_ioc_hw_sem_release(ioc); | ||
825 | bfa_iocpf_timer_stop(ioc); | 806 | bfa_iocpf_timer_stop(ioc); |
807 | bfa_ioc_sync_leave(ioc); | ||
808 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
826 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | 809 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
827 | break; | 810 | break; |
828 | 811 | ||
@@ -831,7 +814,6 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
831 | } | 814 | } |
832 | } | 815 | } |
833 | 816 | ||
834 | |||
835 | static void | 817 | static void |
836 | bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) | 818 | bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) |
837 | { | 819 | { |
@@ -853,7 +835,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
853 | switch (event) { | 835 | switch (event) { |
854 | case IOCPF_E_FWRSP_ENABLE: | 836 | case IOCPF_E_FWRSP_ENABLE: |
855 | bfa_iocpf_timer_stop(ioc); | 837 | bfa_iocpf_timer_stop(ioc); |
856 | bfa_ioc_hw_sem_release(ioc); | 838 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
857 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); | 839 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); |
858 | break; | 840 | break; |
859 | 841 | ||
@@ -864,23 +846,15 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
864 | */ | 846 | */ |
865 | 847 | ||
866 | case IOCPF_E_TIMEOUT: | 848 | case IOCPF_E_TIMEOUT: |
867 | iocpf->retry_count++; | 849 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
868 | if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { | ||
869 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); | ||
870 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
871 | break; | ||
872 | } | ||
873 | |||
874 | bfa_ioc_hw_sem_release(ioc); | ||
875 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | ||
876 | |||
877 | if (event == IOCPF_E_TIMEOUT) | 850 | if (event == IOCPF_E_TIMEOUT) |
878 | bfa_ioc_pf_failed(ioc); | 851 | bfa_fsm_send_event(ioc, IOC_E_PFFAILED); |
852 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); | ||
879 | break; | 853 | break; |
880 | 854 | ||
881 | case IOCPF_E_DISABLE: | 855 | case IOCPF_E_DISABLE: |
882 | bfa_iocpf_timer_stop(ioc); | 856 | bfa_iocpf_timer_stop(ioc); |
883 | bfa_ioc_hw_sem_release(ioc); | 857 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
884 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); | 858 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); |
885 | break; | 859 | break; |
886 | 860 | ||
@@ -893,12 +867,10 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
893 | } | 867 | } |
894 | } | 868 | } |
895 | 869 | ||
896 | |||
897 | |||
898 | static void | 870 | static void |
899 | bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) | 871 | bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf) |
900 | { | 872 | { |
901 | bfa_ioc_pf_enabled(iocpf->ioc); | 873 | bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED); |
902 | } | 874 | } |
903 | 875 | ||
904 | static void | 876 | static void |
@@ -914,20 +886,21 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
914 | break; | 886 | break; |
915 | 887 | ||
916 | case IOCPF_E_GETATTRFAIL: | 888 | case IOCPF_E_GETATTRFAIL: |
917 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | 889 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); |
918 | break; | 890 | break; |
919 | 891 | ||
920 | case IOCPF_E_FAIL: | 892 | case IOCPF_E_FAIL: |
921 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); | 893 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); |
922 | break; | 894 | break; |
923 | 895 | ||
924 | case IOCPF_E_FWREADY: | 896 | case IOCPF_E_FWREADY: |
925 | if (bfa_ioc_is_operational(ioc)) | 897 | if (bfa_ioc_is_operational(ioc)) { |
926 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); | 898 | bfa_fsm_send_event(ioc, IOC_E_PFFAILED); |
927 | else | 899 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); |
928 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | 900 | } else { |
929 | 901 | bfa_fsm_send_event(ioc, IOC_E_PFFAILED); | |
930 | bfa_ioc_pf_failed(ioc); | 902 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); |
903 | } | ||
931 | break; | 904 | break; |
932 | 905 | ||
933 | default: | 906 | default: |
@@ -935,7 +908,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
935 | } | 908 | } |
936 | } | 909 | } |
937 | 910 | ||
938 | |||
939 | static void | 911 | static void |
940 | bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) | 912 | bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) |
941 | { | 913 | { |
@@ -957,7 +929,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
957 | case IOCPF_E_FWRSP_DISABLE: | 929 | case IOCPF_E_FWRSP_DISABLE: |
958 | case IOCPF_E_FWREADY: | 930 | case IOCPF_E_FWREADY: |
959 | bfa_iocpf_timer_stop(ioc); | 931 | bfa_iocpf_timer_stop(ioc); |
960 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | 932 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); |
961 | break; | 933 | break; |
962 | 934 | ||
963 | case IOCPF_E_FAIL: | 935 | case IOCPF_E_FAIL: |
@@ -968,7 +940,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
968 | 940 | ||
969 | case IOCPF_E_TIMEOUT: | 941 | case IOCPF_E_TIMEOUT: |
970 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | 942 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); |
971 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | 943 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); |
972 | break; | 944 | break; |
973 | 945 | ||
974 | case IOCPF_E_FWRSP_ENABLE: | 946 | case IOCPF_E_FWRSP_ENABLE: |
@@ -979,13 +951,44 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
979 | } | 951 | } |
980 | } | 952 | } |
981 | 953 | ||
954 | static void | ||
955 | bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf) | ||
956 | { | ||
957 | bfa_ioc_hw_sem_get(iocpf->ioc); | ||
958 | } | ||
959 | |||
960 | /* | ||
961 | * IOC hb ack request is being removed. | ||
962 | */ | ||
963 | static void | ||
964 | bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | ||
965 | { | ||
966 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
967 | |||
968 | bfa_trc(ioc, event); | ||
969 | |||
970 | switch (event) { | ||
971 | case IOCPF_E_SEMLOCKED: | ||
972 | bfa_ioc_sync_leave(ioc); | ||
973 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
974 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | ||
975 | break; | ||
976 | |||
977 | case IOCPF_E_FAIL: | ||
978 | break; | ||
979 | |||
980 | default: | ||
981 | bfa_sm_fault(ioc, event); | ||
982 | } | ||
983 | } | ||
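The new disabling_sync state serializes the disable path on the hardware semaphore before the port leaves the sync group, so the partner port sees a consistent view. The sequence it implements, in brief (descriptive restatement of the code above):

	/* entry:             bfa_ioc_hw_sem_get(ioc)        request the h/w semaphore
	 * IOCPF_E_SEMLOCKED: bfa_ioc_sync_leave(ioc)        leave the sync group
	 *                    writel(1, ...ioc_sem_reg)      release the semaphore
	 *                    -> bfa_iocpf_sm_disabled
	 */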
984 | |||
982 | /* | 985 | /* |
983 | * IOC disable completion entry. | 986 | * IOC disable completion entry. |
984 | */ | 987 | */ |
985 | static void | 988 | static void |
986 | bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) | 989 | bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf) |
987 | { | 990 | { |
988 | bfa_ioc_pf_disabled(iocpf->ioc); | 991 | bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED); |
989 | } | 992 | } |
990 | 993 | ||
991 | static void | 994 | static void |
@@ -997,6 +1000,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
997 | 1000 | ||
998 | switch (event) { | 1001 | switch (event) { |
999 | case IOCPF_E_ENABLE: | 1002 | case IOCPF_E_ENABLE: |
1003 | iocpf->retry_count = 0; | ||
1000 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | 1004 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); |
1001 | break; | 1005 | break; |
1002 | 1006 | ||
@@ -1010,11 +1014,64 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
1010 | } | 1014 | } |
1011 | } | 1015 | } |
1012 | 1016 | ||
1017 | static void | ||
1018 | bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf) | ||
1019 | { | ||
1020 | bfa_ioc_hw_sem_get(iocpf->ioc); | ||
1021 | } | ||
1022 | |||
1023 | /* | ||
1024 | * Hardware initialization failed. | ||
1025 | */ | ||
1026 | static void | ||
1027 | bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | ||
1028 | { | ||
1029 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
1030 | |||
1031 | bfa_trc(ioc, event); | ||
1032 | |||
1033 | switch (event) { | ||
1034 | case IOCPF_E_SEMLOCKED: | ||
1035 | bfa_ioc_notify_fail(ioc); | ||
1036 | bfa_ioc_sync_ack(ioc); | ||
1037 | iocpf->retry_count++; | ||
1038 | if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) { | ||
1039 | bfa_ioc_sync_leave(ioc); | ||
1040 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
1041 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); | ||
1042 | } else { | ||
1043 | if (bfa_ioc_sync_complete(ioc)) | ||
1044 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
1045 | else { | ||
1046 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
1047 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
1048 | } | ||
1049 | } | ||
1050 | break; | ||
1051 | |||
1052 | case IOCPF_E_DISABLE: | ||
1053 | bfa_sem_timer_stop(ioc); | ||
1054 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); | ||
1055 | break; | ||
1056 | |||
1057 | case IOCPF_E_STOP: | ||
1058 | bfa_sem_timer_stop(ioc); | ||
1059 | bfa_ioc_firmware_unlock(ioc); | ||
1060 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | ||
1061 | break; | ||
1062 | |||
1063 | case IOCPF_E_FAIL: | ||
1064 | break; | ||
1065 | |||
1066 | default: | ||
1067 | bfa_sm_fault(ioc, event); | ||
1068 | } | ||
1069 | } | ||
1013 | 1070 | ||
1014 | static void | 1071 | static void |
1015 | bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) | 1072 | bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) |
1016 | { | 1073 | { |
1017 | bfa_iocpf_timer_start(iocpf->ioc); | 1074 | bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED); |
1018 | } | 1075 | } |
1019 | 1076 | ||
1020 | /* | 1077 | /* |
@@ -1029,47 +1086,77 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
1029 | 1086 | ||
1030 | switch (event) { | 1087 | switch (event) { |
1031 | case IOCPF_E_DISABLE: | 1088 | case IOCPF_E_DISABLE: |
1032 | bfa_iocpf_timer_stop(ioc); | ||
1033 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | 1089 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
1034 | break; | 1090 | break; |
1035 | 1091 | ||
1036 | case IOCPF_E_STOP: | 1092 | case IOCPF_E_STOP: |
1037 | bfa_iocpf_timer_stop(ioc); | ||
1038 | bfa_ioc_firmware_unlock(ioc); | 1093 | bfa_ioc_firmware_unlock(ioc); |
1039 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); | 1094 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); |
1040 | break; | 1095 | break; |
1041 | 1096 | ||
1042 | case IOCPF_E_TIMEOUT: | ||
1043 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
1044 | break; | ||
1045 | |||
1046 | default: | 1097 | default: |
1047 | bfa_sm_fault(ioc, event); | 1098 | bfa_sm_fault(ioc, event); |
1048 | } | 1099 | } |
1049 | } | 1100 | } |
1050 | 1101 | ||
1051 | |||
1052 | static void | 1102 | static void |
1053 | bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) | 1103 | bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf) |
1054 | { | 1104 | { |
1055 | /* | 1105 | /* |
1056 | * Mark IOC as failed in hardware and stop firmware. | 1106 | * Mark IOC as failed in hardware and stop firmware. |
1057 | */ | 1107 | */ |
1058 | bfa_ioc_lpu_stop(iocpf->ioc); | 1108 | bfa_ioc_lpu_stop(iocpf->ioc); |
1059 | writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate); | ||
1060 | |||
1061 | /* | ||
1062 | * Notify other functions on HB failure. | ||
1063 | */ | ||
1064 | bfa_ioc_notify_hbfail(iocpf->ioc); | ||
1065 | 1109 | ||
1066 | /* | 1110 | /* |
1067 | * Flush any queued up mailbox requests. | 1111 | * Flush any queued up mailbox requests. |
1068 | */ | 1112 | */ |
1069 | bfa_ioc_mbox_hbfail(iocpf->ioc); | 1113 | bfa_ioc_mbox_hbfail(iocpf->ioc); |
1070 | 1114 | ||
1071 | if (iocpf->auto_recover) | 1115 | bfa_ioc_hw_sem_get(iocpf->ioc); |
1072 | bfa_iocpf_recovery_timer_start(iocpf->ioc); | 1116 | } |
1117 | |||
1118 | static void | ||
1119 | bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | ||
1120 | { | ||
1121 | struct bfa_ioc_s *ioc = iocpf->ioc; | ||
1122 | |||
1123 | bfa_trc(ioc, event); | ||
1124 | |||
1125 | switch (event) { | ||
1126 | case IOCPF_E_SEMLOCKED: | ||
1127 | iocpf->retry_count = 0; | ||
1128 | bfa_ioc_sync_ack(ioc); | ||
1129 | bfa_ioc_notify_fail(ioc); | ||
1130 | if (!iocpf->auto_recover) { | ||
1131 | bfa_ioc_sync_leave(ioc); | ||
1132 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
1133 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); | ||
1134 | } else { | ||
1135 | if (bfa_ioc_sync_complete(ioc)) | ||
1136 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); | ||
1137 | else { | ||
1138 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
1139 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
1140 | } | ||
1141 | } | ||
1142 | break; | ||
1143 | |||
1144 | case IOCPF_E_DISABLE: | ||
1145 | bfa_sem_timer_stop(ioc); | ||
1146 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); | ||
1147 | break; | ||
1148 | |||
1149 | case IOCPF_E_FAIL: | ||
1150 | break; | ||
1151 | |||
1152 | default: | ||
1153 | bfa_sm_fault(ioc, event); | ||
1154 | } | ||
1155 | } | ||
1156 | |||
1157 | static void | ||
1158 | bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) | ||
1159 | { | ||
1073 | } | 1160 | } |
1074 | 1161 | ||
1075 | /* | 1162 | /* |
@@ -1084,24 +1171,16 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) | |||
1084 | 1171 | ||
1085 | switch (event) { | 1172 | switch (event) { |
1086 | case IOCPF_E_DISABLE: | 1173 | case IOCPF_E_DISABLE: |
1087 | if (iocpf->auto_recover) | ||
1088 | bfa_iocpf_timer_stop(ioc); | ||
1089 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); | 1174 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); |
1090 | break; | 1175 | break; |
1091 | 1176 | ||
1092 | case IOCPF_E_TIMEOUT: | ||
1093 | bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); | ||
1094 | break; | ||
1095 | |||
1096 | default: | 1177 | default: |
1097 | bfa_sm_fault(ioc, event); | 1178 | bfa_sm_fault(ioc, event); |
1098 | } | 1179 | } |
1099 | } | 1180 | } |
1100 | 1181 | ||
1101 | |||
1102 | |||
1103 | /* | 1182 | /* |
1104 | * hal_ioc_pvt BFA IOC private functions | 1183 | * BFA IOC private functions |
1105 | */ | 1184 | */ |
1106 | 1185 | ||
1107 | static void | 1186 | static void |
@@ -1139,16 +1218,10 @@ bfa_ioc_sem_get(void __iomem *sem_reg) | |||
1139 | if (r32 == 0) | 1218 | if (r32 == 0) |
1140 | return BFA_TRUE; | 1219 | return BFA_TRUE; |
1141 | 1220 | ||
1142 | bfa_assert(cnt < BFA_SEM_SPINCNT); | 1221 | WARN_ON(cnt >= BFA_SEM_SPINCNT); |
1143 | return BFA_FALSE; | 1222 | return BFA_FALSE; |
1144 | } | 1223 | } |
1145 | 1224 | ||
1146 | void | ||
1147 | bfa_ioc_sem_release(void __iomem *sem_reg) | ||
1148 | { | ||
1149 | writel(1, sem_reg); | ||
1150 | } | ||
1151 | |||
1152 | static void | 1225 | static void |
1153 | bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) | 1226 | bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) |
1154 | { | 1227 | { |
@@ -1167,18 +1240,6 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) | |||
1167 | bfa_sem_timer_start(ioc); | 1240 | bfa_sem_timer_start(ioc); |
1168 | } | 1241 | } |
1169 | 1242 | ||
1170 | void | ||
1171 | bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) | ||
1172 | { | ||
1173 | writel(1, ioc->ioc_regs.ioc_sem_reg); | ||
1174 | } | ||
1175 | |||
1176 | static void | ||
1177 | bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) | ||
1178 | { | ||
1179 | bfa_sem_timer_stop(ioc); | ||
1180 | } | ||
1181 | |||
1182 | /* | 1243 | /* |
1183 | * Initialize LPU local memory (aka secondary memory / SRAM) | 1244 | * Initialize LPU local memory (aka secondary memory / SRAM) |
1184 | */ | 1245 | */ |
@@ -1212,7 +1273,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) | |||
1212 | * If memory initialization is not successful, IOC timeout will catch | 1273 | * If memory initialization is not successful, IOC timeout will catch |
1213 | * such failures. | 1274 | * such failures. |
1214 | */ | 1275 | */ |
1215 | bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE); | 1276 | WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); |
1216 | bfa_trc(ioc, pss_ctl); | 1277 | bfa_trc(ioc, pss_ctl); |
1217 | 1278 | ||
1218 | pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); | 1279 | pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); |
@@ -1258,8 +1319,8 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) | |||
1258 | int i; | 1319 | int i; |
1259 | u32 *fwsig = (u32 *) fwhdr; | 1320 | u32 *fwsig = (u32 *) fwhdr; |
1260 | 1321 | ||
1261 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 1322 | pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); |
1262 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); | 1323 | pgoff = PSS_SMEM_PGOFF(loff); |
1263 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | 1324 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); |
1264 | 1325 | ||
1265 | for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); | 1326 | for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); |
@@ -1304,12 +1365,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) | |||
1304 | { | 1365 | { |
1305 | struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; | 1366 | struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; |
1306 | 1367 | ||
1307 | /* | ||
1308 | * If bios/efi boot (flash based) -- return true | ||
1309 | */ | ||
1310 | if (bfa_ioc_is_bios_optrom(ioc)) | ||
1311 | return BFA_TRUE; | ||
1312 | |||
1313 | bfa_ioc_fwver_get(ioc, &fwhdr); | 1368 | bfa_ioc_fwver_get(ioc, &fwhdr); |
1314 | drv_fwhdr = (struct bfi_ioc_image_hdr_s *) | 1369 | drv_fwhdr = (struct bfi_ioc_image_hdr_s *) |
1315 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); | 1370 | bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); |
@@ -1342,7 +1397,6 @@ bfa_ioc_msgflush(struct bfa_ioc_s *ioc) | |||
1342 | writel(1, ioc->ioc_regs.lpu_mbox_cmd); | 1397 | writel(1, ioc->ioc_regs.lpu_mbox_cmd); |
1343 | } | 1398 | } |
1344 | 1399 | ||
1345 | |||
1346 | static void | 1400 | static void |
1347 | bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | 1401 | bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) |
1348 | { | 1402 | { |
@@ -1362,22 +1416,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
1362 | boot_env = BFI_BOOT_LOADER_OS; | 1416 | boot_env = BFI_BOOT_LOADER_OS; |
1363 | 1417 | ||
1364 | /* | 1418 | /* |
1365 | * Flash based firmware boot BIOS env. | ||
1366 | */ | ||
1367 | if (bfa_ioc_is_bios_optrom(ioc)) { | ||
1368 | boot_type = BFI_BOOT_TYPE_FLASH; | ||
1369 | boot_env = BFI_BOOT_LOADER_BIOS; | ||
1370 | } | ||
1371 | |||
1372 | /* | ||
1373 | * Flash based firmware boot UEFI env. | ||
1374 | */ | ||
1375 | if (bfa_ioc_is_uefi(ioc)) { | ||
1376 | boot_type = BFI_BOOT_TYPE_FLASH; | ||
1377 | boot_env = BFI_BOOT_LOADER_UEFI; | ||
1378 | } | ||
1379 | |||
1380 | /* | ||
1381 | * check if firmware is valid | 1419 | * check if firmware is valid |
1382 | */ | 1420 | */ |
1383 | fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? | 1421 | fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? |
@@ -1405,8 +1443,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) | |||
1405 | * convergence, IOC will be in operational state when 2nd driver | 1443 | * convergence, IOC will be in operational state when 2nd driver |
1406 | * is loaded. | 1444 | * is loaded. |
1407 | */ | 1445 | */ |
1408 | if (ioc_fwstate == BFI_IOC_DISABLED || | 1446 | if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { |
1409 | (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { | ||
1410 | 1447 | ||
1411 | /* | 1448 | /* |
1412 | * When using MSI-X any pending firmware ready event should | 1449 | * When using MSI-X any pending firmware ready event should |
@@ -1442,7 +1479,7 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) | |||
1442 | bfa_trc(ioc, msgp[0]); | 1479 | bfa_trc(ioc, msgp[0]); |
1443 | bfa_trc(ioc, len); | 1480 | bfa_trc(ioc, len); |
1444 | 1481 | ||
1445 | bfa_assert(len <= BFI_IOC_MSGLEN_MAX); | 1482 | WARN_ON(len > BFI_IOC_MSGLEN_MAX); |
1446 | 1483 | ||
1447 | /* | 1484 | /* |
1448 | * first write msg to mailbox registers | 1485 | * first write msg to mailbox registers |
@@ -1465,12 +1502,12 @@ static void | |||
1465 | bfa_ioc_send_enable(struct bfa_ioc_s *ioc) | 1502 | bfa_ioc_send_enable(struct bfa_ioc_s *ioc) |
1466 | { | 1503 | { |
1467 | struct bfi_ioc_ctrl_req_s enable_req; | 1504 | struct bfi_ioc_ctrl_req_s enable_req; |
1468 | struct bfa_timeval_s tv; | 1505 | struct timeval tv; |
1469 | 1506 | ||
1470 | bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, | 1507 | bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, |
1471 | bfa_ioc_portid(ioc)); | 1508 | bfa_ioc_portid(ioc)); |
1472 | enable_req.ioc_class = ioc->ioc_mc; | 1509 | enable_req.ioc_class = ioc->ioc_mc; |
1473 | bfa_os_gettimeofday(&tv); | 1510 | do_gettimeofday(&tv); |
1474 | enable_req.tv_sec = be32_to_cpu(tv.tv_sec); | 1511 | enable_req.tv_sec = be32_to_cpu(tv.tv_sec); |
1475 | bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); | 1512 | bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); |
1476 | } | 1513 | } |
@@ -1504,7 +1541,6 @@ bfa_ioc_hb_check(void *cbarg) | |||
1504 | 1541 | ||
1505 | hb_count = readl(ioc->ioc_regs.heartbeat); | 1542 | hb_count = readl(ioc->ioc_regs.heartbeat); |
1506 | if (ioc->hb_count == hb_count) { | 1543 | if (ioc->hb_count == hb_count) { |
1507 | printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count); | ||
1508 | bfa_ioc_recover(ioc); | 1544 | bfa_ioc_recover(ioc); |
1509 | return; | 1545 | return; |
1510 | } else { | 1546 | } else { |
@@ -1522,13 +1558,6 @@ bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) | |||
1522 | bfa_hb_timer_start(ioc); | 1558 | bfa_hb_timer_start(ioc); |
1523 | } | 1559 | } |
1524 | 1560 | ||
1525 | static void | ||
1526 | bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) | ||
1527 | { | ||
1528 | bfa_hb_timer_stop(ioc); | ||
1529 | } | ||
1530 | |||
1531 | |||
1532 | /* | 1561 | /* |
1533 | * Initiate a full firmware download. | 1562 | * Initiate a full firmware download. |
1534 | */ | 1563 | */ |
@@ -1550,8 +1579,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | |||
1550 | bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); | 1579 | bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc))); |
1551 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); | 1580 | fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); |
1552 | 1581 | ||
1553 | pgnum = bfa_ioc_smem_pgnum(ioc, loff); | 1582 | pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); |
1554 | pgoff = bfa_ioc_smem_pgoff(ioc, loff); | 1583 | pgoff = PSS_SMEM_PGOFF(loff); |
1555 | 1584 | ||
1556 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | 1585 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); |
1557 | 1586 | ||
@@ -1581,7 +1610,8 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | |||
1581 | } | 1610 | } |
1582 | } | 1611 | } |
1583 | 1612 | ||
1584 | writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); | 1613 | writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), |
1614 | ioc->ioc_regs.host_page_num_fn); | ||
1585 | 1615 | ||
1586 | /* | 1616 | /* |
1587 | * Set boot type and boot param at the end. | 1617 | * Set boot type and boot param at the end. |
@@ -1592,11 +1622,6 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, | |||
1592 | swab32(boot_env)); | 1622 | swab32(boot_env)); |
1593 | } | 1623 | } |
1594 | 1624 | ||
1595 | static void | ||
1596 | bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) | ||
1597 | { | ||
1598 | bfa_ioc_hwinit(ioc, force); | ||
1599 | } | ||
1600 | 1625 | ||
1601 | /* | 1626 | /* |
1602 | * Update BFA configuration from firmware configuration. | 1627 | * Update BFA configuration from firmware configuration. |
@@ -1683,12 +1708,13 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) | |||
1683 | static bfa_status_t | 1708 | static bfa_status_t |
1684 | bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) | 1709 | bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) |
1685 | { | 1710 | { |
1686 | u32 pgnum, loff, r32; | 1711 | u32 pgnum, loff; |
1712 | __be32 r32; | ||
1687 | int i, len; | 1713 | int i, len; |
1688 | u32 *buf = tbuf; | 1714 | u32 *buf = tbuf; |
1689 | 1715 | ||
1690 | pgnum = bfa_ioc_smem_pgnum(ioc, soff); | 1716 | pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); |
1691 | loff = bfa_ioc_smem_pgoff(ioc, soff); | 1717 | loff = PSS_SMEM_PGOFF(soff); |
1692 | bfa_trc(ioc, pgnum); | 1718 | bfa_trc(ioc, pgnum); |
1693 | bfa_trc(ioc, loff); | 1719 | bfa_trc(ioc, loff); |
1694 | bfa_trc(ioc, sz); | 1720 | bfa_trc(ioc, sz); |
@@ -1719,11 +1745,12 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) | |||
1719 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | 1745 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); |
1720 | } | 1746 | } |
1721 | } | 1747 | } |
1722 | writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); | 1748 | writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), |
1749 | ioc->ioc_regs.host_page_num_fn); | ||
1723 | /* | 1750 | /* |
1724 | * release semaphore. | 1751 | * release semaphore. |
1725 | */ | 1752 | */ |
1726 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | 1753 | writel(1, ioc->ioc_regs.ioc_init_sem_reg); |
1727 | 1754 | ||
1728 | bfa_trc(ioc, pgnum); | 1755 | bfa_trc(ioc, pgnum); |
1729 | return BFA_STATUS_OK; | 1756 | return BFA_STATUS_OK; |
@@ -1742,8 +1769,8 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) | |||
1742 | int i, len; | 1769 | int i, len; |
1743 | u32 pgnum, loff; | 1770 | u32 pgnum, loff; |
1744 | 1771 | ||
1745 | pgnum = bfa_ioc_smem_pgnum(ioc, soff); | 1772 | pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); |
1746 | loff = bfa_ioc_smem_pgoff(ioc, soff); | 1773 | loff = PSS_SMEM_PGOFF(soff); |
1747 | bfa_trc(ioc, pgnum); | 1774 | bfa_trc(ioc, pgnum); |
1748 | bfa_trc(ioc, loff); | 1775 | bfa_trc(ioc, loff); |
1749 | bfa_trc(ioc, sz); | 1776 | bfa_trc(ioc, sz); |
@@ -1773,35 +1800,38 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) | |||
1773 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); | 1800 | writel(pgnum, ioc->ioc_regs.host_page_num_fn); |
1774 | } | 1801 | } |
1775 | } | 1802 | } |
1776 | writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); | 1803 | writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), |
1804 | ioc->ioc_regs.host_page_num_fn); | ||
1777 | 1805 | ||
1778 | /* | 1806 | /* |
1779 | * release semaphore. | 1807 | * release semaphore. |
1780 | */ | 1808 | */ |
1781 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | 1809 | writel(1, ioc->ioc_regs.ioc_init_sem_reg); |
1782 | bfa_trc(ioc, pgnum); | 1810 | bfa_trc(ioc, pgnum); |
1783 | return BFA_STATUS_OK; | 1811 | return BFA_STATUS_OK; |
1784 | } | 1812 | } |
1785 | 1813 | ||
1786 | /* | ||
1787 | * hal iocpf to ioc interface | ||
1788 | */ | ||
1789 | static void | 1814 | static void |
1790 | bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc) | 1815 | bfa_ioc_fail_notify(struct bfa_ioc_s *ioc) |
1791 | { | 1816 | { |
1792 | bfa_fsm_send_event(ioc, IOC_E_ENABLED); | 1817 | struct list_head *qe; |
1793 | } | 1818 | struct bfa_ioc_hbfail_notify_s *notify; |
1819 | struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; | ||
1794 | 1820 | ||
1795 | static void | 1821 | /* |
1796 | bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc) | 1822 | * Notify driver and common modules registered for notification. |
1797 | { | 1823 | */ |
1798 | bfa_fsm_send_event(ioc, IOC_E_DISABLED); | 1824 | ioc->cbfn->hbfail_cbfn(ioc->bfa); |
1799 | } | 1825 | list_for_each(qe, &ioc->hb_notify_q) { |
1826 | notify = (struct bfa_ioc_hbfail_notify_s *) qe; | ||
1827 | notify->cbfn(notify->cbarg); | ||
1828 | } | ||
1829 | |||
1830 | bfa_ioc_debug_save_ftrc(ioc); | ||
1831 | |||
1832 | BFA_LOG(KERN_CRIT, bfad, bfa_log_level, | ||
1833 | "Heart Beat of IOC has failed\n"); | ||
1800 | 1834 | ||
1801 | static void | ||
1802 | bfa_ioc_pf_failed(struct bfa_ioc_s *ioc) | ||
1803 | { | ||
1804 | bfa_fsm_send_event(ioc, IOC_E_FAILED); | ||
1805 | } | 1835 | } |
1806 | 1836 | ||
1807 | static void | 1837 | static void |
@@ -1817,12 +1847,6 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) | |||
1817 | "with the driver version\n"); | 1847 | "with the driver version\n"); |
1818 | } | 1848 | } |
1819 | 1849 | ||
1820 | |||
1821 | |||
1822 | /* | ||
1823 | * hal_ioc_public | ||
1824 | */ | ||
1825 | |||
1826 | bfa_status_t | 1850 | bfa_status_t |
1827 | bfa_ioc_pll_init(struct bfa_ioc_s *ioc) | 1851 | bfa_ioc_pll_init(struct bfa_ioc_s *ioc) |
1828 | { | 1852 | { |
@@ -1838,7 +1862,7 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc) | |||
1838 | /* | 1862 | /* |
1839 | * release semaphore. | 1863 | * release semaphore. |
1840 | */ | 1864 | */ |
1841 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); | 1865 | writel(1, ioc->ioc_regs.ioc_init_sem_reg); |
1842 | 1866 | ||
1843 | return BFA_STATUS_OK; | 1867 | return BFA_STATUS_OK; |
1844 | } | 1868 | } |
@@ -1909,7 +1933,7 @@ bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) | |||
1909 | void | 1933 | void |
1910 | bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) | 1934 | bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) |
1911 | { | 1935 | { |
1912 | u32 *msgp = mbmsg; | 1936 | __be32 *msgp = mbmsg; |
1913 | u32 r32; | 1937 | u32 r32; |
1914 | int i; | 1938 | int i; |
1915 | 1939 | ||
@@ -1962,7 +1986,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) | |||
1962 | 1986 | ||
1963 | default: | 1987 | default: |
1964 | bfa_trc(ioc, msg->mh.msg_id); | 1988 | bfa_trc(ioc, msg->mh.msg_id); |
1965 | bfa_assert(0); | 1989 | WARN_ON(1); |
1966 | } | 1990 | } |
1967 | } | 1991 | } |
1968 | 1992 | ||
@@ -2043,15 +2067,6 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) | |||
2043 | ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; | 2067 | ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; |
2044 | } | 2068 | } |
2045 | 2069 | ||
2046 | /* | ||
2047 | * Return size of dma memory required. | ||
2048 | */ | ||
2049 | u32 | ||
2050 | bfa_ioc_meminfo(void) | ||
2051 | { | ||
2052 | return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ); | ||
2053 | } | ||
2054 | |||
2055 | void | 2070 | void |
2056 | bfa_ioc_enable(struct bfa_ioc_s *ioc) | 2071 | bfa_ioc_enable(struct bfa_ioc_s *ioc) |
2057 | { | 2072 | { |
@@ -2068,18 +2083,6 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc) | |||
2068 | bfa_fsm_send_event(ioc, IOC_E_DISABLE); | 2083 | bfa_fsm_send_event(ioc, IOC_E_DISABLE); |
2069 | } | 2084 | } |
2070 | 2085 | ||
2071 | /* | ||
2072 | * Returns memory required for saving firmware trace in case of crash. | ||
2073 | * Driver must call this interface to allocate memory required for | ||
2074 | * automatic saving of firmware trace. Driver should call | ||
2075 | * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this | ||
2076 | * trace memory. | ||
2077 | */ | ||
2078 | int | ||
2079 | bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) | ||
2080 | { | ||
2081 | return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; | ||
2082 | } | ||
2083 | 2086 | ||
2084 | /* | 2087 | /* |
2085 | * Initialize memory for saving firmware trace. Driver must initialize | 2088 | * Initialize memory for saving firmware trace. Driver must initialize |
@@ -2089,19 +2092,7 @@ void | |||
2089 | bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) | 2092 | bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave) |
2090 | { | 2093 | { |
2091 | ioc->dbg_fwsave = dbg_fwsave; | 2094 | ioc->dbg_fwsave = dbg_fwsave; |
2092 | ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover); | 2095 | ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0; |
2093 | } | ||
2094 | |||
2095 | u32 | ||
2096 | bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr) | ||
2097 | { | ||
2098 | return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); | ||
2099 | } | ||
2100 | |||
2101 | u32 | ||
2102 | bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr) | ||
2103 | { | ||
2104 | return PSS_SMEM_PGOFF(fmaddr); | ||
2105 | } | 2096 | } |
2106 | 2097 | ||
2107 | /* | 2098 | /* |
@@ -2265,14 +2256,13 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) | |||
2265 | } | 2256 | } |
2266 | 2257 | ||
2267 | /* | 2258 | /* |
2268 | * Add to IOC heartbeat failure notification queue. To be used by common | 2259 | * Reset IOC fwstate registers. |
2269 | * modules such as cee, port, diag. | ||
2270 | */ | 2260 | */ |
2271 | void | 2261 | void |
2272 | bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, | 2262 | bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc) |
2273 | struct bfa_ioc_hbfail_notify_s *notify) | ||
2274 | { | 2263 | { |
2275 | list_add_tail(¬ify->qe, &ioc->hb_notify_q); | 2264 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); |
2265 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); | ||
2276 | } | 2266 | } |
2277 | 2267 | ||
2278 | #define BFA_MFG_NAME "Brocade" | 2268 | #define BFA_MFG_NAME "Brocade" |
@@ -2306,7 +2296,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | |||
2306 | else | 2296 | else |
2307 | ad_attr->prototype = 0; | 2297 | ad_attr->prototype = 0; |
2308 | 2298 | ||
2309 | ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); | 2299 | ad_attr->pwwn = ioc->attr->pwwn; |
2310 | ad_attr->mac = bfa_ioc_get_mac(ioc); | 2300 | ad_attr->mac = bfa_ioc_get_mac(ioc); |
2311 | 2301 | ||
2312 | ad_attr->pcie_gen = ioc_attr->pcie_gen; | 2302 | ad_attr->pcie_gen = ioc_attr->pcie_gen; |
@@ -2317,7 +2307,8 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | |||
2317 | bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); | 2307 | bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); |
2318 | 2308 | ||
2319 | ad_attr->cna_capable = ioc->cna; | 2309 | ad_attr->cna_capable = ioc->cna; |
2320 | ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna; | 2310 | ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna && |
2311 | !ad_attr->is_mezz; | ||
2321 | } | 2312 | } |
2322 | 2313 | ||
2323 | enum bfa_ioc_type_e | 2314 | enum bfa_ioc_type_e |
@@ -2330,7 +2321,7 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc) | |||
2330 | else if (ioc->ioc_mc == BFI_MC_LL) | 2321 | else if (ioc->ioc_mc == BFI_MC_LL) |
2331 | return BFA_IOC_TYPE_LL; | 2322 | return BFA_IOC_TYPE_LL; |
2332 | else { | 2323 | else { |
2333 | bfa_assert(ioc->ioc_mc == BFI_MC_LL); | 2324 | WARN_ON(ioc->ioc_mc != BFI_MC_LL); |
2334 | return BFA_IOC_TYPE_LL; | 2325 | return BFA_IOC_TYPE_LL; |
2335 | } | 2326 | } |
2336 | } | 2327 | } |
@@ -2354,7 +2345,7 @@ bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) | |||
2354 | void | 2345 | void |
2355 | bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) | 2346 | bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) |
2356 | { | 2347 | { |
2357 | bfa_assert(chip_rev); | 2348 | WARN_ON(!chip_rev); |
2358 | 2349 | ||
2359 | memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); | 2350 | memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); |
2360 | 2351 | ||
@@ -2386,7 +2377,7 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) | |||
2386 | { | 2377 | { |
2387 | struct bfi_ioc_attr_s *ioc_attr; | 2378 | struct bfi_ioc_attr_s *ioc_attr; |
2388 | 2379 | ||
2389 | bfa_assert(model); | 2380 | WARN_ON(!model); |
2390 | memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); | 2381 | memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); |
2391 | 2382 | ||
2392 | ioc_attr = ioc->attr; | 2383 | ioc_attr = ioc->attr; |
@@ -2455,27 +2446,6 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) | |||
2455 | bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); | 2446 | bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); |
2456 | } | 2447 | } |
2457 | 2448 | ||
2458 | /* | ||
2459 | * hal_wwn_public | ||
2460 | */ | ||
2461 | wwn_t | ||
2462 | bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc) | ||
2463 | { | ||
2464 | return ioc->attr->pwwn; | ||
2465 | } | ||
2466 | |||
2467 | wwn_t | ||
2468 | bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc) | ||
2469 | { | ||
2470 | return ioc->attr->nwwn; | ||
2471 | } | ||
2472 | |||
2473 | u64 | ||
2474 | bfa_ioc_get_adid(struct bfa_ioc_s *ioc) | ||
2475 | { | ||
2476 | return ioc->attr->mfg_pwwn; | ||
2477 | } | ||
2478 | |||
2479 | mac_t | 2449 | mac_t |
2480 | bfa_ioc_get_mac(struct bfa_ioc_s *ioc) | 2450 | bfa_ioc_get_mac(struct bfa_ioc_s *ioc) |
2481 | { | 2451 | { |
@@ -2488,18 +2458,6 @@ bfa_ioc_get_mac(struct bfa_ioc_s *ioc) | |||
2488 | return ioc->attr->mac; | 2458 | return ioc->attr->mac; |
2489 | } | 2459 | } |
2490 | 2460 | ||
2491 | wwn_t | ||
2492 | bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc) | ||
2493 | { | ||
2494 | return ioc->attr->mfg_pwwn; | ||
2495 | } | ||
2496 | |||
2497 | wwn_t | ||
2498 | bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc) | ||
2499 | { | ||
2500 | return ioc->attr->mfg_nwwn; | ||
2501 | } | ||
2502 | |||
2503 | mac_t | 2461 | mac_t |
2504 | bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) | 2462 | bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc) |
2505 | { | 2463 | { |
@@ -2541,14 +2499,6 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) | |||
2541 | return BFA_STATUS_OK; | 2499 | return BFA_STATUS_OK; |
2542 | } | 2500 | } |
2543 | 2501 | ||
2544 | /* | ||
2545 | * Clear saved firmware trace | ||
2546 | */ | ||
2547 | void | ||
2548 | bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) | ||
2549 | { | ||
2550 | ioc->dbg_fwsave_once = BFA_TRUE; | ||
2551 | } | ||
2552 | 2502 | ||
2553 | /* | 2503 | /* |
2554 | * Retrieve saved firmware trace from a prior IOC failure. | 2504 | * Retrieve saved firmware trace from a prior IOC failure. |
@@ -2701,13 +2651,16 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc) | |||
2701 | * Save firmware trace if configured. | 2651 | * Save firmware trace if configured. |
2702 | */ | 2652 | */ |
2703 | static void | 2653 | static void |
2704 | bfa_ioc_debug_save(struct bfa_ioc_s *ioc) | 2654 | bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc) |
2705 | { | 2655 | { |
2706 | int tlen; | 2656 | int tlen; |
2707 | 2657 | ||
2708 | if (ioc->dbg_fwsave_len) { | 2658 | if (ioc->dbg_fwsave_once) { |
2709 | tlen = ioc->dbg_fwsave_len; | 2659 | ioc->dbg_fwsave_once = BFA_FALSE; |
2710 | bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); | 2660 | if (ioc->dbg_fwsave_len) { |
2661 | tlen = ioc->dbg_fwsave_len; | ||
2662 | bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); | ||
2663 | } | ||
2711 | } | 2664 | } |
2712 | } | 2665 | } |
2713 | 2666 | ||
@@ -2717,11 +2670,6 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc) | |||
2717 | static void | 2670 | static void |
2718 | bfa_ioc_recover(struct bfa_ioc_s *ioc) | 2671 | bfa_ioc_recover(struct bfa_ioc_s *ioc) |
2719 | { | 2672 | { |
2720 | if (ioc->dbg_fwsave_once) { | ||
2721 | ioc->dbg_fwsave_once = BFA_FALSE; | ||
2722 | bfa_ioc_debug_save(ioc); | ||
2723 | } | ||
2724 | |||
2725 | bfa_ioc_stats(ioc, ioc_hbfails); | 2673 | bfa_ioc_stats(ioc, ioc_hbfails); |
2726 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); | 2674 | bfa_fsm_send_event(ioc, IOC_E_HBFAIL); |
2727 | } | 2675 | } |
@@ -2734,45 +2682,8 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) | |||
2734 | } | 2682 | } |
2735 | 2683 | ||
2736 | /* | 2684 | /* |
2737 | * hal_iocpf_pvt BFA IOC PF private functions | 2685 | * BFA IOC PF private functions |
2738 | */ | 2686 | */ |
2739 | |||
2740 | static void | ||
2741 | bfa_iocpf_enable(struct bfa_ioc_s *ioc) | ||
2742 | { | ||
2743 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); | ||
2744 | } | ||
2745 | |||
2746 | static void | ||
2747 | bfa_iocpf_disable(struct bfa_ioc_s *ioc) | ||
2748 | { | ||
2749 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); | ||
2750 | } | ||
2751 | |||
2752 | static void | ||
2753 | bfa_iocpf_fail(struct bfa_ioc_s *ioc) | ||
2754 | { | ||
2755 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); | ||
2756 | } | ||
2757 | |||
2758 | static void | ||
2759 | bfa_iocpf_initfail(struct bfa_ioc_s *ioc) | ||
2760 | { | ||
2761 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); | ||
2762 | } | ||
2763 | |||
2764 | static void | ||
2765 | bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc) | ||
2766 | { | ||
2767 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); | ||
2768 | } | ||
2769 | |||
2770 | static void | ||
2771 | bfa_iocpf_stop(struct bfa_ioc_s *ioc) | ||
2772 | { | ||
2773 | bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); | ||
2774 | } | ||
2775 | |||
2776 | static void | 2687 | static void |
2777 | bfa_iocpf_timeout(void *ioc_arg) | 2688 | bfa_iocpf_timeout(void *ioc_arg) |
2778 | { | 2689 | { |
@@ -2794,12 +2705,6 @@ bfa_iocpf_sem_timeout(void *ioc_arg) | |||
2794 | * bfa timer function | 2705 | * bfa timer function |
2795 | */ | 2706 | */ |
2796 | void | 2707 | void |
2797 | bfa_timer_init(struct bfa_timer_mod_s *mod) | ||
2798 | { | ||
2799 | INIT_LIST_HEAD(&mod->timer_q); | ||
2800 | } | ||
2801 | |||
2802 | void | ||
2803 | bfa_timer_beat(struct bfa_timer_mod_s *mod) | 2708 | bfa_timer_beat(struct bfa_timer_mod_s *mod) |
2804 | { | 2709 | { |
2805 | struct list_head *qh = &mod->timer_q; | 2710 | struct list_head *qh = &mod->timer_q; |
@@ -2843,8 +2748,8 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, | |||
2843 | void (*timercb) (void *), void *arg, unsigned int timeout) | 2748 | void (*timercb) (void *), void *arg, unsigned int timeout) |
2844 | { | 2749 | { |
2845 | 2750 | ||
2846 | bfa_assert(timercb != NULL); | 2751 | WARN_ON(timercb == NULL); |
2847 | bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer)); | 2752 | WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer)); |
2848 | 2753 | ||
2849 | timer->timeout = timeout; | 2754 | timer->timeout = timeout; |
2850 | timer->timercb = timercb; | 2755 | timer->timercb = timercb; |
@@ -2859,7 +2764,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, | |||
2859 | void | 2764 | void |
2860 | bfa_timer_stop(struct bfa_timer_s *timer) | 2765 | bfa_timer_stop(struct bfa_timer_s *timer) |
2861 | { | 2766 | { |
2862 | bfa_assert(!list_empty(&timer->qe)); | 2767 | WARN_ON(list_empty(&timer->qe)); |
2863 | 2768 | ||
2864 | list_del(&timer->qe); | 2769 | list_del(&timer->qe); |
2865 | } | 2770 | } |
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 9c407a87a1a1..ec9cf08b0e7f 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h | |||
@@ -18,10 +18,15 @@ | |||
18 | #ifndef __BFA_IOC_H__ | 18 | #ifndef __BFA_IOC_H__ |
19 | #define __BFA_IOC_H__ | 19 | #define __BFA_IOC_H__ |
20 | 20 | ||
21 | #include "bfa_os_inc.h" | 21 | #include "bfad_drv.h" |
22 | #include "bfa_cs.h" | 22 | #include "bfa_cs.h" |
23 | #include "bfi.h" | 23 | #include "bfi.h" |
24 | 24 | ||
25 | #define BFA_DBG_FWTRC_ENTS (BFI_IOC_TRC_ENTS) | ||
26 | #define BFA_DBG_FWTRC_LEN \ | ||
27 | (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) + \ | ||
28 | (sizeof(struct bfa_trc_mod_s) - \ | ||
29 | BFA_TRC_MAX * sizeof(struct bfa_trc_s))) | ||
25 | /* | 30 | /* |
26 | * BFA timer declarations | 31 | * BFA timer declarations |
27 | */ | 32 | */ |
@@ -47,7 +52,6 @@ struct bfa_timer_mod_s { | |||
47 | #define BFA_TIMER_FREQ 200 /* specified in millisecs */ | 52 | #define BFA_TIMER_FREQ 200 /* specified in millisecs */ |
48 | 53 | ||
49 | void bfa_timer_beat(struct bfa_timer_mod_s *mod); | 54 | void bfa_timer_beat(struct bfa_timer_mod_s *mod); |
50 | void bfa_timer_init(struct bfa_timer_mod_s *mod); | ||
51 | void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, | 55 | void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, |
52 | bfa_timer_cbfn_t timercb, void *arg, | 56 | bfa_timer_cbfn_t timercb, void *arg, |
53 | unsigned int timeout); | 57 | unsigned int timeout); |
@@ -70,7 +74,7 @@ struct bfa_sge_s { | |||
70 | #define bfa_swap_words(_x) ( \ | 74 | #define bfa_swap_words(_x) ( \ |
71 | ((_x) << 32) | ((_x) >> 32)) | 75 | ((_x) << 32) | ((_x) >> 32)) |
72 | 76 | ||
73 | #ifdef __BIGENDIAN | 77 | #ifdef __BIG_ENDIAN |
74 | #define bfa_sge_to_be(_x) | 78 | #define bfa_sge_to_be(_x) |
75 | #define bfa_sge_to_le(_x) bfa_sge_word_swap(_x) | 79 | #define bfa_sge_to_le(_x) bfa_sge_word_swap(_x) |
76 | #define bfa_sgaddr_le(_x) bfa_swap_words(_x) | 80 | #define bfa_sgaddr_le(_x) bfa_swap_words(_x) |
@@ -115,8 +119,8 @@ struct bfa_dma_s { | |||
115 | static inline void | 119 | static inline void |
116 | __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) | 120 | __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) |
117 | { | 121 | { |
118 | dma_addr->a32.addr_lo = (u32) pa; | 122 | dma_addr->a32.addr_lo = (__be32) pa; |
119 | dma_addr->a32.addr_hi = (u32) (bfa_os_u32(pa)); | 123 | dma_addr->a32.addr_hi = (__be32) (pa >> 32); |
120 | } | 124 | } |
121 | 125 | ||
122 | 126 | ||
@@ -125,8 +129,8 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) | |||
125 | static inline void | 129 | static inline void |
126 | __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) | 130 | __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) |
127 | { | 131 | { |
128 | dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa); | 132 | dma_addr->a32.addr_lo = cpu_to_be32(pa); |
129 | dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa)); | 133 | dma_addr->a32.addr_hi = cpu_to_be32(pa >> 32); |
130 | } | 134 | } |
131 | 135 | ||
132 | struct bfa_ioc_regs_s { | 136 | struct bfa_ioc_regs_s { |
@@ -145,8 +149,11 @@ struct bfa_ioc_regs_s { | |||
145 | void __iomem *host_page_num_fn; | 149 | void __iomem *host_page_num_fn; |
146 | void __iomem *heartbeat; | 150 | void __iomem *heartbeat; |
147 | void __iomem *ioc_fwstate; | 151 | void __iomem *ioc_fwstate; |
152 | void __iomem *alt_ioc_fwstate; | ||
148 | void __iomem *ll_halt; | 153 | void __iomem *ll_halt; |
154 | void __iomem *alt_ll_halt; | ||
149 | void __iomem *err_set; | 155 | void __iomem *err_set; |
156 | void __iomem *ioc_fail_sync; | ||
150 | void __iomem *shirq_isr_next; | 157 | void __iomem *shirq_isr_next; |
151 | void __iomem *shirq_msk_next; | 158 | void __iomem *shirq_msk_next; |
152 | void __iomem *smem_page_start; | 159 | void __iomem *smem_page_start; |
@@ -254,8 +261,12 @@ struct bfa_ioc_hwif_s { | |||
254 | void (*ioc_map_port) (struct bfa_ioc_s *ioc); | 261 | void (*ioc_map_port) (struct bfa_ioc_s *ioc); |
255 | void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, | 262 | void (*ioc_isr_mode_set) (struct bfa_ioc_s *ioc, |
256 | bfa_boolean_t msix); | 263 | bfa_boolean_t msix); |
257 | void (*ioc_notify_hbfail) (struct bfa_ioc_s *ioc); | 264 | void (*ioc_notify_fail) (struct bfa_ioc_s *ioc); |
258 | void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); | 265 | void (*ioc_ownership_reset) (struct bfa_ioc_s *ioc); |
266 | void (*ioc_sync_join) (struct bfa_ioc_s *ioc); | ||
267 | void (*ioc_sync_leave) (struct bfa_ioc_s *ioc); | ||
268 | void (*ioc_sync_ack) (struct bfa_ioc_s *ioc); | ||
269 | bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc); | ||
259 | }; | 270 | }; |
260 | 271 | ||
261 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) | 272 | #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) |
@@ -325,7 +336,6 @@ void bfa_ioc_auto_recover(bfa_boolean_t auto_recover); | |||
325 | void bfa_ioc_detach(struct bfa_ioc_s *ioc); | 336 | void bfa_ioc_detach(struct bfa_ioc_s *ioc); |
326 | void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, | 337 | void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, |
327 | enum bfi_mclass mc); | 338 | enum bfi_mclass mc); |
328 | u32 bfa_ioc_meminfo(void); | ||
329 | void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa); | 339 | void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa); |
330 | void bfa_ioc_enable(struct bfa_ioc_s *ioc); | 340 | void bfa_ioc_enable(struct bfa_ioc_s *ioc); |
331 | void bfa_ioc_disable(struct bfa_ioc_s *ioc); | 341 | void bfa_ioc_disable(struct bfa_ioc_s *ioc); |
@@ -340,6 +350,7 @@ bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc); | |||
340 | bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); | 350 | bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc); |
341 | bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); | 351 | bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc); |
342 | bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); | 352 | bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc); |
353 | void bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc); | ||
343 | enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); | 354 | enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc); |
344 | void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); | 355 | void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num); |
345 | void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); | 356 | void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver); |
@@ -353,24 +364,16 @@ enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc); | |||
353 | void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); | 364 | void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr); |
354 | void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, | 365 | void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, |
355 | struct bfa_adapter_attr_s *ad_attr); | 366 | struct bfa_adapter_attr_s *ad_attr); |
356 | int bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover); | ||
357 | void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); | 367 | void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave); |
358 | bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, | 368 | bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, |
359 | int *trclen); | 369 | int *trclen); |
360 | void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc); | ||
361 | bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, | 370 | bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, |
362 | int *trclen); | 371 | int *trclen); |
363 | bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, | 372 | bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, |
364 | u32 *offset, int *buflen); | 373 | u32 *offset, int *buflen); |
365 | u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr); | ||
366 | u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr); | ||
367 | void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); | 374 | void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); |
368 | bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); | 375 | bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); |
369 | void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, | ||
370 | struct bfa_ioc_hbfail_notify_s *notify); | ||
371 | bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); | 376 | bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); |
372 | void bfa_ioc_sem_release(void __iomem *sem_reg); | ||
373 | void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); | ||
374 | void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, | 377 | void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, |
375 | struct bfi_ioc_image_hdr_s *fwhdr); | 378 | struct bfi_ioc_image_hdr_s *fwhdr); |
376 | bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, | 379 | bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, |
@@ -381,13 +384,8 @@ bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc); | |||
381 | /* | 384 | /* |
382 | * bfa mfg wwn API functions | 385 | * bfa mfg wwn API functions |
383 | */ | 386 | */ |
384 | wwn_t bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc); | ||
385 | wwn_t bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc); | ||
386 | mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); | 387 | mac_t bfa_ioc_get_mac(struct bfa_ioc_s *ioc); |
387 | wwn_t bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc); | ||
388 | wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc); | ||
389 | mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); | 388 | mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc); |
390 | u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc); | ||
391 | 389 | ||
392 | /* | 390 | /* |
393 | * F/W Image Size & Chunk | 391 | * F/W Image Size & Chunk |
@@ -421,7 +419,7 @@ bfa_cb_image_get_chunk(int type, u32 off) | |||
421 | return bfi_image_ct_cna_get_chunk(off); break; | 419 | return bfi_image_ct_cna_get_chunk(off); break; |
422 | case BFI_IMAGE_CB_FC: | 420 | case BFI_IMAGE_CB_FC: |
423 | return bfi_image_cb_fc_get_chunk(off); break; | 421 | return bfi_image_cb_fc_get_chunk(off); break; |
424 | default: return 0; | 422 | default: return NULL; |
425 | } | 423 | } |
426 | } | 424 | } |
427 | 425 | ||
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index 909945043850..e4a0713185b6 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_ioc.h" | 19 | #include "bfa_ioc.h" |
19 | #include "bfi_cbreg.h" | 20 | #include "bfi_cbreg.h" |
20 | #include "bfa_defs.h" | 21 | #include "bfa_defs.h" |
@@ -29,10 +30,14 @@ static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc); | |||
29 | static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); | 30 | static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc); |
30 | static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); | 31 | static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc); |
31 | static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); | 32 | static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); |
32 | static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc); | 33 | static void bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc); |
33 | static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); | 34 | static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); |
35 | static void bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc); | ||
36 | static void bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc); | ||
37 | static void bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc); | ||
38 | static bfa_boolean_t bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc); | ||
34 | 39 | ||
35 | struct bfa_ioc_hwif_s hwif_cb; | 40 | static struct bfa_ioc_hwif_s hwif_cb; |
36 | 41 | ||
37 | /* | 42 | /* |
38 | * Called from bfa_ioc_attach() to map asic specific calls. | 43 | * Called from bfa_ioc_attach() to map asic specific calls. |
@@ -46,8 +51,12 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) | |||
46 | hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init; | 51 | hwif_cb.ioc_reg_init = bfa_ioc_cb_reg_init; |
47 | hwif_cb.ioc_map_port = bfa_ioc_cb_map_port; | 52 | hwif_cb.ioc_map_port = bfa_ioc_cb_map_port; |
48 | hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; | 53 | hwif_cb.ioc_isr_mode_set = bfa_ioc_cb_isr_mode_set; |
49 | hwif_cb.ioc_notify_hbfail = bfa_ioc_cb_notify_hbfail; | 54 | hwif_cb.ioc_notify_fail = bfa_ioc_cb_notify_fail; |
50 | hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; | 55 | hwif_cb.ioc_ownership_reset = bfa_ioc_cb_ownership_reset; |
56 | hwif_cb.ioc_sync_join = bfa_ioc_cb_sync_join; | ||
57 | hwif_cb.ioc_sync_leave = bfa_ioc_cb_sync_leave; | ||
58 | hwif_cb.ioc_sync_ack = bfa_ioc_cb_sync_ack; | ||
59 | hwif_cb.ioc_sync_complete = bfa_ioc_cb_sync_complete; | ||
51 | 60 | ||
52 | ioc->ioc_hwif = &hwif_cb; | 61 | ioc->ioc_hwif = &hwif_cb; |
53 | } | 62 | } |
@@ -58,6 +67,21 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) | |||
58 | static bfa_boolean_t | 67 | static bfa_boolean_t |
59 | bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) | 68 | bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc) |
60 | { | 69 | { |
70 | struct bfi_ioc_image_hdr_s fwhdr; | ||
71 | uint32_t fwstate = readl(ioc->ioc_regs.ioc_fwstate); | ||
72 | |||
73 | if (fwstate == BFI_IOC_UNINIT) | ||
74 | return BFA_TRUE; | ||
75 | |||
76 | bfa_ioc_fwver_get(ioc, &fwhdr); | ||
77 | |||
78 | if (swab32(fwhdr.exec) == BFI_BOOT_TYPE_NORMAL) | ||
79 | return BFA_TRUE; | ||
80 | |||
81 | bfa_trc(ioc, fwstate); | ||
82 | bfa_trc(ioc, fwhdr.exec); | ||
83 | writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); | ||
84 | |||
61 | return BFA_TRUE; | 85 | return BFA_TRUE; |
62 | } | 86 | } |
63 | 87 | ||
@@ -70,7 +94,7 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) | |||
70 | * Notify other functions on HB failure. | 94 | * Notify other functions on HB failure. |
71 | */ | 95 | */ |
72 | static void | 96 | static void |
73 | bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) | 97 | bfa_ioc_cb_notify_fail(struct bfa_ioc_s *ioc) |
74 | { | 98 | { |
75 | writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); | 99 | writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); |
76 | readl(ioc->ioc_regs.err_set); | 100 | readl(ioc->ioc_regs.err_set); |
@@ -108,9 +132,11 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) | |||
108 | if (ioc->port_id == 0) { | 132 | if (ioc->port_id == 0) { |
109 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; | 133 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; |
110 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; | 134 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; |
135 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; | ||
111 | } else { | 136 | } else { |
112 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); | 137 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); |
113 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); | 138 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); |
139 | ioc->ioc_regs.alt_ioc_fwstate = (rb + BFA_IOC0_STATE_REG); | ||
114 | } | 140 | } |
115 | 141 | ||
116 | /* | 142 | /* |
@@ -181,10 +207,71 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) | |||
181 | * will lock it instead of clearing it. | 207 | * will lock it instead of clearing it. |
182 | */ | 208 | */ |
183 | readl(ioc->ioc_regs.ioc_sem_reg); | 209 | readl(ioc->ioc_regs.ioc_sem_reg); |
184 | bfa_ioc_hw_sem_release(ioc); | 210 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
185 | } | 211 | } |
186 | 212 | ||
213 | /* | ||
214 | * Synchronized IOC failure processing routines | ||
215 | */ | ||
216 | static void | ||
217 | bfa_ioc_cb_sync_join(struct bfa_ioc_s *ioc) | ||
218 | { | ||
219 | } | ||
187 | 220 | ||
221 | static void | ||
222 | bfa_ioc_cb_sync_leave(struct bfa_ioc_s *ioc) | ||
223 | { | ||
224 | } | ||
225 | |||
226 | static void | ||
227 | bfa_ioc_cb_sync_ack(struct bfa_ioc_s *ioc) | ||
228 | { | ||
229 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | ||
230 | } | ||
231 | |||
232 | static bfa_boolean_t | ||
233 | bfa_ioc_cb_sync_complete(struct bfa_ioc_s *ioc) | ||
234 | { | ||
235 | uint32_t fwstate, alt_fwstate; | ||
236 | fwstate = readl(ioc->ioc_regs.ioc_fwstate); | ||
237 | |||
238 | /* | ||
239 | * At this point, this IOC is holding the hw sem in the ||
240 | * start path (fwcheck) OR in the disable/enable path | ||
241 | * OR to check if the other IOC has acknowledged failure. | ||
242 | * | ||
243 | * So, this IOC can be in UNINIT, INITING, DISABLED, FAIL | ||
244 | * or in MEMTEST states. In a normal scenario, this IOC | ||
245 | * cannot be in OP state when this function is called. ||
246 | * | ||
247 | * However, this IOC could still be in OP state when | ||
248 | * the OS driver is starting up, if the OptROM code has | ||
249 | * left it in that state. | ||
250 | * | ||
251 | * If we had marked this IOC's fwstate as BFI_IOC_FAIL | ||
252 | * in the failure case and now, if the fwstate is not | ||
253 | * BFI_IOC_FAIL it implies that the other PCI fn has ||
254 | * reinitialized the ASIC or this IOC got disabled, so | ||
255 | * return TRUE. | ||
256 | */ | ||
257 | if (fwstate == BFI_IOC_UNINIT || | ||
258 | fwstate == BFI_IOC_INITING || | ||
259 | fwstate == BFI_IOC_DISABLED || | ||
260 | fwstate == BFI_IOC_MEMTEST || | ||
261 | fwstate == BFI_IOC_OP) | ||
262 | return BFA_TRUE; | ||
263 | else { | ||
264 | alt_fwstate = readl(ioc->ioc_regs.alt_ioc_fwstate); | ||
265 | if (alt_fwstate == BFI_IOC_FAIL || | ||
266 | alt_fwstate == BFI_IOC_DISABLED || | ||
267 | alt_fwstate == BFI_IOC_UNINIT || | ||
268 | alt_fwstate == BFI_IOC_INITING || | ||
269 | alt_fwstate == BFI_IOC_MEMTEST) | ||
270 | return BFA_TRUE; | ||
271 | else | ||
272 | return BFA_FALSE; | ||
273 | } | ||
274 | } | ||
188 | 275 | ||
189 | bfa_status_t | 276 | bfa_status_t |
190 | bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) | 277 | bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) |
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index 115730c0aa77..008d129ddfcd 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c | |||
@@ -15,12 +15,22 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_ioc.h" | 19 | #include "bfa_ioc.h" |
19 | #include "bfi_ctreg.h" | 20 | #include "bfi_ctreg.h" |
20 | #include "bfa_defs.h" | 21 | #include "bfa_defs.h" |
21 | 22 | ||
22 | BFA_TRC_FILE(CNA, IOC_CT); | 23 | BFA_TRC_FILE(CNA, IOC_CT); |
23 | 24 | ||
25 | #define bfa_ioc_ct_sync_pos(__ioc) \ | ||
26 | ((uint32_t) (1 << bfa_ioc_pcifn(__ioc))) | ||
27 | #define BFA_IOC_SYNC_REQD_SH 16 | ||
28 | #define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) | ||
29 | #define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) | ||
30 | #define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) | ||
31 | #define bfa_ioc_ct_sync_reqd_pos(__ioc) \ | ||
32 | (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) | ||
33 | |||
24 | /* | 34 | /* |
25 | * forward declarations | 35 | * forward declarations |
26 | */ | 36 | */ |
@@ -29,10 +39,14 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc); | |||
29 | static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); | 39 | static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc); |
30 | static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); | 40 | static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc); |
31 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); | 41 | static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix); |
32 | static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc); | 42 | static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc); |
33 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); | 43 | static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); |
44 | static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc); | ||
45 | static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc); | ||
46 | static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc); | ||
47 | static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc); | ||
34 | 48 | ||
35 | struct bfa_ioc_hwif_s hwif_ct; | 49 | static struct bfa_ioc_hwif_s hwif_ct; |
36 | 50 | ||
37 | /* | 51 | /* |
38 | * Called from bfa_ioc_attach() to map asic specific calls. | 52 | * Called from bfa_ioc_attach() to map asic specific calls. |
@@ -46,8 +60,12 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) | |||
46 | hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; | 60 | hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; |
47 | hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; | 61 | hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; |
48 | hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; | 62 | hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; |
49 | hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; | 63 | hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; |
50 | hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; | 64 | hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; |
65 | hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; | ||
66 | hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; | ||
67 | hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; | ||
68 | hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete; | ||
51 | 69 | ||
52 | ioc->ioc_hwif = &hwif_ct; | 70 | ioc->ioc_hwif = &hwif_ct; |
53 | } | 71 | } |
@@ -83,7 +101,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) | |||
83 | */ | 101 | */ |
84 | if (usecnt == 0) { | 102 | if (usecnt == 0) { |
85 | writel(1, ioc->ioc_regs.ioc_usage_reg); | 103 | writel(1, ioc->ioc_regs.ioc_usage_reg); |
86 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); | 104 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); |
105 | writel(0, ioc->ioc_regs.ioc_fail_sync); | ||
87 | bfa_trc(ioc, usecnt); | 106 | bfa_trc(ioc, usecnt); |
88 | return BFA_TRUE; | 107 | return BFA_TRUE; |
89 | } | 108 | } |
@@ -94,14 +113,14 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) | |||
94 | /* | 113 | /* |
95 | * Use count cannot be non-zero while the chip is in uninitialized state. | 114 | * Use count cannot be non-zero while the chip is in uninitialized state. |
96 | */ | 115 | */ |
97 | bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); | 116 | WARN_ON(ioc_fwstate == BFI_IOC_UNINIT); |
98 | 117 | ||
99 | /* | 118 | /* |
100 | * Check if another driver with a different firmware is active | 119 | * Check if another driver with a different firmware is active |
101 | */ | 120 | */ |
102 | bfa_ioc_fwver_get(ioc, &fwhdr); | 121 | bfa_ioc_fwver_get(ioc, &fwhdr); |
103 | if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { | 122 | if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { |
104 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); | 123 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); |
105 | bfa_trc(ioc, usecnt); | 124 | bfa_trc(ioc, usecnt); |
106 | return BFA_FALSE; | 125 | return BFA_FALSE; |
107 | } | 126 | } |
@@ -111,7 +130,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) | |||
111 | */ | 130 | */ |
112 | usecnt++; | 131 | usecnt++; |
113 | writel(usecnt, ioc->ioc_regs.ioc_usage_reg); | 132 | writel(usecnt, ioc->ioc_regs.ioc_usage_reg); |
114 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); | 133 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); |
115 | bfa_trc(ioc, usecnt); | 134 | bfa_trc(ioc, usecnt); |
116 | return BFA_TRUE; | 135 | return BFA_TRUE; |
117 | } | 136 | } |
@@ -139,25 +158,27 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) | |||
139 | */ | 158 | */ |
140 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); | 159 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); |
141 | usecnt = readl(ioc->ioc_regs.ioc_usage_reg); | 160 | usecnt = readl(ioc->ioc_regs.ioc_usage_reg); |
142 | bfa_assert(usecnt > 0); | 161 | WARN_ON(usecnt <= 0); |
143 | 162 | ||
144 | usecnt--; | 163 | usecnt--; |
145 | writel(usecnt, ioc->ioc_regs.ioc_usage_reg); | 164 | writel(usecnt, ioc->ioc_regs.ioc_usage_reg); |
146 | bfa_trc(ioc, usecnt); | 165 | bfa_trc(ioc, usecnt); |
147 | 166 | ||
148 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); | 167 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); |
149 | } | 168 | } |
150 | 169 | ||
151 | /* | 170 | /* |
152 | * Notify other functions on HB failure. | 171 | * Notify other functions on HB failure. |
153 | */ | 172 | */ |
154 | static void | 173 | static void |
155 | bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) | 174 | bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc) |
156 | { | 175 | { |
157 | if (ioc->cna) { | 176 | if (ioc->cna) { |
158 | writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); | 177 | writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); |
178 | writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); | ||
159 | /* Wait for halt to take effect */ | 179 | /* Wait for halt to take effect */ |
160 | readl(ioc->ioc_regs.ll_halt); | 180 | readl(ioc->ioc_regs.ll_halt); |
181 | readl(ioc->ioc_regs.alt_ll_halt); | ||
161 | } else { | 182 | } else { |
162 | writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); | 183 | writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); |
163 | readl(ioc->ioc_regs.err_set); | 184 | readl(ioc->ioc_regs.err_set); |
@@ -209,15 +230,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) | |||
209 | if (ioc->port_id == 0) { | 230 | if (ioc->port_id == 0) { |
210 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; | 231 | ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; |
211 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; | 232 | ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; |
233 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; | ||
212 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; | 234 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; |
213 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; | 235 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; |
214 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; | 236 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; |
237 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; | ||
215 | } else { | 238 | } else { |
216 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); | 239 | ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); |
217 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); | 240 | ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); |
241 | ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; | ||
218 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; | 242 | ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; |
219 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; | 243 | ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; |
220 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; | 244 | ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; |
245 | ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; | ||
221 | } | 246 | } |
222 | 247 | ||
223 | /* | 248 | /* |
@@ -235,6 +260,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) | |||
235 | ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); | 260 | ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); |
236 | ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); | 261 | ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); |
237 | ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); | 262 | ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); |
263 | ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC); | ||
238 | 264 | ||
239 | /* | 265 | /* |
240 | * sram memory access | 266 | * sram memory access |
@@ -313,7 +339,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) | |||
313 | if (ioc->cna) { | 339 | if (ioc->cna) { |
314 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); | 340 | bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); |
315 | writel(0, ioc->ioc_regs.ioc_usage_reg); | 341 | writel(0, ioc->ioc_regs.ioc_usage_reg); |
316 | bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); | 342 | writel(1, ioc->ioc_regs.ioc_usage_sem_reg); |
317 | } | 343 | } |
318 | 344 | ||
319 | /* | 345 | /* |
@@ -322,10 +348,80 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) | |||
322 | * will lock it instead of clearing it. | 348 | * will lock it instead of clearing it. |
323 | */ | 349 | */ |
324 | readl(ioc->ioc_regs.ioc_sem_reg); | 350 | readl(ioc->ioc_regs.ioc_sem_reg); |
325 | bfa_ioc_hw_sem_release(ioc); | 351 | writel(1, ioc->ioc_regs.ioc_sem_reg); |
352 | } | ||
353 | |||
354 | /* | ||
355 | * Synchronized IOC failure processing routines | ||
356 | */ | ||
357 | static void | ||
358 | bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc) | ||
359 | { | ||
360 | uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
361 | uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); | ||
362 | |||
363 | writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); | ||
364 | } | ||
365 | |||
366 | static void | ||
367 | bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc) | ||
368 | { | ||
369 | uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
370 | uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | | ||
371 | bfa_ioc_ct_sync_pos(ioc); | ||
372 | |||
373 | writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); | ||
374 | } | ||
375 | |||
376 | static void | ||
377 | bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc) | ||
378 | { | ||
379 | uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
380 | |||
381 | writel((r32 | bfa_ioc_ct_sync_pos(ioc)), | ||
382 | ioc->ioc_regs.ioc_fail_sync); | ||
326 | } | 383 | } |
327 | 384 | ||
385 | static bfa_boolean_t | ||
386 | bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc) | ||
387 | { | ||
388 | uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync); | ||
389 | uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); | ||
390 | uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); | ||
391 | uint32_t tmp_ackd; | ||
392 | |||
393 | if (sync_ackd == 0) | ||
394 | return BFA_TRUE; | ||
395 | |||
396 | /* | ||
397 | * The check below is to see whether any other PCI fn | ||
398 | * has reinitialized the ASIC (reset sync_ackd bits) | ||
399 | * and failed again while this IOC was waiting for hw | ||
400 | * semaphore (in bfa_iocpf_sm_semwait()). | ||
401 | */ | ||
402 | tmp_ackd = sync_ackd; | ||
403 | if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && | ||
404 | !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) | ||
405 | sync_ackd |= bfa_ioc_ct_sync_pos(ioc); | ||
406 | |||
407 | if (sync_reqd == sync_ackd) { | ||
408 | writel(bfa_ioc_ct_clear_sync_ackd(r32), | ||
409 | ioc->ioc_regs.ioc_fail_sync); | ||
410 | writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); | ||
411 | writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); | ||
412 | return BFA_TRUE; | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * If another PCI fn reinitialized and failed again while | ||
417 | * this IOC was waiting for hw sem, the sync_ackd bit for | ||
418 | * this IOC needs to be set again to allow reinitialization. | ||
419 | */ | ||
420 | if (tmp_ackd != sync_ackd) | ||
421 | writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); | ||
328 | 422 | ||
423 | return BFA_FALSE; | ||
424 | } | ||
329 | 425 | ||
330 | /* | 426 | /* |
331 | * Check the firmware state to know if pll_init has been completed already | 427 | * Check the firmware state to know if pll_init has been completed already |
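The ioc_fail_sync handling added above packs a two-part handshake into one register: the high 16 bits (bfa_ioc_ct_sync_reqd_pos) mark which PCI functions must take part in failure recovery, the low 16 bits (bfa_ioc_ct_sync_pos) mark which of them have acknowledged, and bfa_ioc_ct_sync_complete() treats the sync as done once the two halves match. A minimal user-space sketch of that bookkeeping follows; the names are illustrative stand-ins and a plain uint32_t takes the place of the hardware register.

#include <stdint.h>
#include <stdio.h>

#define SYNC_REQD_SH		16
#define sync_pos(fn)		((uint32_t)1 << (fn))
#define sync_reqd_pos(fn)	(sync_pos(fn) << SYNC_REQD_SH)
#define get_sync_ackd(v)	((v) & 0x0000ffff)
#define get_sync_reqd(v)	((v) >> SYNC_REQD_SH)

int main(void)
{
	uint32_t reg = 0;

	/* Two PCI functions join the failure sync: set their "required" bits. */
	reg |= sync_reqd_pos(0);
	reg |= sync_reqd_pos(1);

	/* Each function acknowledges in the low half; the sync is complete
	 * only when the acknowledged mask equals the required mask. */
	reg |= sync_pos(0);
	printf("after fn0 ack: %s\n",
	       get_sync_reqd(reg) == get_sync_ackd(reg) ? "complete" : "pending");

	reg |= sync_pos(1);
	printf("after fn1 ack: %s\n",
	       get_sync_reqd(reg) == get_sync_ackd(reg) ? "complete" : "pending");
	return 0;
}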
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 15407ab39e77..ab79ff6fdeea 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h | |||
@@ -99,7 +99,6 @@ struct bfa_module_s { | |||
99 | void (*iocdisable) (struct bfa_s *bfa); | 99 | void (*iocdisable) (struct bfa_s *bfa); |
100 | }; | 100 | }; |
101 | 101 | ||
102 | extern struct bfa_module_s *hal_mods[]; | ||
103 | 102 | ||
104 | struct bfa_s { | 103 | struct bfa_s { |
105 | void *bfad; /* BFA driver instance */ | 104 | void *bfad; /* BFA driver instance */ |
@@ -116,8 +115,6 @@ struct bfa_s { | |||
116 | struct bfa_msix_s msix; | 115 | struct bfa_msix_s msix; |
117 | }; | 116 | }; |
118 | 117 | ||
119 | extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX]; | ||
120 | extern bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[]; | ||
121 | extern bfa_boolean_t bfa_auto_recover; | 118 | extern bfa_boolean_t bfa_auto_recover; |
122 | extern struct bfa_module_s hal_mod_sgpg; | 119 | extern struct bfa_module_s hal_mod_sgpg; |
123 | extern struct bfa_module_s hal_mod_fcport; | 120 | extern struct bfa_module_s hal_mod_fcport; |
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h deleted file mode 100644 index 65df62ef437f..000000000000 --- a/drivers/scsi/bfa/bfa_os_inc.h +++ /dev/null | |||
@@ -1,143 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. | ||
3 | * All rights reserved | ||
4 | * www.brocade.com | ||
5 | * | ||
6 | * Linux driver for Brocade Fibre Channel Host Bus Adapter. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License (GPL) Version 2 as | ||
10 | * published by the Free Software Foundation | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | */ | ||
17 | |||
18 | #ifndef __BFA_OS_INC_H__ | ||
19 | #define __BFA_OS_INC_H__ | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <linux/version.h> | ||
23 | #include <linux/pci.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/idr.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/cdev.h> | ||
28 | #include <linux/fs.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/vmalloc.h> | ||
31 | #include <linux/workqueue.h> | ||
32 | #include <linux/bitops.h> | ||
33 | #include <scsi/scsi.h> | ||
34 | #include <scsi/scsi_host.h> | ||
35 | #include <scsi/scsi_tcq.h> | ||
36 | #include <scsi/scsi_transport_fc.h> | ||
37 | #include <scsi/scsi_transport.h> | ||
38 | |||
39 | #ifdef __BIG_ENDIAN | ||
40 | #define __BIGENDIAN | ||
41 | #endif | ||
42 | |||
43 | static inline u64 bfa_os_get_log_time(void) | ||
44 | { | ||
45 | u64 system_time = 0; | ||
46 | struct timeval tv; | ||
47 | do_gettimeofday(&tv); | ||
48 | |||
49 | /* We are interested in seconds only. */ | ||
50 | system_time = tv.tv_sec; | ||
51 | return system_time; | ||
52 | } | ||
53 | |||
54 | #define bfa_io_lat_clock_res_div HZ | ||
55 | #define bfa_io_lat_clock_res_mul 1000 | ||
56 | |||
57 | #define BFA_LOG(level, bfad, mask, fmt, arg...) \ | ||
58 | do { \ | ||
59 | if (((mask) == 4) || (level[1] <= '4')) \ | ||
60 | dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \ | ||
61 | } while (0) | ||
62 | |||
63 | #define bfa_swap_3b(_x) \ | ||
64 | ((((_x) & 0xff) << 16) | \ | ||
65 | ((_x) & 0x00ff00) | \ | ||
66 | (((_x) & 0xff0000) >> 16)) | ||
67 | |||
68 | #define bfa_os_swap_sgaddr(_x) ((u64)( \ | ||
69 | (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ | ||
70 | (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ | ||
71 | (((u64)(_x) & (u64)0x0000000000ff0000ull) << 32) | \ | ||
72 | (((u64)(_x) & (u64)0x00000000ff000000ull) << 32) | \ | ||
73 | (((u64)(_x) & (u64)0x000000ff00000000ull) >> 32) | \ | ||
74 | (((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32) | \ | ||
75 | (((u64)(_x) & (u64)0x00ff000000000000ull) >> 32) | \ | ||
76 | (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) | ||
77 | |||
78 | #ifndef __BIGENDIAN | ||
79 | #define bfa_os_hton3b(_x) bfa_swap_3b(_x) | ||
80 | #define bfa_os_sgaddr(_x) (_x) | ||
81 | #else | ||
82 | #define bfa_os_hton3b(_x) (_x) | ||
83 | #define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x) | ||
84 | #endif | ||
85 | |||
86 | #define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x) | ||
87 | #define bfa_os_u32(__pa64) ((__pa64) >> 32) | ||
88 | |||
89 | #define BFA_TRC_TS(_trcm) \ | ||
90 | ({ \ | ||
91 | struct timeval tv; \ | ||
92 | \ | ||
93 | do_gettimeofday(&tv); \ | ||
94 | (tv.tv_sec*1000000+tv.tv_usec); \ | ||
95 | }) | ||
96 | |||
97 | #define boolean_t int | ||
98 | |||
99 | /* | ||
100 | * For current time stamp, OS API will fill-in | ||
101 | */ | ||
102 | struct bfa_timeval_s { | ||
103 | u32 tv_sec; /* seconds */ | ||
104 | u32 tv_usec; /* microseconds */ | ||
105 | }; | ||
106 | |||
107 | static inline void | ||
108 | bfa_os_gettimeofday(struct bfa_timeval_s *tv) | ||
109 | { | ||
110 | struct timeval tmp_tv; | ||
111 | |||
112 | do_gettimeofday(&tmp_tv); | ||
113 | tv->tv_sec = (u32) tmp_tv.tv_sec; | ||
114 | tv->tv_usec = (u32) tmp_tv.tv_usec; | ||
115 | } | ||
116 | |||
117 | static inline void | ||
118 | wwn2str(char *wwn_str, u64 wwn) | ||
119 | { | ||
120 | union { | ||
121 | u64 wwn; | ||
122 | u8 byte[8]; | ||
123 | } w; | ||
124 | |||
125 | w.wwn = wwn; | ||
126 | sprintf(wwn_str, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", w.byte[0], | ||
127 | w.byte[1], w.byte[2], w.byte[3], w.byte[4], w.byte[5], | ||
128 | w.byte[6], w.byte[7]); | ||
129 | } | ||
130 | |||
131 | static inline void | ||
132 | fcid2str(char *fcid_str, u32 fcid) | ||
133 | { | ||
134 | union { | ||
135 | u32 fcid; | ||
136 | u8 byte[4]; | ||
137 | } f; | ||
138 | |||
139 | f.fcid = fcid; | ||
140 | sprintf(fcid_str, "%02x:%02x:%02x", f.byte[1], f.byte[2], f.byte[3]); | ||
141 | } | ||
142 | |||
143 | #endif /* __BFA_OS_INC_H__ */ | ||
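Among the helpers that disappear with this header is the 24-bit swap used for FC_IDs (bfa_swap_3b(), wrapped by bfa_os_hton3b()). The sketch below reproduces just that operation as stand-alone C for illustration; swap_3b() and the sample value are stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Reverse the byte order of a 24-bit FC_ID held in the low three bytes,
 * the same transformation the removed bfa_swap_3b() macro performed. */
static uint32_t swap_3b(uint32_t x)
{
	return ((x & 0x0000ff) << 16) |
		(x & 0x00ff00) |
	       ((x & 0xff0000) >> 16);
}

int main(void)
{
	uint32_t fcid = 0x010203;	/* hypothetical N_Port ID */

	printf("0x%06x -> 0x%06x\n", fcid, swap_3b(fcid));	/* prints 0x030201 */
	return 0;
}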
diff --git a/drivers/scsi/bfa/bfa_plog.h b/drivers/scsi/bfa/bfa_plog.h index 501f0ed35cf0..1c9baa68339b 100644 --- a/drivers/scsi/bfa/bfa_plog.h +++ b/drivers/scsi/bfa/bfa_plog.h | |||
@@ -151,9 +151,5 @@ void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, | |||
151 | void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, | 151 | void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, |
152 | enum bfa_plog_eid event, u16 misc, | 152 | enum bfa_plog_eid event, u16 misc, |
153 | struct fchs_s *fchdr, u32 pld_w0); | 153 | struct fchs_s *fchdr, u32 pld_w0); |
154 | void bfa_plog_clear(struct bfa_plog_s *plog); | ||
155 | void bfa_plog_enable(struct bfa_plog_s *plog); | ||
156 | void bfa_plog_disable(struct bfa_plog_s *plog); | ||
157 | bfa_boolean_t bfa_plog_get_setting(struct bfa_plog_s *plog); | ||
158 | 154 | ||
159 | #endif /* __BFA_PORTLOG_H__ */ | 155 | #endif /* __BFA_PORTLOG_H__ */ |
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index fff96226a383..3f8e9d6066ec 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfad_drv.h" | ||
18 | #include "bfa_defs_svc.h" | 19 | #include "bfa_defs_svc.h" |
19 | #include "bfa_port.h" | 20 | #include "bfa_port.h" |
20 | #include "bfi.h" | 21 | #include "bfi.h" |
@@ -29,14 +30,14 @@ static void | |||
29 | bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) | 30 | bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) |
30 | { | 31 | { |
31 | u32 *dip = (u32 *) stats; | 32 | u32 *dip = (u32 *) stats; |
32 | u32 t0, t1; | 33 | __be32 t0, t1; |
33 | int i; | 34 | int i; |
34 | 35 | ||
35 | for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32); | 36 | for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32); |
36 | i += 2) { | 37 | i += 2) { |
37 | t0 = dip[i]; | 38 | t0 = dip[i]; |
38 | t1 = dip[i + 1]; | 39 | t1 = dip[i + 1]; |
39 | #ifdef __BIGENDIAN | 40 | #ifdef __BIG_ENDIAN |
40 | dip[i] = be32_to_cpu(t0); | 41 | dip[i] = be32_to_cpu(t0); |
41 | dip[i + 1] = be32_to_cpu(t1); | 42 | dip[i + 1] = be32_to_cpu(t1); |
42 | #else | 43 | #else |
@@ -96,13 +97,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status) | |||
96 | port->stats_busy = BFA_FALSE; | 97 | port->stats_busy = BFA_FALSE; |
97 | 98 | ||
98 | if (status == BFA_STATUS_OK) { | 99 | if (status == BFA_STATUS_OK) { |
99 | struct bfa_timeval_s tv; | 100 | struct timeval tv; |
100 | 101 | ||
101 | memcpy(port->stats, port->stats_dma.kva, | 102 | memcpy(port->stats, port->stats_dma.kva, |
102 | sizeof(union bfa_port_stats_u)); | 103 | sizeof(union bfa_port_stats_u)); |
103 | bfa_port_stats_swap(port, port->stats); | 104 | bfa_port_stats_swap(port, port->stats); |
104 | 105 | ||
105 | bfa_os_gettimeofday(&tv); | 106 | do_gettimeofday(&tv); |
106 | port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time; | 107 | port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time; |
107 | } | 108 | } |
108 | 109 | ||
@@ -124,7 +125,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status) | |||
124 | static void | 125 | static void |
125 | bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) | 126 | bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) |
126 | { | 127 | { |
127 | struct bfa_timeval_s tv; | 128 | struct timeval tv; |
128 | 129 | ||
129 | port->stats_status = status; | 130 | port->stats_status = status; |
130 | port->stats_busy = BFA_FALSE; | 131 | port->stats_busy = BFA_FALSE; |
@@ -132,7 +133,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) | |||
132 | /* | 133 | /* |
133 | * re-initialize time stamp for stats reset | 134 | * re-initialize time stamp for stats reset |
134 | */ | 135 | */ |
135 | bfa_os_gettimeofday(&tv); | 136 | do_gettimeofday(&tv); |
136 | port->stats_reset_time = tv.tv_sec; | 137 | port->stats_reset_time = tv.tv_sec; |
137 | 138 | ||
138 | if (port->stats_cbfn) { | 139 | if (port->stats_cbfn) { |
@@ -185,7 +186,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) | |||
185 | break; | 186 | break; |
186 | 187 | ||
187 | default: | 188 | default: |
188 | bfa_assert(0); | 189 | WARN_ON(1); |
189 | } | 190 | } |
190 | } | 191 | } |
191 | 192 | ||
@@ -432,9 +433,9 @@ void | |||
432 | bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, | 433 | bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, |
433 | void *dev, struct bfa_trc_mod_s *trcmod) | 434 | void *dev, struct bfa_trc_mod_s *trcmod) |
434 | { | 435 | { |
435 | struct bfa_timeval_s tv; | 436 | struct timeval tv; |
436 | 437 | ||
437 | bfa_assert(port); | 438 | WARN_ON(!port); |
438 | 439 | ||
439 | port->dev = dev; | 440 | port->dev = dev; |
440 | port->ioc = ioc; | 441 | port->ioc = ioc; |
@@ -447,27 +448,13 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, | |||
447 | 448 | ||
448 | bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); | 449 | bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port); |
449 | bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); | 450 | bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); |
450 | bfa_ioc_hbfail_register(port->ioc, &port->hbfail); | 451 | list_add_tail(&port->hbfail.qe, &port->ioc->hb_notify_q); |
451 | 452 | ||
452 | /* | 453 | /* |
453 | * initialize time stamp for stats reset | 454 | * initialize time stamp for stats reset |
454 | */ | 455 | */ |
455 | bfa_os_gettimeofday(&tv); | 456 | do_gettimeofday(&tv); |
456 | port->stats_reset_time = tv.tv_sec; | 457 | port->stats_reset_time = tv.tv_sec; |
457 | 458 | ||
458 | bfa_trc(port, 0); | 459 | bfa_trc(port, 0); |
459 | } | 460 | } |
460 | |||
461 | /* | ||
462 | * bfa_port_detach() | ||
463 | * | ||
464 | * | ||
465 | * @param[in] port - Pointer to the Port module data structure | ||
466 | * | ||
467 | * @return void | ||
468 | */ | ||
469 | void | ||
470 | bfa_port_detach(struct bfa_port_s *port) | ||
471 | { | ||
472 | bfa_trc(port, 0); | ||
473 | } | ||
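The bfa_assert() calls converted in this file, and throughout the rest of the series, become WARN_ON() with the condition inverted: the old assert complained when its argument was false, while WARN_ON() complains when its argument is true, so bfa_assert(port) above becomes WARN_ON(!port). A compact illustration of that equivalence, using a user-space stand-in macro rather than the kernel's WARN_ON():

#include <stdio.h>

/* Stand-in for the kernel macro: report when cond evaluates to true. */
#define warn_on(cond) \
	do { \
		if (cond) \
			fprintf(stderr, "warning: %s\n", #cond); \
	} while (0)

int main(void)
{
	void *port = NULL;

	/* Old style: bfa_assert(port) fired because port is NULL. */
	/* New style: the same check expressed with the inverted condition. */
	warn_on(!port);
	return 0;
}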
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h index dbce9dfd056b..c4ee9db6b470 100644 --- a/drivers/scsi/bfa/bfa_port.h +++ b/drivers/scsi/bfa/bfa_port.h | |||
@@ -48,7 +48,6 @@ struct bfa_port_s { | |||
48 | 48 | ||
49 | void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, | 49 | void bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, |
50 | void *dev, struct bfa_trc_mod_s *trcmod); | 50 | void *dev, struct bfa_trc_mod_s *trcmod); |
51 | void bfa_port_detach(struct bfa_port_s *port); | ||
52 | void bfa_port_hbfail(void *arg); | 51 | void bfa_port_hbfail(void *arg); |
53 | 52 | ||
54 | bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, | 53 | bfa_status_t bfa_port_get_stats(struct bfa_port_s *port, |
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 37e16ac8f249..1d34921f88bf 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c | |||
@@ -15,11 +15,10 @@ | |||
15 | * General Public License for more details. | 15 | * General Public License for more details. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "bfa_os_inc.h" | 18 | #include "bfad_drv.h" |
19 | #include "bfa_plog.h" | 19 | #include "bfa_plog.h" |
20 | #include "bfa_cs.h" | 20 | #include "bfa_cs.h" |
21 | #include "bfa_modules.h" | 21 | #include "bfa_modules.h" |
22 | #include "bfad_drv.h" | ||
23 | 22 | ||
24 | BFA_TRC_FILE(HAL, FCXP); | 23 | BFA_TRC_FILE(HAL, FCXP); |
25 | BFA_MODULE(fcxp); | 24 | BFA_MODULE(fcxp); |
@@ -41,19 +40,6 @@ BFA_MODULE(uf); | |||
41 | #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 | 40 | #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 |
42 | #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 | 41 | #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 |
43 | 42 | ||
44 | /* | ||
45 | * lps_pvt BFA LPS private functions | ||
46 | */ | ||
47 | |||
48 | enum bfa_lps_event { | ||
49 | BFA_LPS_SM_LOGIN = 1, /* login request from user */ | ||
50 | BFA_LPS_SM_LOGOUT = 2, /* logout request from user */ | ||
51 | BFA_LPS_SM_FWRSP = 3, /* f/w response to login/logout */ | ||
52 | BFA_LPS_SM_RESUME = 4, /* space present in reqq queue */ | ||
53 | BFA_LPS_SM_DELETE = 5, /* lps delete from user */ | ||
54 | BFA_LPS_SM_OFFLINE = 6, /* Link is offline */ | ||
55 | BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ | ||
56 | }; | ||
57 | 43 | ||
58 | /* | 44 | /* |
59 | * FC PORT related definitions | 45 | * FC PORT related definitions |
@@ -66,7 +52,6 @@ enum bfa_lps_event { | |||
66 | ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ | 52 | ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \ |
67 | (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) | 53 | (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) |
68 | 54 | ||
69 | |||
70 | /* | 55 | /* |
71 | * BFA port state machine events | 56 | * BFA port state machine events |
72 | */ | 57 | */ |
@@ -113,19 +98,6 @@ enum bfa_fcport_ln_sm_event { | |||
113 | } \ | 98 | } \ |
114 | } while (0) | 99 | } while (0) |
115 | 100 | ||
116 | |||
117 | enum bfa_rport_event { | ||
118 | BFA_RPORT_SM_CREATE = 1, /* rport create event */ | ||
119 | BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */ | ||
120 | BFA_RPORT_SM_ONLINE = 3, /* rport is online */ | ||
121 | BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */ | ||
122 | BFA_RPORT_SM_FWRSP = 5, /* firmware response */ | ||
123 | BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */ | ||
124 | BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */ | ||
125 | BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */ | ||
126 | BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ | ||
127 | }; | ||
128 | |||
129 | /* | 101 | /* |
130 | * forward declarations FCXP related functions | 102 | * forward declarations FCXP related functions |
131 | */ | 103 | */ |
@@ -159,6 +131,7 @@ static void bfa_lps_reqq_resume(void *lps_arg); | |||
159 | static void bfa_lps_free(struct bfa_lps_s *lps); | 131 | static void bfa_lps_free(struct bfa_lps_s *lps); |
160 | static void bfa_lps_send_login(struct bfa_lps_s *lps); | 132 | static void bfa_lps_send_login(struct bfa_lps_s *lps); |
161 | static void bfa_lps_send_logout(struct bfa_lps_s *lps); | 133 | static void bfa_lps_send_logout(struct bfa_lps_s *lps); |
134 | static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps); | ||
162 | static void bfa_lps_login_comp(struct bfa_lps_s *lps); | 135 | static void bfa_lps_login_comp(struct bfa_lps_s *lps); |
163 | static void bfa_lps_logout_comp(struct bfa_lps_s *lps); | 136 | static void bfa_lps_logout_comp(struct bfa_lps_s *lps); |
164 | static void bfa_lps_cvl_event(struct bfa_lps_s *lps); | 137 | static void bfa_lps_cvl_event(struct bfa_lps_s *lps); |
@@ -171,6 +144,8 @@ static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event); | |||
171 | static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event | 144 | static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event |
172 | event); | 145 | event); |
173 | static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); | 146 | static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event); |
147 | static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, | ||
148 | enum bfa_lps_event event); | ||
174 | static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); | 149 | static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); |
175 | static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event | 150 | static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event |
176 | event); | 151 | event); |
@@ -312,6 +287,18 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec) | |||
312 | return 0; | 287 | return 0; |
313 | } | 288 | } |
314 | 289 | ||
290 | static u64 | ||
291 | bfa_get_log_time(void) | ||
292 | { | ||
293 | u64 system_time = 0; | ||
294 | struct timeval tv; | ||
295 | do_gettimeofday(&tv); | ||
296 | |||
297 | /* We are interested in seconds only. */ | ||
298 | system_time = tv.tv_sec; | ||
299 | return system_time; | ||
300 | } | ||
301 | |||
315 | static void | 302 | static void |
316 | bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) | 303 | bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) |
317 | { | 304 | { |
@@ -322,7 +309,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) | |||
322 | return; | 309 | return; |
323 | 310 | ||
324 | if (plkd_validate_logrec(pl_rec)) { | 311 | if (plkd_validate_logrec(pl_rec)) { |
325 | bfa_assert(0); | 312 | WARN_ON(1); |
326 | return; | 313 | return; |
327 | } | 314 | } |
328 | 315 | ||
@@ -332,7 +319,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) | |||
332 | 319 | ||
333 | memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); | 320 | memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); |
334 | 321 | ||
335 | pl_recp->tv = bfa_os_get_log_time(); | 322 | pl_recp->tv = bfa_get_log_time(); |
336 | BFA_PL_LOG_REC_INCR(plog->tail); | 323 | BFA_PL_LOG_REC_INCR(plog->tail); |
337 | 324 | ||
338 | if (plog->head == plog->tail) | 325 | if (plog->head == plog->tail) |
@@ -437,29 +424,6 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, | |||
437 | } | 424 | } |
438 | } | 425 | } |
439 | 426 | ||
440 | void | ||
441 | bfa_plog_clear(struct bfa_plog_s *plog) | ||
442 | { | ||
443 | plog->head = plog->tail = 0; | ||
444 | } | ||
445 | |||
446 | void | ||
447 | bfa_plog_enable(struct bfa_plog_s *plog) | ||
448 | { | ||
449 | plog->plog_enabled = 1; | ||
450 | } | ||
451 | |||
452 | void | ||
453 | bfa_plog_disable(struct bfa_plog_s *plog) | ||
454 | { | ||
455 | plog->plog_enabled = 0; | ||
456 | } | ||
457 | |||
458 | bfa_boolean_t | ||
459 | bfa_plog_get_setting(struct bfa_plog_s *plog) | ||
460 | { | ||
461 | return (bfa_boolean_t)plog->plog_enabled; | ||
462 | } | ||
463 | 427 | ||
464 | /* | 428 | /* |
465 | * fcxp_pvt BFA FCXP private functions | 429 | * fcxp_pvt BFA FCXP private functions |
@@ -637,15 +601,15 @@ bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp, | |||
637 | bfa_fcxp_get_sglen_t sglen_cbfn) | 601 | bfa_fcxp_get_sglen_t sglen_cbfn) |
638 | { | 602 | { |
639 | 603 | ||
640 | bfa_assert(bfa != NULL); | 604 | WARN_ON(bfa == NULL); |
641 | 605 | ||
642 | bfa_trc(bfa, fcxp->fcxp_tag); | 606 | bfa_trc(bfa, fcxp->fcxp_tag); |
643 | 607 | ||
644 | if (n_sgles == 0) { | 608 | if (n_sgles == 0) { |
645 | *use_ibuf = 1; | 609 | *use_ibuf = 1; |
646 | } else { | 610 | } else { |
647 | bfa_assert(*sga_cbfn != NULL); | 611 | WARN_ON(*sga_cbfn == NULL); |
648 | bfa_assert(*sglen_cbfn != NULL); | 612 | WARN_ON(*sglen_cbfn == NULL); |
649 | 613 | ||
650 | *use_ibuf = 0; | 614 | *use_ibuf = 0; |
651 | *r_sga_cbfn = sga_cbfn; | 615 | *r_sga_cbfn = sga_cbfn; |
@@ -657,7 +621,7 @@ bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp, | |||
657 | * alloc required sgpgs | 621 | * alloc required sgpgs |
658 | */ | 622 | */ |
659 | if (n_sgles > BFI_SGE_INLINE) | 623 | if (n_sgles > BFI_SGE_INLINE) |
660 | bfa_assert(0); | 624 | WARN_ON(1); |
661 | } | 625 | } |
662 | 626 | ||
663 | } | 627 | } |
@@ -671,7 +635,7 @@ bfa_fcxp_init(struct bfa_fcxp_s *fcxp, | |||
671 | bfa_fcxp_get_sglen_t rsp_sglen_cbfn) | 635 | bfa_fcxp_get_sglen_t rsp_sglen_cbfn) |
672 | { | 636 | { |
673 | 637 | ||
674 | bfa_assert(bfa != NULL); | 638 | WARN_ON(bfa == NULL); |
675 | 639 | ||
676 | bfa_trc(bfa, fcxp->fcxp_tag); | 640 | bfa_trc(bfa, fcxp->fcxp_tag); |
677 | 641 | ||
@@ -708,7 +672,7 @@ bfa_fcxp_put(struct bfa_fcxp_s *fcxp) | |||
708 | return; | 672 | return; |
709 | } | 673 | } |
710 | 674 | ||
711 | bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); | 675 | WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp)); |
712 | list_del(&fcxp->qe); | 676 | list_del(&fcxp->qe); |
713 | list_add_tail(&fcxp->qe, &mod->fcxp_free_q); | 677 | list_add_tail(&fcxp->qe, &mod->fcxp_free_q); |
714 | } | 678 | } |
@@ -757,7 +721,7 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) | |||
757 | 721 | ||
758 | fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); | 722 | fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); |
759 | 723 | ||
760 | bfa_assert(fcxp->send_cbfn != NULL); | 724 | WARN_ON(fcxp->send_cbfn == NULL); |
761 | 725 | ||
762 | hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); | 726 | hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp); |
763 | 727 | ||
@@ -913,13 +877,13 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) | |||
913 | BFA_FCXP_REQ_PLD_PA(fcxp)); | 877 | BFA_FCXP_REQ_PLD_PA(fcxp)); |
914 | } else { | 878 | } else { |
915 | if (fcxp->nreq_sgles > 0) { | 879 | if (fcxp->nreq_sgles > 0) { |
916 | bfa_assert(fcxp->nreq_sgles == 1); | 880 | WARN_ON(fcxp->nreq_sgles != 1); |
917 | hal_fcxp_set_local_sges(send_req->req_sge, | 881 | hal_fcxp_set_local_sges(send_req->req_sge, |
918 | reqi->req_tot_len, | 882 | reqi->req_tot_len, |
919 | fcxp->req_sga_cbfn(fcxp->caller, | 883 | fcxp->req_sga_cbfn(fcxp->caller, |
920 | 0)); | 884 | 0)); |
921 | } else { | 885 | } else { |
922 | bfa_assert(reqi->req_tot_len == 0); | 886 | WARN_ON(reqi->req_tot_len != 0); |
923 | hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); | 887 | hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); |
924 | } | 888 | } |
925 | } | 889 | } |
@@ -928,20 +892,20 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) | |||
928 | * setup rsp sgles | 892 | * setup rsp sgles |
929 | */ | 893 | */ |
930 | if (fcxp->use_irspbuf == 1) { | 894 | if (fcxp->use_irspbuf == 1) { |
931 | bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ); | 895 | WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ); |
932 | 896 | ||
933 | hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, | 897 | hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen, |
934 | BFA_FCXP_RSP_PLD_PA(fcxp)); | 898 | BFA_FCXP_RSP_PLD_PA(fcxp)); |
935 | 899 | ||
936 | } else { | 900 | } else { |
937 | if (fcxp->nrsp_sgles > 0) { | 901 | if (fcxp->nrsp_sgles > 0) { |
938 | bfa_assert(fcxp->nrsp_sgles == 1); | 902 | WARN_ON(fcxp->nrsp_sgles != 1); |
939 | hal_fcxp_set_local_sges(send_req->rsp_sge, | 903 | hal_fcxp_set_local_sges(send_req->rsp_sge, |
940 | rspi->rsp_maxlen, | 904 | rspi->rsp_maxlen, |
941 | fcxp->rsp_sga_cbfn(fcxp->caller, | 905 | fcxp->rsp_sga_cbfn(fcxp->caller, |
942 | 0)); | 906 | 0)); |
943 | } else { | 907 | } else { |
944 | bfa_assert(rspi->rsp_maxlen == 0); | 908 | WARN_ON(rspi->rsp_maxlen != 0); |
945 | hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); | 909 | hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0); |
946 | } | 910 | } |
947 | } | 911 | } |
@@ -955,10 +919,6 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) | |||
955 | } | 919 | } |
956 | 920 | ||
957 | /* | 921 | /* |
958 | * hal_fcxp_api BFA FCXP API | ||
959 | */ | ||
960 | |||
961 | /* | ||
962 | * Allocate an FCXP instance to send a response or to send a request | 922 | * Allocate an FCXP instance to send a response or to send a request |
963 | * that has a response. Request/response buffers are allocated by caller. | 923 | * that has a response. Request/response buffers are allocated by caller. |
964 | * | 924 | * |
@@ -990,7 +950,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, | |||
990 | { | 950 | { |
991 | struct bfa_fcxp_s *fcxp = NULL; | 951 | struct bfa_fcxp_s *fcxp = NULL; |
992 | 952 | ||
993 | bfa_assert(bfa != NULL); | 953 | WARN_ON(bfa == NULL); |
994 | 954 | ||
995 | fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); | 955 | fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa)); |
996 | if (fcxp == NULL) | 956 | if (fcxp == NULL) |
@@ -1017,7 +977,7 @@ bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp) | |||
1017 | struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; | 977 | struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; |
1018 | void *reqbuf; | 978 | void *reqbuf; |
1019 | 979 | ||
1020 | bfa_assert(fcxp->use_ireqbuf == 1); | 980 | WARN_ON(fcxp->use_ireqbuf != 1); |
1021 | reqbuf = ((u8 *)mod->req_pld_list_kva) + | 981 | reqbuf = ((u8 *)mod->req_pld_list_kva) + |
1022 | fcxp->fcxp_tag * mod->req_pld_sz; | 982 | fcxp->fcxp_tag * mod->req_pld_sz; |
1023 | return reqbuf; | 983 | return reqbuf; |
@@ -1044,7 +1004,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) | |||
1044 | struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; | 1004 | struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; |
1045 | void *rspbuf; | 1005 | void *rspbuf; |
1046 | 1006 | ||
1047 | bfa_assert(fcxp->use_irspbuf == 1); | 1007 | WARN_ON(fcxp->use_irspbuf != 1); |
1048 | 1008 | ||
1049 | rspbuf = ((u8 *)mod->rsp_pld_list_kva) + | 1009 | rspbuf = ((u8 *)mod->rsp_pld_list_kva) + |
1050 | fcxp->fcxp_tag * mod->rsp_pld_sz; | 1010 | fcxp->fcxp_tag * mod->rsp_pld_sz; |
@@ -1052,7 +1012,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) | |||
1052 | } | 1012 | } |
1053 | 1013 | ||
1054 | /* | 1014 | /* |
1055 | * Free the BFA FCXP | 1015 | * Free the BFA FCXP |
1056 | * | 1016 | * |
1057 | * @param[in] fcxp BFA fcxp pointer | 1017 | * @param[in] fcxp BFA fcxp pointer |
1058 | * | 1018 | * |
@@ -1063,7 +1023,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp) | |||
1063 | { | 1023 | { |
1064 | struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; | 1024 | struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod; |
1065 | 1025 | ||
1066 | bfa_assert(fcxp != NULL); | 1026 | WARN_ON(fcxp == NULL); |
1067 | bfa_trc(mod->bfa, fcxp->fcxp_tag); | 1027 | bfa_trc(mod->bfa, fcxp->fcxp_tag); |
1068 | bfa_fcxp_put(fcxp); | 1028 | bfa_fcxp_put(fcxp); |
1069 | } | 1029 | } |
@@ -1142,7 +1102,7 @@ bfa_status_t | |||
1142 | bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) | 1102 | bfa_fcxp_abort(struct bfa_fcxp_s *fcxp) |
1143 | { | 1103 | { |
1144 | bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); | 1104 | bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag); |
1145 | bfa_assert(0); | 1105 | WARN_ON(1); |
1146 | return BFA_STATUS_OK; | 1106 | return BFA_STATUS_OK; |
1147 | } | 1107 | } |
1148 | 1108 | ||
@@ -1157,7 +1117,7 @@ bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, | |||
1157 | { | 1117 | { |
1158 | struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); | 1118 | struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); |
1159 | 1119 | ||
1160 | bfa_assert(list_empty(&mod->fcxp_free_q)); | 1120 | WARN_ON(!list_empty(&mod->fcxp_free_q)); |
1161 | 1121 | ||
1162 | wqe->alloc_cbfn = alloc_cbfn; | 1122 | wqe->alloc_cbfn = alloc_cbfn; |
1163 | wqe->alloc_cbarg = alloc_cbarg; | 1123 | wqe->alloc_cbarg = alloc_cbarg; |
@@ -1178,7 +1138,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) | |||
1178 | { | 1138 | { |
1179 | struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); | 1139 | struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); |
1180 | 1140 | ||
1181 | bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe)); | 1141 | WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe)); |
1182 | list_del(&wqe->qe); | 1142 | list_del(&wqe->qe); |
1183 | } | 1143 | } |
1184 | 1144 | ||
@@ -1199,12 +1159,6 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) | |||
1199 | fcxp->send_cbfn = bfa_fcxp_null_comp; | 1159 | fcxp->send_cbfn = bfa_fcxp_null_comp; |
1200 | } | 1160 | } |
1201 | 1161 | ||
1202 | |||
1203 | |||
1204 | /* | ||
1205 | * hal_fcxp_public BFA FCXP public functions | ||
1206 | */ | ||
1207 | |||
1208 | void | 1162 | void |
1209 | bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | 1163 | bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) |
1210 | { | 1164 | { |
@@ -1215,7 +1169,7 @@ bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
1215 | 1169 | ||
1216 | default: | 1170 | default: |
1217 | bfa_trc(bfa, msg->mhdr.msg_id); | 1171 | bfa_trc(bfa, msg->mhdr.msg_id); |
1218 | bfa_assert(0); | 1172 | WARN_ON(1); |
1219 | } | 1173 | } |
1220 | } | 1174 | } |
1221 | 1175 | ||
@@ -1303,6 +1257,12 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
1303 | else | 1257 | else |
1304 | bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, | 1258 | bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, |
1305 | BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); | 1259 | BFA_PL_EID_LOGIN, 0, "FLOGI Accept"); |
1260 | /* If N2N, send the assigned PID to FW */ | ||
1261 | bfa_trc(lps->bfa, lps->fport); | ||
1262 | bfa_trc(lps->bfa, lps->lp_pid); | ||
1263 | |||
1264 | if (!lps->fport && lps->lp_pid) | ||
1265 | bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); | ||
1306 | } else { | 1266 | } else { |
1307 | bfa_sm_set_state(lps, bfa_lps_sm_init); | 1267 | bfa_sm_set_state(lps, bfa_lps_sm_init); |
1308 | if (lps->fdisc) | 1268 | if (lps->fdisc) |
@@ -1321,6 +1281,11 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
1321 | bfa_sm_set_state(lps, bfa_lps_sm_init); | 1281 | bfa_sm_set_state(lps, bfa_lps_sm_init); |
1322 | break; | 1282 | break; |
1323 | 1283 | ||
1284 | case BFA_LPS_SM_SET_N2N_PID: | ||
1285 | bfa_trc(lps->bfa, lps->fport); | ||
1286 | bfa_trc(lps->bfa, lps->lp_pid); | ||
1287 | break; | ||
1288 | |||
1324 | default: | 1289 | default: |
1325 | bfa_sm_fault(lps->bfa, event); | 1290 | bfa_sm_fault(lps->bfa, event); |
1326 | } | 1291 | } |
@@ -1389,9 +1354,59 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) | |||
1389 | BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); | 1354 | BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); |
1390 | break; | 1355 | break; |
1391 | 1356 | ||
1357 | case BFA_LPS_SM_SET_N2N_PID: | ||
1358 | if (bfa_reqq_full(lps->bfa, lps->reqq)) { | ||
1359 | bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait); | ||
1360 | bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); | ||
1361 | } else | ||
1362 | bfa_lps_send_set_n2n_pid(lps); | ||
1363 | break; | ||
1364 | |||
1365 | case BFA_LPS_SM_OFFLINE: | ||
1366 | case BFA_LPS_SM_DELETE: | ||
1367 | bfa_sm_set_state(lps, bfa_lps_sm_init); | ||
1368 | break; | ||
1369 | |||
1370 | default: | ||
1371 | bfa_sm_fault(lps->bfa, event); | ||
1372 | } | ||
1373 | } | ||
1374 | |||
1375 | /* | ||
1376 | * login complete; waiting for reqq space to send the assigned N2N PID | ||
1377 | */ | ||
1378 | static void | ||
1379 | bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event) | ||
1380 | { | ||
1381 | bfa_trc(lps->bfa, lps->lp_tag); | ||
1382 | bfa_trc(lps->bfa, event); | ||
1383 | |||
1384 | switch (event) { | ||
1385 | case BFA_LPS_SM_RESUME: | ||
1386 | bfa_sm_set_state(lps, bfa_lps_sm_online); | ||
1387 | bfa_lps_send_set_n2n_pid(lps); | ||
1388 | break; | ||
1389 | |||
1390 | case BFA_LPS_SM_LOGOUT: | ||
1391 | bfa_sm_set_state(lps, bfa_lps_sm_logowait); | ||
1392 | bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, | ||
1393 | BFA_PL_EID_LOGO, 0, "Logout"); | ||
1394 | break; | ||
1395 | |||
1396 | case BFA_LPS_SM_RX_CVL: | ||
1397 | bfa_sm_set_state(lps, bfa_lps_sm_init); | ||
1398 | bfa_reqq_wcancel(&lps->wqe); | ||
1399 | |||
1400 | /* Let the vport module know about this event */ | ||
1401 | bfa_lps_cvl_event(lps); | ||
1402 | bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS, | ||
1403 | BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx"); | ||
1404 | break; | ||
1405 | |||
1392 | case BFA_LPS_SM_OFFLINE: | 1406 | case BFA_LPS_SM_OFFLINE: |
1393 | case BFA_LPS_SM_DELETE: | 1407 | case BFA_LPS_SM_DELETE: |
1394 | bfa_sm_set_state(lps, bfa_lps_sm_init); | 1408 | bfa_sm_set_state(lps, bfa_lps_sm_init); |
1409 | bfa_reqq_wcancel(&lps->wqe); | ||
1395 | break; | 1410 | break; |
1396 | 1411 | ||
1397 | default: | 1412 | default: |
@@ -1540,15 +1555,16 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) | |||
1540 | struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); | 1555 | struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); |
1541 | struct bfa_lps_s *lps; | 1556 | struct bfa_lps_s *lps; |
1542 | 1557 | ||
1543 | bfa_assert(rsp->lp_tag < mod->num_lps); | 1558 | WARN_ON(rsp->lp_tag >= mod->num_lps); |
1544 | lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); | 1559 | lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); |
1545 | 1560 | ||
1546 | lps->status = rsp->status; | 1561 | lps->status = rsp->status; |
1547 | switch (rsp->status) { | 1562 | switch (rsp->status) { |
1548 | case BFA_STATUS_OK: | 1563 | case BFA_STATUS_OK: |
1549 | lps->fport = rsp->f_port; | 1564 | lps->fport = rsp->f_port; |
1565 | if (lps->fport) | ||
1566 | lps->lp_pid = rsp->lp_pid; | ||
1550 | lps->npiv_en = rsp->npiv_en; | 1567 | lps->npiv_en = rsp->npiv_en; |
1551 | lps->lp_pid = rsp->lp_pid; | ||
1552 | lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); | 1568 | lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); |
1553 | lps->pr_pwwn = rsp->port_name; | 1569 | lps->pr_pwwn = rsp->port_name; |
1554 | lps->pr_nwwn = rsp->node_name; | 1570 | lps->pr_nwwn = rsp->node_name; |
@@ -1587,7 +1603,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) | |||
1587 | struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); | 1603 | struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); |
1588 | struct bfa_lps_s *lps; | 1604 | struct bfa_lps_s *lps; |
1589 | 1605 | ||
1590 | bfa_assert(rsp->lp_tag < mod->num_lps); | 1606 | WARN_ON(rsp->lp_tag >= mod->num_lps); |
1591 | lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); | 1607 | lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag); |
1592 | 1608 | ||
1593 | bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); | 1609 | bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); |
@@ -1640,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps) | |||
1640 | struct bfi_lps_login_req_s *m; | 1656 | struct bfi_lps_login_req_s *m; |
1641 | 1657 | ||
1642 | m = bfa_reqq_next(lps->bfa, lps->reqq); | 1658 | m = bfa_reqq_next(lps->bfa, lps->reqq); |
1643 | bfa_assert(m); | 1659 | WARN_ON(!m); |
1644 | 1660 | ||
1645 | bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, | 1661 | bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ, |
1646 | bfa_lpuid(lps->bfa)); | 1662 | bfa_lpuid(lps->bfa)); |
@@ -1665,7 +1681,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps) | |||
1665 | struct bfi_lps_logout_req_s *m; | 1681 | struct bfi_lps_logout_req_s *m; |
1666 | 1682 | ||
1667 | m = bfa_reqq_next(lps->bfa, lps->reqq); | 1683 | m = bfa_reqq_next(lps->bfa, lps->reqq); |
1668 | bfa_assert(m); | 1684 | WARN_ON(!m); |
1669 | 1685 | ||
1670 | bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, | 1686 | bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ, |
1671 | bfa_lpuid(lps->bfa)); | 1687 | bfa_lpuid(lps->bfa)); |
@@ -1676,6 +1692,25 @@ bfa_lps_send_logout(struct bfa_lps_s *lps) | |||
1676 | } | 1692 | } |
1677 | 1693 | ||
1678 | /* | 1694 | /* |
1695 | * send n2n pid set request to firmware | ||
1696 | */ | ||
1697 | static void | ||
1698 | bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps) | ||
1699 | { | ||
1700 | struct bfi_lps_n2n_pid_req_s *m; | ||
1701 | |||
1702 | m = bfa_reqq_next(lps->bfa, lps->reqq); | ||
1703 | WARN_ON(!m); | ||
1704 | |||
1705 | bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ, | ||
1706 | bfa_lpuid(lps->bfa)); | ||
1707 | |||
1708 | m->lp_tag = lps->lp_tag; | ||
1709 | m->lp_pid = lps->lp_pid; | ||
1710 | bfa_reqq_produce(lps->bfa, lps->reqq); | ||
1711 | } | ||
1712 | |||
1713 | /* | ||
1679 | * Indirect login completion handler for non-fcs | 1714 | * Indirect login completion handler for non-fcs |
1680 | */ | 1715 | */ |
1681 | static void | 1716 | static void |
@@ -1853,14 +1888,6 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, | |||
1853 | bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); | 1888 | bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); |
1854 | } | 1889 | } |
1855 | 1890 | ||
1856 | /* | ||
1857 | * Initiate a lport logout (flogi). | ||
1858 | */ | ||
1859 | void | ||
1860 | bfa_lps_flogo(struct bfa_lps_s *lps) | ||
1861 | { | ||
1862 | bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); | ||
1863 | } | ||
1864 | 1891 | ||
1865 | /* | 1892 | /* |
1866 | * Initiate a lport FDSIC logout. | 1893 | * Initiate a lport FDSIC logout. |
@@ -1871,24 +1898,6 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps) | |||
1871 | bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); | 1898 | bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); |
1872 | } | 1899 | } |
1873 | 1900 | ||
1874 | /* | ||
1875 | * Discard a pending login request -- should be called only for | ||
1876 | * link down handling. | ||
1877 | */ | ||
1878 | void | ||
1879 | bfa_lps_discard(struct bfa_lps_s *lps) | ||
1880 | { | ||
1881 | bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); | ||
1882 | } | ||
1883 | |||
1884 | /* | ||
1885 | * Return lport services tag | ||
1886 | */ | ||
1887 | u8 | ||
1888 | bfa_lps_get_tag(struct bfa_lps_s *lps) | ||
1889 | { | ||
1890 | return lps->lp_tag; | ||
1891 | } | ||
1892 | 1901 | ||
1893 | /* | 1902 | /* |
1894 | * Return lport services tag given the pid | 1903 | * Return lport services tag given the pid |
@@ -1909,55 +1918,6 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) | |||
1909 | return 0; | 1918 | return 0; |
1910 | } | 1919 | } |
1911 | 1920 | ||
1912 | /* | ||
1913 | * return if fabric login indicates support for NPIV | ||
1914 | */ | ||
1915 | bfa_boolean_t | ||
1916 | bfa_lps_is_npiv_en(struct bfa_lps_s *lps) | ||
1917 | { | ||
1918 | return lps->npiv_en; | ||
1919 | } | ||
1920 | |||
1921 | /* | ||
1922 | * Return TRUE if attached to F-Port, else return FALSE | ||
1923 | */ | ||
1924 | bfa_boolean_t | ||
1925 | bfa_lps_is_fport(struct bfa_lps_s *lps) | ||
1926 | { | ||
1927 | return lps->fport; | ||
1928 | } | ||
1929 | |||
1930 | /* | ||
1931 | * Return TRUE if attached to a Brocade Fabric | ||
1932 | */ | ||
1933 | bfa_boolean_t | ||
1934 | bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps) | ||
1935 | { | ||
1936 | return lps->brcd_switch; | ||
1937 | } | ||
1938 | /* | ||
1939 | * return TRUE if authentication is required | ||
1940 | */ | ||
1941 | bfa_boolean_t | ||
1942 | bfa_lps_is_authreq(struct bfa_lps_s *lps) | ||
1943 | { | ||
1944 | return lps->auth_req; | ||
1945 | } | ||
1946 | |||
1947 | bfa_eproto_status_t | ||
1948 | bfa_lps_get_extstatus(struct bfa_lps_s *lps) | ||
1949 | { | ||
1950 | return lps->ext_status; | ||
1951 | } | ||
1952 | |||
1953 | /* | ||
1954 | * return port id assigned to the lport | ||
1955 | */ | ||
1956 | u32 | ||
1957 | bfa_lps_get_pid(struct bfa_lps_s *lps) | ||
1958 | { | ||
1959 | return lps->lp_pid; | ||
1960 | } | ||
1961 | 1921 | ||
1962 | /* | 1922 | /* |
1963 | * return port id assigned to the base lport | 1923 | * return port id assigned to the base lport |
@@ -1971,57 +1931,16 @@ bfa_lps_get_base_pid(struct bfa_s *bfa) | |||
1971 | } | 1931 | } |
1972 | 1932 | ||
1973 | /* | 1933 | /* |
1974 | * Return bb_credit assigned in FLOGI response | 1934 | * Set PID in case of n2n (which is assigned during PLOGI) |
1975 | */ | ||
1976 | u16 | ||
1977 | bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps) | ||
1978 | { | ||
1979 | return lps->pr_bbcred; | ||
1980 | } | ||
1981 | |||
1982 | /* | ||
1983 | * Return peer port name | ||
1984 | */ | ||
1985 | wwn_t | ||
1986 | bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps) | ||
1987 | { | ||
1988 | return lps->pr_pwwn; | ||
1989 | } | ||
1990 | |||
1991 | /* | ||
1992 | * Return peer node name | ||
1993 | */ | ||
1994 | wwn_t | ||
1995 | bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps) | ||
1996 | { | ||
1997 | return lps->pr_nwwn; | ||
1998 | } | ||
1999 | |||
2000 | /* | ||
2001 | * return reason code if login request is rejected | ||
2002 | */ | ||
2003 | u8 | ||
2004 | bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps) | ||
2005 | { | ||
2006 | return lps->lsrjt_rsn; | ||
2007 | } | ||
2008 | |||
2009 | /* | ||
2010 | * return explanation code if login request is rejected | ||
2011 | */ | 1935 | */ |
2012 | u8 | 1936 | void |
2013 | bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps) | 1937 | bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid) |
2014 | { | 1938 | { |
2015 | return lps->lsrjt_expl; | 1939 | bfa_trc(lps->bfa, lps->lp_tag); |
2016 | } | 1940 | bfa_trc(lps->bfa, n2n_pid); |
2017 | 1941 | ||
2018 | /* | 1942 | lps->lp_pid = n2n_pid; |
2019 | * Return fpma/spma MAC for lport | 1943 | bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID); |
2020 | */ | ||
2021 | mac_t | ||
2022 | bfa_lps_get_lp_mac(struct bfa_lps_s *lps) | ||
2023 | { | ||
2024 | return lps->lp_mac; | ||
2025 | } | 1944 | } |
2026 | 1945 | ||
2027 | /* | 1946 | /* |
@@ -2050,7 +1969,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
2050 | 1969 | ||
2051 | default: | 1970 | default: |
2052 | bfa_trc(bfa, m->mhdr.msg_id); | 1971 | bfa_trc(bfa, m->mhdr.msg_id); |
2053 | bfa_assert(0); | 1972 | WARN_ON(1); |
2054 | } | 1973 | } |
2055 | } | 1974 | } |
2056 | 1975 | ||
@@ -2068,6 +1987,8 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, | |||
2068 | /* | 1987 | /* |
2069 | * Start event after IOC is configured and BFA is started. | 1988 | * Start event after IOC is configured and BFA is started. |
2070 | */ | 1989 | */ |
1990 | fcport->use_flash_cfg = BFA_TRUE; | ||
1991 | |||
2071 | if (bfa_fcport_send_enable(fcport)) { | 1992 | if (bfa_fcport_send_enable(fcport)) { |
2072 | bfa_trc(fcport->bfa, BFA_TRUE); | 1993 | bfa_trc(fcport->bfa, BFA_TRUE); |
2073 | bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); | 1994 | bfa_sm_set_state(fcport, bfa_fcport_sm_enabling); |
@@ -2178,7 +2099,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, | |||
2178 | bfa_fcport_update_linkinfo(fcport); | 2099 | bfa_fcport_update_linkinfo(fcport); |
2179 | bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); | 2100 | bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); |
2180 | 2101 | ||
2181 | bfa_assert(fcport->event_cbfn); | 2102 | WARN_ON(!fcport->event_cbfn); |
2182 | bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); | 2103 | bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE); |
2183 | break; | 2104 | break; |
2184 | 2105 | ||
@@ -2229,7 +2150,7 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, | |||
2229 | case BFA_FCPORT_SM_LINKUP: | 2150 | case BFA_FCPORT_SM_LINKUP: |
2230 | bfa_fcport_update_linkinfo(fcport); | 2151 | bfa_fcport_update_linkinfo(fcport); |
2231 | bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); | 2152 | bfa_sm_set_state(fcport, bfa_fcport_sm_linkup); |
2232 | bfa_assert(fcport->event_cbfn); | 2153 | WARN_ON(!fcport->event_cbfn); |
2233 | bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, | 2154 | bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL, |
2234 | BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); | 2155 | BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup"); |
2235 | if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { | 2156 | if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { |
@@ -2803,12 +2724,6 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, | |||
2803 | } | 2724 | } |
2804 | } | 2725 | } |
2805 | 2726 | ||
2806 | |||
2807 | |||
2808 | /* | ||
2809 | * hal_port_private | ||
2810 | */ | ||
2811 | |||
2812 | static void | 2727 | static void |
2813 | __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) | 2728 | __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) |
2814 | { | 2729 | { |
@@ -2839,7 +2754,7 @@ bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event, | |||
2839 | bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); | 2754 | bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN); |
2840 | break; | 2755 | break; |
2841 | default: | 2756 | default: |
2842 | bfa_assert(0); | 2757 | WARN_ON(1); |
2843 | } | 2758 | } |
2844 | } | 2759 | } |
2845 | 2760 | ||
@@ -2906,7 +2821,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
2906 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | 2821 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); |
2907 | struct bfa_port_cfg_s *port_cfg = &fcport->cfg; | 2822 | struct bfa_port_cfg_s *port_cfg = &fcport->cfg; |
2908 | struct bfa_fcport_ln_s *ln = &fcport->ln; | 2823 | struct bfa_fcport_ln_s *ln = &fcport->ln; |
2909 | struct bfa_timeval_s tv; | 2824 | struct timeval tv; |
2910 | 2825 | ||
2911 | memset(fcport, 0, sizeof(struct bfa_fcport_s)); | 2826 | memset(fcport, 0, sizeof(struct bfa_fcport_s)); |
2912 | fcport->bfa = bfa; | 2827 | fcport->bfa = bfa; |
@@ -2920,7 +2835,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
2920 | /* | 2835 | /* |
2921 | * initialize time stamp for stats reset | 2836 | * initialize time stamp for stats reset |
2922 | */ | 2837 | */ |
2923 | bfa_os_gettimeofday(&tv); | 2838 | do_gettimeofday(&tv); |
2924 | fcport->stats_reset_time = tv.tv_sec; | 2839 | fcport->stats_reset_time = tv.tv_sec; |
2925 | 2840 | ||
2926 | /* | 2841 | /* |
@@ -3039,6 +2954,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) | |||
3039 | m->port_cfg = fcport->cfg; | 2954 | m->port_cfg = fcport->cfg; |
3040 | m->msgtag = fcport->msgtag; | 2955 | m->msgtag = fcport->msgtag; |
3041 | m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); | 2956 | m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); |
2957 | m->use_flash_cfg = fcport->use_flash_cfg; | ||
3042 | bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); | 2958 | bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); |
3043 | bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); | 2959 | bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); |
3044 | bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); | 2960 | bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); |
@@ -3089,8 +3005,8 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport) | |||
3089 | static void | 3005 | static void |
3090 | bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) | 3006 | bfa_fcport_set_wwns(struct bfa_fcport_s *fcport) |
3091 | { | 3007 | { |
3092 | fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc); | 3008 | fcport->pwwn = fcport->bfa->ioc.attr->pwwn; |
3093 | fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc); | 3009 | fcport->nwwn = fcport->bfa->ioc.attr->nwwn; |
3094 | 3010 | ||
3095 | bfa_trc(fcport->bfa, fcport->pwwn); | 3011 | bfa_trc(fcport->bfa, fcport->pwwn); |
3096 | bfa_trc(fcport->bfa, fcport->nwwn); | 3012 | bfa_trc(fcport->bfa, fcport->nwwn); |
@@ -3127,7 +3043,7 @@ bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, | |||
3127 | struct bfa_qos_stats_s *s) | 3043 | struct bfa_qos_stats_s *s) |
3128 | { | 3044 | { |
3129 | u32 *dip = (u32 *) d; | 3045 | u32 *dip = (u32 *) d; |
3130 | u32 *sip = (u32 *) s; | 3046 | __be32 *sip = (__be32 *) s; |
3131 | int i; | 3047 | int i; |
3132 | 3048 | ||
3133 | /* Now swap the 32 bit fields */ | 3049 | /* Now swap the 32 bit fields */ |
@@ -3140,12 +3056,12 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, | |||
3140 | struct bfa_fcoe_stats_s *s) | 3056 | struct bfa_fcoe_stats_s *s) |
3141 | { | 3057 | { |
3142 | u32 *dip = (u32 *) d; | 3058 | u32 *dip = (u32 *) d; |
3143 | u32 *sip = (u32 *) s; | 3059 | __be32 *sip = (__be32 *) s; |
3144 | int i; | 3060 | int i; |
3145 | 3061 | ||
3146 | for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); | 3062 | for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); |
3147 | i = i + 2) { | 3063 | i = i + 2) { |
3148 | #ifdef __BIGENDIAN | 3064 | #ifdef __BIG_ENDIAN |
3149 | dip[i] = be32_to_cpu(sip[i]); | 3065 | dip[i] = be32_to_cpu(sip[i]); |
3150 | dip[i + 1] = be32_to_cpu(sip[i + 1]); | 3066 | dip[i + 1] = be32_to_cpu(sip[i + 1]); |
3151 | #else | 3067 | #else |
@@ -3162,7 +3078,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) | |||
3162 | 3078 | ||
3163 | if (complete) { | 3079 | if (complete) { |
3164 | if (fcport->stats_status == BFA_STATUS_OK) { | 3080 | if (fcport->stats_status == BFA_STATUS_OK) { |
3165 | struct bfa_timeval_s tv; | 3081 | struct timeval tv; |
3166 | 3082 | ||
3167 | /* Swap FC QoS or FCoE stats */ | 3083 | /* Swap FC QoS or FCoE stats */ |
3168 | if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { | 3084 | if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) { |
@@ -3174,7 +3090,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete) | |||
3174 | &fcport->stats_ret->fcoe, | 3090 | &fcport->stats_ret->fcoe, |
3175 | &fcport->stats->fcoe); | 3091 | &fcport->stats->fcoe); |
3176 | 3092 | ||
3177 | bfa_os_gettimeofday(&tv); | 3093 | do_gettimeofday(&tv); |
3178 | fcport->stats_ret->fcoe.secs_reset = | 3094 | fcport->stats_ret->fcoe.secs_reset = |
3179 | tv.tv_sec - fcport->stats_reset_time; | 3095 | tv.tv_sec - fcport->stats_reset_time; |
3180 | } | 3096 | } |
@@ -3233,12 +3149,12 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) | |||
3233 | struct bfa_fcport_s *fcport = cbarg; | 3149 | struct bfa_fcport_s *fcport = cbarg; |
3234 | 3150 | ||
3235 | if (complete) { | 3151 | if (complete) { |
3236 | struct bfa_timeval_s tv; | 3152 | struct timeval tv; |
3237 | 3153 | ||
3238 | /* | 3154 | /* |
3239 | * re-initialize time stamp for stats reset | 3155 | * re-initialize time stamp for stats reset |
3240 | */ | 3156 | */ |
3241 | bfa_os_gettimeofday(&tv); | 3157 | do_gettimeofday(&tv); |
3242 | fcport->stats_reset_time = tv.tv_sec; | 3158 | fcport->stats_reset_time = tv.tv_sec; |
3243 | 3159 | ||
3244 | fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); | 3160 | fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status); |
@@ -3303,8 +3219,8 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) | |||
3303 | int link_bm = 0; | 3219 | int link_bm = 0; |
3304 | 3220 | ||
3305 | bfa_trc(fcport->bfa, fcport->cfg.trunked); | 3221 | bfa_trc(fcport->bfa, fcport->cfg.trunked); |
3306 | bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE || | 3222 | WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE && |
3307 | scn->trunk_state == BFA_TRUNK_OFFLINE); | 3223 | scn->trunk_state != BFA_TRUNK_OFFLINE); |
3308 | 3224 | ||
3309 | bfa_trc(fcport->bfa, trunk->attr.state); | 3225 | bfa_trc(fcport->bfa, trunk->attr.state); |
3310 | bfa_trc(fcport->bfa, scn->trunk_state); | 3226 | bfa_trc(fcport->bfa, scn->trunk_state); |
@@ -3396,12 +3312,6 @@ bfa_trunk_iocdisable(struct bfa_s *bfa) | |||
3396 | } | 3312 | } |
3397 | } | 3313 | } |
3398 | 3314 | ||
3399 | |||
3400 | |||
3401 | /* | ||
3402 | * hal_port_public | ||
3403 | */ | ||
3404 | |||
3405 | /* | 3315 | /* |
3406 | * Called to initialize port attributes | 3316 | * Called to initialize port attributes |
3407 | */ | 3317 | */ |
@@ -3419,9 +3329,9 @@ bfa_fcport_init(struct bfa_s *bfa) | |||
3419 | fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); | 3329 | fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc); |
3420 | fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); | 3330 | fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc); |
3421 | 3331 | ||
3422 | bfa_assert(fcport->cfg.maxfrsize); | 3332 | WARN_ON(!fcport->cfg.maxfrsize); |
3423 | bfa_assert(fcport->cfg.rx_bbcredit); | 3333 | WARN_ON(!fcport->cfg.rx_bbcredit); |
3424 | bfa_assert(fcport->speed_sup); | 3334 | WARN_ON(!fcport->speed_sup); |
3425 | } | 3335 | } |
3426 | 3336 | ||
3427 | /* | 3337 | /* |
@@ -3441,8 +3351,28 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
3441 | 3351 | ||
3442 | switch (msg->mhdr.msg_id) { | 3352 | switch (msg->mhdr.msg_id) { |
3443 | case BFI_FCPORT_I2H_ENABLE_RSP: | 3353 | case BFI_FCPORT_I2H_ENABLE_RSP: |
3444 | if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) | 3354 | if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) { |
3355 | |||
3356 | if (fcport->use_flash_cfg) { | ||
3357 | fcport->cfg = i2hmsg.penable_rsp->port_cfg; | ||
3358 | fcport->cfg.maxfrsize = | ||
3359 | cpu_to_be16(fcport->cfg.maxfrsize); | ||
3360 | fcport->cfg.path_tov = | ||
3361 | cpu_to_be16(fcport->cfg.path_tov); | ||
3362 | fcport->cfg.q_depth = | ||
3363 | cpu_to_be16(fcport->cfg.q_depth); | ||
3364 | |||
3365 | if (fcport->cfg.trunked) | ||
3366 | fcport->trunk.attr.state = | ||
3367 | BFA_TRUNK_OFFLINE; | ||
3368 | else | ||
3369 | fcport->trunk.attr.state = | ||
3370 | BFA_TRUNK_DISABLED; | ||
3371 | fcport->use_flash_cfg = BFA_FALSE; | ||
3372 | } | ||
3373 | |||
3445 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); | 3374 | bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP); |
3375 | } | ||
3446 | break; | 3376 | break; |
3447 | 3377 | ||
3448 | case BFI_FCPORT_I2H_DISABLE_RSP: | 3378 | case BFI_FCPORT_I2H_DISABLE_RSP: |
@@ -3498,17 +3428,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
3498 | break; | 3428 | break; |
3499 | 3429 | ||
3500 | default: | 3430 | default: |
3501 | bfa_assert(0); | 3431 | WARN_ON(1); |
3502 | break; | 3432 | break; |
3503 | } | 3433 | } |
3504 | } | 3434 | } |
3505 | 3435 | ||
3506 | |||
3507 | |||
3508 | /* | ||
3509 | * hal_port_api | ||
3510 | */ | ||
3511 | |||
3512 | /* | 3436 | /* |
3513 | * Registered callback for port events. | 3437 | * Registered callback for port events. |
3514 | */ | 3438 | */ |
@@ -3732,8 +3656,8 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) | |||
3732 | attr->nwwn = fcport->nwwn; | 3656 | attr->nwwn = fcport->nwwn; |
3733 | attr->pwwn = fcport->pwwn; | 3657 | attr->pwwn = fcport->pwwn; |
3734 | 3658 | ||
3735 | attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); | 3659 | attr->factorypwwn = bfa->ioc.attr->mfg_pwwn; |
3736 | attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); | 3660 | attr->factorynwwn = bfa->ioc.attr->mfg_nwwn; |
3737 | 3661 | ||
3738 | memcpy(&attr->pport_cfg, &fcport->cfg, | 3662 | memcpy(&attr->pport_cfg, &fcport->cfg, |
3739 | sizeof(struct bfa_port_cfg_s)); | 3663 | sizeof(struct bfa_port_cfg_s)); |
@@ -3751,7 +3675,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) | |||
3751 | /* beacon attributes */ | 3675 | /* beacon attributes */ |
3752 | attr->beacon = fcport->beacon; | 3676 | attr->beacon = fcport->beacon; |
3753 | attr->link_e2e_beacon = fcport->link_e2e_beacon; | 3677 | attr->link_e2e_beacon = fcport->link_e2e_beacon; |
3754 | attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog); | 3678 | attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled; |
3755 | attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa); | 3679 | attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa); |
3756 | 3680 | ||
3757 | attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); | 3681 | attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa); |
@@ -3818,89 +3742,6 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) | |||
3818 | return BFA_STATUS_OK; | 3742 | return BFA_STATUS_OK; |
3819 | } | 3743 | } |
3820 | 3744 | ||
3821 | /* | ||
3822 | * Fetch FCQoS port statistics | ||
3823 | */ | ||
3824 | bfa_status_t | ||
3825 | bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, | ||
3826 | bfa_cb_port_t cbfn, void *cbarg) | ||
3827 | { | ||
3828 | /* Meaningful only for FC mode */ | ||
3829 | bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); | ||
3830 | |||
3831 | return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); | ||
3832 | } | ||
3833 | |||
3834 | /* | ||
3835 | * Reset FCoE port statistics | ||
3836 | */ | ||
3837 | bfa_status_t | ||
3838 | bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) | ||
3839 | { | ||
3840 | /* Meaningful only for FC mode */ | ||
3841 | bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc)); | ||
3842 | |||
3843 | return bfa_fcport_clear_stats(bfa, cbfn, cbarg); | ||
3844 | } | ||
3845 | |||
3846 | /* | ||
3847 | * Fetch FCQoS port statistics | ||
3848 | */ | ||
3849 | bfa_status_t | ||
3850 | bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, | ||
3851 | bfa_cb_port_t cbfn, void *cbarg) | ||
3852 | { | ||
3853 | /* Meaningful only for FCoE mode */ | ||
3854 | bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); | ||
3855 | |||
3856 | return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); | ||
3857 | } | ||
3858 | |||
3859 | /* | ||
3860 | * Reset FCoE port statistics | ||
3861 | */ | ||
3862 | bfa_status_t | ||
3863 | bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) | ||
3864 | { | ||
3865 | /* Meaningful only for FCoE mode */ | ||
3866 | bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc)); | ||
3867 | |||
3868 | return bfa_fcport_clear_stats(bfa, cbfn, cbarg); | ||
3869 | } | ||
3870 | |||
3871 | void | ||
3872 | bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) | ||
3873 | { | ||
3874 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
3875 | |||
3876 | qos_attr->state = fcport->qos_attr.state; | ||
3877 | qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); | ||
3878 | } | ||
3879 | |||
3880 | void | ||
3881 | bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, | ||
3882 | struct bfa_qos_vc_attr_s *qos_vc_attr) | ||
3883 | { | ||
3884 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
3885 | struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; | ||
3886 | u32 i = 0; | ||
3887 | |||
3888 | qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); | ||
3889 | qos_vc_attr->shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); | ||
3890 | qos_vc_attr->elp_opmode_flags = | ||
3891 | be32_to_cpu(bfa_vc_attr->elp_opmode_flags); | ||
3892 | |||
3893 | /* Individual VC info */ | ||
3894 | while (i < qos_vc_attr->total_vc_count) { | ||
3895 | qos_vc_attr->vc_info[i].vc_credit = | ||
3896 | bfa_vc_attr->vc_info[i].vc_credit; | ||
3897 | qos_vc_attr->vc_info[i].borrow_credit = | ||
3898 | bfa_vc_attr->vc_info[i].borrow_credit; | ||
3899 | qos_vc_attr->vc_info[i].priority = | ||
3900 | bfa_vc_attr->vc_info[i].priority; | ||
3901 | ++i; | ||
3902 | } | ||
3903 | } | ||
3904 | 3745 | ||
3905 | /* | 3746 | /* |
3906 | * Fetch port attributes. | 3747 | * Fetch port attributes. |
@@ -3924,60 +3765,6 @@ bfa_fcport_is_ratelim(struct bfa_s *bfa) | |||
3924 | 3765 | ||
3925 | } | 3766 | } |
3926 | 3767 | ||
3927 | void | ||
3928 | bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) | ||
3929 | { | ||
3930 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
3931 | enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa); | ||
3932 | |||
3933 | bfa_trc(bfa, on_off); | ||
3934 | bfa_trc(bfa, fcport->cfg.qos_enabled); | ||
3935 | |||
3936 | bfa_trc(bfa, ioc_type); | ||
3937 | |||
3938 | if (ioc_type == BFA_IOC_TYPE_FC) { | ||
3939 | fcport->cfg.qos_enabled = on_off; | ||
3940 | /* | ||
3941 | * Notify fcpim of the change in QoS state | ||
3942 | */ | ||
3943 | bfa_fcpim_update_ioredirect(bfa); | ||
3944 | } | ||
3945 | } | ||
3946 | |||
3947 | void | ||
3948 | bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) | ||
3949 | { | ||
3950 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
3951 | |||
3952 | bfa_trc(bfa, on_off); | ||
3953 | bfa_trc(bfa, fcport->cfg.ratelimit); | ||
3954 | |||
3955 | fcport->cfg.ratelimit = on_off; | ||
3956 | if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN) | ||
3957 | fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; | ||
3958 | } | ||
3959 | |||
3960 | /* | ||
3961 | * Configure default minimum ratelim speed | ||
3962 | */ | ||
3963 | bfa_status_t | ||
3964 | bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed) | ||
3965 | { | ||
3966 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
3967 | |||
3968 | bfa_trc(bfa, speed); | ||
3969 | |||
3970 | /* Auto and speeds greater than the supported speed, are invalid */ | ||
3971 | if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) { | ||
3972 | bfa_trc(bfa, fcport->speed_sup); | ||
3973 | return BFA_STATUS_UNSUPP_SPEED; | ||
3974 | } | ||
3975 | |||
3976 | fcport->cfg.trl_def_speed = speed; | ||
3977 | |||
3978 | return BFA_STATUS_OK; | ||
3979 | } | ||
3980 | |||
3981 | /* | 3768 | /* |
3982 | * Get default minimum ratelim speed | 3769 | * Get default minimum ratelim speed |
3983 | */ | 3770 | */ |
@@ -3990,32 +3777,6 @@ bfa_fcport_get_ratelim_speed(struct bfa_s *bfa) | |||
3990 | return fcport->cfg.trl_def_speed; | 3777 | return fcport->cfg.trl_def_speed; |
3991 | 3778 | ||
3992 | } | 3779 | } |
3993 | void | ||
3994 | bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status) | ||
3995 | { | ||
3996 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
3997 | |||
3998 | bfa_trc(bfa, status); | ||
3999 | bfa_trc(bfa, fcport->diag_busy); | ||
4000 | |||
4001 | fcport->diag_busy = status; | ||
4002 | } | ||
4003 | |||
4004 | void | ||
4005 | bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, | ||
4006 | bfa_boolean_t link_e2e_beacon) | ||
4007 | { | ||
4008 | struct bfa_s *bfa = dev; | ||
4009 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
4010 | |||
4011 | bfa_trc(bfa, beacon); | ||
4012 | bfa_trc(bfa, link_e2e_beacon); | ||
4013 | bfa_trc(bfa, fcport->beacon); | ||
4014 | bfa_trc(bfa, fcport->link_e2e_beacon); | ||
4015 | |||
4016 | fcport->beacon = beacon; | ||
4017 | fcport->link_e2e_beacon = link_e2e_beacon; | ||
4018 | } | ||
4019 | 3780 | ||
4020 | bfa_boolean_t | 3781 | bfa_boolean_t |
4021 | bfa_fcport_is_linkup(struct bfa_s *bfa) | 3782 | bfa_fcport_is_linkup(struct bfa_s *bfa) |
@@ -4036,63 +3797,6 @@ bfa_fcport_is_qos_enabled(struct bfa_s *bfa) | |||
4036 | return fcport->cfg.qos_enabled; | 3797 | return fcport->cfg.qos_enabled; |
4037 | } | 3798 | } |
4038 | 3799 | ||
4039 | bfa_status_t | ||
4040 | bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr) | ||
4041 | |||
4042 | { | ||
4043 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
4044 | struct bfa_fcport_trunk_s *trunk = &fcport->trunk; | ||
4045 | |||
4046 | bfa_trc(bfa, fcport->cfg.trunked); | ||
4047 | bfa_trc(bfa, trunk->attr.state); | ||
4048 | *attr = trunk->attr; | ||
4049 | attr->port_id = bfa_lps_get_base_pid(bfa); | ||
4050 | |||
4051 | return BFA_STATUS_OK; | ||
4052 | } | ||
4053 | |||
4054 | void | ||
4055 | bfa_trunk_enable_cfg(struct bfa_s *bfa) | ||
4056 | { | ||
4057 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
4058 | struct bfa_fcport_trunk_s *trunk = &fcport->trunk; | ||
4059 | |||
4060 | bfa_trc(bfa, 1); | ||
4061 | trunk->attr.state = BFA_TRUNK_OFFLINE; | ||
4062 | fcport->cfg.trunked = BFA_TRUE; | ||
4063 | } | ||
4064 | |||
4065 | bfa_status_t | ||
4066 | bfa_trunk_enable(struct bfa_s *bfa) | ||
4067 | { | ||
4068 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
4069 | struct bfa_fcport_trunk_s *trunk = &fcport->trunk; | ||
4070 | |||
4071 | bfa_trc(bfa, 1); | ||
4072 | |||
4073 | trunk->attr.state = BFA_TRUNK_OFFLINE; | ||
4074 | bfa_fcport_disable(bfa); | ||
4075 | fcport->cfg.trunked = BFA_TRUE; | ||
4076 | bfa_fcport_enable(bfa); | ||
4077 | |||
4078 | return BFA_STATUS_OK; | ||
4079 | } | ||
4080 | |||
4081 | bfa_status_t | ||
4082 | bfa_trunk_disable(struct bfa_s *bfa) | ||
4083 | { | ||
4084 | struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); | ||
4085 | struct bfa_fcport_trunk_s *trunk = &fcport->trunk; | ||
4086 | |||
4087 | bfa_trc(bfa, 0); | ||
4088 | trunk->attr.state = BFA_TRUNK_DISABLED; | ||
4089 | bfa_fcport_disable(bfa); | ||
4090 | fcport->cfg.trunked = BFA_FALSE; | ||
4091 | bfa_fcport_enable(bfa); | ||
4092 | return BFA_STATUS_OK; | ||
4093 | } | ||
4094 | |||
4095 | |||
4096 | /* | 3800 | /* |
4097 | * Rport State machine functions | 3801 | * Rport State machine functions |
4098 | */ | 3802 | */ |
@@ -4606,8 +4310,8 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
4606 | mod->rps_list = rp; | 4310 | mod->rps_list = rp; |
4607 | mod->num_rports = cfg->fwcfg.num_rports; | 4311 | mod->num_rports = cfg->fwcfg.num_rports; |
4608 | 4312 | ||
4609 | bfa_assert(mod->num_rports && | 4313 | WARN_ON(!mod->num_rports || |
4610 | !(mod->num_rports & (mod->num_rports - 1))); | 4314 | (mod->num_rports & (mod->num_rports - 1))); |
4611 | 4315 | ||
4612 | for (i = 0; i < mod->num_rports; i++, rp++) { | 4316 | for (i = 0; i < mod->num_rports; i++, rp++) { |
4613 | memset(rp, 0, sizeof(struct bfa_rport_s)); | 4317 | memset(rp, 0, sizeof(struct bfa_rport_s)); |
@@ -4675,7 +4379,7 @@ bfa_rport_free(struct bfa_rport_s *rport) | |||
4675 | { | 4379 | { |
4676 | struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); | 4380 | struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa); |
4677 | 4381 | ||
4678 | bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport)); | 4382 | WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport)); |
4679 | list_del(&rport->qe); | 4383 | list_del(&rport->qe); |
4680 | list_add_tail(&rport->qe, &mod->rp_free_q); | 4384 | list_add_tail(&rport->qe, &mod->rp_free_q); |
4681 | } | 4385 | } |
@@ -4788,13 +4492,13 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
4788 | rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); | 4492 | rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle); |
4789 | rp->fw_handle = msg.create_rsp->fw_handle; | 4493 | rp->fw_handle = msg.create_rsp->fw_handle; |
4790 | rp->qos_attr = msg.create_rsp->qos_attr; | 4494 | rp->qos_attr = msg.create_rsp->qos_attr; |
4791 | bfa_assert(msg.create_rsp->status == BFA_STATUS_OK); | 4495 | WARN_ON(msg.create_rsp->status != BFA_STATUS_OK); |
4792 | bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); | 4496 | bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); |
4793 | break; | 4497 | break; |
4794 | 4498 | ||
4795 | case BFI_RPORT_I2H_DELETE_RSP: | 4499 | case BFI_RPORT_I2H_DELETE_RSP: |
4796 | rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); | 4500 | rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle); |
4797 | bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK); | 4501 | WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK); |
4798 | bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); | 4502 | bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP); |
4799 | break; | 4503 | break; |
4800 | 4504 | ||
@@ -4806,7 +4510,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) | |||
4806 | 4510 | ||
4807 | default: | 4511 | default: |
4808 | bfa_trc(bfa, m->mhdr.msg_id); | 4512 | bfa_trc(bfa, m->mhdr.msg_id); |
4809 | bfa_assert(0); | 4513 | WARN_ON(1); |
4810 | } | 4514 | } |
4811 | } | 4515 | } |
4812 | 4516 | ||
@@ -4828,24 +4532,18 @@ bfa_rport_create(struct bfa_s *bfa, void *rport_drv) | |||
4828 | 4532 | ||
4829 | rp->bfa = bfa; | 4533 | rp->bfa = bfa; |
4830 | rp->rport_drv = rport_drv; | 4534 | rp->rport_drv = rport_drv; |
4831 | bfa_rport_clear_stats(rp); | 4535 | memset(&rp->stats, 0, sizeof(rp->stats)); |
4832 | 4536 | ||
4833 | bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); | 4537 | WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit)); |
4834 | bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); | 4538 | bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE); |
4835 | 4539 | ||
4836 | return rp; | 4540 | return rp; |
4837 | } | 4541 | } |
4838 | 4542 | ||
4839 | void | 4543 | void |
4840 | bfa_rport_delete(struct bfa_rport_s *rport) | ||
4841 | { | ||
4842 | bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE); | ||
4843 | } | ||
4844 | |||
4845 | void | ||
4846 | bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) | 4544 | bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) |
4847 | { | 4545 | { |
4848 | bfa_assert(rport_info->max_frmsz != 0); | 4546 | WARN_ON(rport_info->max_frmsz == 0); |
4849 | 4547 | ||
4850 | /* | 4548 | /* |
4851 | * Some JBODs are seen to be not setting PDU size correctly in PLOGI | 4549 | * Some JBODs are seen to be not setting PDU size correctly in PLOGI |
@@ -4861,43 +4559,15 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) | |||
4861 | } | 4559 | } |
4862 | 4560 | ||
4863 | void | 4561 | void |
4864 | bfa_rport_offline(struct bfa_rport_s *rport) | ||
4865 | { | ||
4866 | bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE); | ||
4867 | } | ||
4868 | |||
4869 | void | ||
4870 | bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) | 4562 | bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed) |
4871 | { | 4563 | { |
4872 | bfa_assert(speed != 0); | 4564 | WARN_ON(speed == 0); |
4873 | bfa_assert(speed != BFA_PORT_SPEED_AUTO); | 4565 | WARN_ON(speed == BFA_PORT_SPEED_AUTO); |
4874 | 4566 | ||
4875 | rport->rport_info.speed = speed; | 4567 | rport->rport_info.speed = speed; |
4876 | bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); | 4568 | bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED); |
4877 | } | 4569 | } |
4878 | 4570 | ||
4879 | void | ||
4880 | bfa_rport_get_stats(struct bfa_rport_s *rport, | ||
4881 | struct bfa_rport_hal_stats_s *stats) | ||
4882 | { | ||
4883 | *stats = rport->stats; | ||
4884 | } | ||
4885 | |||
4886 | void | ||
4887 | bfa_rport_get_qos_attr(struct bfa_rport_s *rport, | ||
4888 | struct bfa_rport_qos_attr_s *qos_attr) | ||
4889 | { | ||
4890 | qos_attr->qos_priority = rport->qos_attr.qos_priority; | ||
4891 | qos_attr->qos_flow_id = be32_to_cpu(rport->qos_attr.qos_flow_id); | ||
4892 | |||
4893 | } | ||
4894 | |||
4895 | void | ||
4896 | bfa_rport_clear_stats(struct bfa_rport_s *rport) | ||
4897 | { | ||
4898 | memset(&rport->stats, 0, sizeof(rport->stats)); | ||
4899 | } | ||
4900 | |||
4901 | 4571 | ||
4902 | /* | 4572 | /* |
4903 | * SGPG related functions | 4573 | * SGPG related functions |
@@ -4952,7 +4622,7 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, | |||
4952 | sgpg_pa.pa = mod->sgpg_arr_pa; | 4622 | sgpg_pa.pa = mod->sgpg_arr_pa; |
4953 | mod->free_sgpgs = mod->num_sgpgs; | 4623 | mod->free_sgpgs = mod->num_sgpgs; |
4954 | 4624 | ||
4955 | bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); | 4625 | WARN_ON(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)); |
4956 | 4626 | ||
4957 | for (i = 0; i < mod->num_sgpgs; i++) { | 4627 | for (i = 0; i < mod->num_sgpgs; i++) { |
4958 | memset(hsgpg, 0, sizeof(*hsgpg)); | 4628 | memset(hsgpg, 0, sizeof(*hsgpg)); |
@@ -4993,12 +4663,6 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa) | |||
4993 | { | 4663 | { |
4994 | } | 4664 | } |
4995 | 4665 | ||
4996 | |||
4997 | |||
4998 | /* | ||
4999 | * hal_sgpg_public BFA SGPG public functions | ||
5000 | */ | ||
5001 | |||
5002 | bfa_status_t | 4666 | bfa_status_t |
5003 | bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) | 4667 | bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) |
5004 | { | 4668 | { |
@@ -5006,14 +4670,12 @@ bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) | |||
5006 | struct bfa_sgpg_s *hsgpg; | 4670 | struct bfa_sgpg_s *hsgpg; |
5007 | int i; | 4671 | int i; |
5008 | 4672 | ||
5009 | bfa_trc_fp(bfa, nsgpgs); | ||
5010 | |||
5011 | if (mod->free_sgpgs < nsgpgs) | 4673 | if (mod->free_sgpgs < nsgpgs) |
5012 | return BFA_STATUS_ENOMEM; | 4674 | return BFA_STATUS_ENOMEM; |
5013 | 4675 | ||
5014 | for (i = 0; i < nsgpgs; i++) { | 4676 | for (i = 0; i < nsgpgs; i++) { |
5015 | bfa_q_deq(&mod->sgpg_q, &hsgpg); | 4677 | bfa_q_deq(&mod->sgpg_q, &hsgpg); |
5016 | bfa_assert(hsgpg); | 4678 | WARN_ON(!hsgpg); |
5017 | list_add_tail(&hsgpg->qe, sgpg_q); | 4679 | list_add_tail(&hsgpg->qe, sgpg_q); |
5018 | } | 4680 | } |
5019 | 4681 | ||
@@ -5027,10 +4689,8 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) | |||
5027 | struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); | 4689 | struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); |
5028 | struct bfa_sgpg_wqe_s *wqe; | 4690 | struct bfa_sgpg_wqe_s *wqe; |
5029 | 4691 | ||
5030 | bfa_trc_fp(bfa, nsgpg); | ||
5031 | |||
5032 | mod->free_sgpgs += nsgpg; | 4692 | mod->free_sgpgs += nsgpg; |
5033 | bfa_assert(mod->free_sgpgs <= mod->num_sgpgs); | 4693 | WARN_ON(mod->free_sgpgs > mod->num_sgpgs); |
5034 | 4694 | ||
5035 | list_splice_tail_init(sgpg_q, &mod->sgpg_q); | 4695 | list_splice_tail_init(sgpg_q, &mod->sgpg_q); |
5036 | 4696 | ||
@@ -5060,8 +4720,8 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) | |||
5060 | { | 4720 | { |
5061 | struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); | 4721 | struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); |
5062 | 4722 | ||
5063 | bfa_assert(nsgpg > 0); | 4723 | WARN_ON(nsgpg <= 0); |
5064 | bfa_assert(nsgpg > mod->free_sgpgs); | 4724 | WARN_ON(nsgpg <= mod->free_sgpgs); |
5065 | 4725 | ||
5066 | wqe->nsgpg_total = wqe->nsgpg = nsgpg; | 4726 | wqe->nsgpg_total = wqe->nsgpg = nsgpg; |
5067 | 4727 | ||
@@ -5072,7 +4732,7 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) | |||
5072 | /* | 4732 | /* |
5073 | * no one else is waiting for SGPG | 4733 | * no one else is waiting for SGPG |
5074 | */ | 4734 | */ |
5075 | bfa_assert(list_empty(&mod->sgpg_wait_q)); | 4735 | WARN_ON(!list_empty(&mod->sgpg_wait_q)); |
5076 | list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); | 4736 | list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); |
5077 | wqe->nsgpg -= mod->free_sgpgs; | 4737 | wqe->nsgpg -= mod->free_sgpgs; |
5078 | mod->free_sgpgs = 0; | 4738 | mod->free_sgpgs = 0; |
@@ -5086,7 +4746,7 @@ bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) | |||
5086 | { | 4746 | { |
5087 | struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); | 4747 | struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa); |
5088 | 4748 | ||
5089 | bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); | 4749 | WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); |
5090 | list_del(&wqe->qe); | 4750 | list_del(&wqe->qe); |
5091 | 4751 | ||
5092 | if (wqe->nsgpg_total != wqe->nsgpg) | 4752 | if (wqe->nsgpg_total != wqe->nsgpg) |
@@ -5318,7 +4978,7 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) | |||
5318 | uf->data_ptr = buf; | 4978 | uf->data_ptr = buf; |
5319 | uf->data_len = m->xfr_len; | 4979 | uf->data_len = m->xfr_len; |
5320 | 4980 | ||
5321 | bfa_assert(uf->data_len >= sizeof(struct fchs_s)); | 4981 | WARN_ON(uf->data_len < sizeof(struct fchs_s)); |
5322 | 4982 | ||
5323 | if (uf->data_len == sizeof(struct fchs_s)) { | 4983 | if (uf->data_len == sizeof(struct fchs_s)) { |
5324 | bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, | 4984 | bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX, |
@@ -5361,12 +5021,6 @@ bfa_uf_start(struct bfa_s *bfa) | |||
5361 | bfa_uf_post_all(BFA_UF_MOD(bfa)); | 5021 | bfa_uf_post_all(BFA_UF_MOD(bfa)); |
5362 | } | 5022 | } |
5363 | 5023 | ||
5364 | |||
5365 | |||
5366 | /* | ||
5367 | * hal_uf_api | ||
5368 | */ | ||
5369 | |||
5370 | /* | 5024 | /* |
5371 | * Register handler for all unsolicited receive frames. | 5025 | * Register handler for all unsolicited receive frames. |
5372 | * | 5026 | * |
@@ -5414,7 +5068,7 @@ bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) | |||
5414 | 5068 | ||
5415 | default: | 5069 | default: |
5416 | bfa_trc(bfa, msg->mhdr.msg_id); | 5070 | bfa_trc(bfa, msg->mhdr.msg_id); |
5417 | bfa_assert(0); | 5071 | WARN_ON(1); |
5418 | } | 5072 | } |
5419 | } | 5073 | } |
5420 | 5074 | ||
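The bulk of the bfa_svc.c changes above mechanically replace the driver-private bfa_assert() with the kernel's WARN_ON(). The two macros have opposite polarity: bfa_assert(expr) fired when expr was false, while WARN_ON(cond) fires when cond is true, so each converted call also negates its condition. A minimal sketch of the pattern follows; the helper function is hypothetical and only the two macros come from the patch.

#include <linux/bug.h>

/*
 * Illustrative only: check_frame_len() is a hypothetical helper, not part
 * of this patch.  bfa_assert(expr) warned when expr was false; WARN_ON(cond)
 * warns when cond is true, so the asserted expression gets negated.
 */
static void check_frame_len(unsigned int data_len, unsigned int hdr_len)
{
        /* old style: bfa_assert(data_len >= hdr_len); */
        WARN_ON(data_len < hdr_len);
}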
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index e2349d5cdb93..331ad992a581 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h | |||
@@ -220,6 +220,18 @@ void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); | |||
220 | /* | 220 | /* |
221 | * RPORT related defines | 221 | * RPORT related defines |
222 | */ | 222 | */ |
223 | enum bfa_rport_event { | ||
224 | BFA_RPORT_SM_CREATE = 1, /* rport create event */ | ||
225 | BFA_RPORT_SM_DELETE = 2, /* deleting an existing rport */ | ||
226 | BFA_RPORT_SM_ONLINE = 3, /* rport is online */ | ||
227 | BFA_RPORT_SM_OFFLINE = 4, /* rport is offline */ | ||
228 | BFA_RPORT_SM_FWRSP = 5, /* firmware response */ | ||
229 | BFA_RPORT_SM_HWFAIL = 6, /* IOC h/w failure */ | ||
230 | BFA_RPORT_SM_QOS_SCN = 7, /* QoS SCN from firmware */ | ||
231 | BFA_RPORT_SM_SET_SPEED = 8, /* Set Rport Speed */ | ||
232 | BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ | ||
233 | }; | ||
234 | |||
223 | #define BFA_RPORT_MIN 4 | 235 | #define BFA_RPORT_MIN 4 |
224 | 236 | ||
225 | struct bfa_rport_mod_s { | 237 | struct bfa_rport_mod_s { |
@@ -432,6 +444,7 @@ struct bfa_fcport_s { | |||
432 | u8 myalpa; /* my ALPA in LOOP topology */ | 444 | u8 myalpa; /* my ALPA in LOOP topology */ |
433 | u8 rsvd[3]; | 445 | u8 rsvd[3]; |
434 | struct bfa_port_cfg_s cfg; /* current port configuration */ | 446 | struct bfa_port_cfg_s cfg; /* current port configuration */ |
447 | bfa_boolean_t use_flash_cfg; /* get port cfg from flash */ | ||
435 | struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ | 448 | struct bfa_qos_attr_s qos_attr; /* QoS Attributes */ |
436 | struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ | 449 | struct bfa_qos_vc_attr_s qos_vc_attr; /* VC info from ELP */ |
437 | struct bfa_reqq_wait_s reqq_wait; | 450 | struct bfa_reqq_wait_s reqq_wait; |
@@ -500,30 +513,9 @@ void bfa_fcport_event_register(struct bfa_s *bfa, | |||
500 | void (*event_cbfn) (void *cbarg, | 513 | void (*event_cbfn) (void *cbarg, |
501 | enum bfa_port_linkstate event), void *event_cbarg); | 514 | enum bfa_port_linkstate event), void *event_cbarg); |
502 | bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); | 515 | bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa); |
503 | void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off); | ||
504 | void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off); | ||
505 | bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, | ||
506 | enum bfa_port_speed speed); | ||
507 | enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); | 516 | enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa); |
508 | 517 | ||
509 | void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); | 518 | void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit); |
510 | void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status); | ||
511 | void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon, | ||
512 | bfa_boolean_t link_e2e_beacon); | ||
513 | void bfa_fcport_qos_get_attr(struct bfa_s *bfa, | ||
514 | struct bfa_qos_attr_s *qos_attr); | ||
515 | void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, | ||
516 | struct bfa_qos_vc_attr_s *qos_vc_attr); | ||
517 | bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa, | ||
518 | union bfa_fcport_stats_u *stats, | ||
519 | bfa_cb_port_t cbfn, void *cbarg); | ||
520 | bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, | ||
521 | void *cbarg); | ||
522 | bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, | ||
523 | union bfa_fcport_stats_u *stats, | ||
524 | bfa_cb_port_t cbfn, void *cbarg); | ||
525 | bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, | ||
526 | void *cbarg); | ||
527 | bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); | 519 | bfa_boolean_t bfa_fcport_is_ratelim(struct bfa_s *bfa); |
528 | bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); | 520 | bfa_boolean_t bfa_fcport_is_linkup(struct bfa_s *bfa); |
529 | bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, | 521 | bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa, |
@@ -537,14 +529,9 @@ bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa); | |||
537 | * bfa rport API functions | 529 | * bfa rport API functions |
538 | */ | 530 | */ |
539 | struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv); | 531 | struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv); |
540 | void bfa_rport_delete(struct bfa_rport_s *rport); | ||
541 | void bfa_rport_online(struct bfa_rport_s *rport, | 532 | void bfa_rport_online(struct bfa_rport_s *rport, |
542 | struct bfa_rport_info_s *rport_info); | 533 | struct bfa_rport_info_s *rport_info); |
543 | void bfa_rport_offline(struct bfa_rport_s *rport); | ||
544 | void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed); | 534 | void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed); |
545 | void bfa_rport_get_stats(struct bfa_rport_s *rport, | ||
546 | struct bfa_rport_hal_stats_s *stats); | ||
547 | void bfa_rport_clear_stats(struct bfa_rport_s *rport); | ||
548 | void bfa_cb_rport_online(void *rport); | 535 | void bfa_cb_rport_online(void *rport); |
549 | void bfa_cb_rport_offline(void *rport); | 536 | void bfa_cb_rport_offline(void *rport); |
550 | void bfa_cb_rport_qos_scn_flowid(void *rport, | 537 | void bfa_cb_rport_qos_scn_flowid(void *rport, |
@@ -553,8 +540,6 @@ void bfa_cb_rport_qos_scn_flowid(void *rport, | |||
553 | void bfa_cb_rport_qos_scn_prio(void *rport, | 540 | void bfa_cb_rport_qos_scn_prio(void *rport, |
554 | struct bfa_rport_qos_attr_s old_qos_attr, | 541 | struct bfa_rport_qos_attr_s old_qos_attr, |
555 | struct bfa_rport_qos_attr_s new_qos_attr); | 542 | struct bfa_rport_qos_attr_s new_qos_attr); |
556 | void bfa_rport_get_qos_attr(struct bfa_rport_s *rport, | ||
557 | struct bfa_rport_qos_attr_s *qos_attr); | ||
558 | 543 | ||
559 | /* | 544 | /* |
560 | * bfa fcxp API functions | 545 | * bfa fcxp API functions |
@@ -619,38 +604,18 @@ void bfa_uf_free(struct bfa_uf_s *uf); | |||
619 | u32 bfa_lps_get_max_vport(struct bfa_s *bfa); | 604 | u32 bfa_lps_get_max_vport(struct bfa_s *bfa); |
620 | struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); | 605 | struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa); |
621 | void bfa_lps_delete(struct bfa_lps_s *lps); | 606 | void bfa_lps_delete(struct bfa_lps_s *lps); |
622 | void bfa_lps_discard(struct bfa_lps_s *lps); | ||
623 | void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, | 607 | void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, |
624 | u16 pdusz, wwn_t pwwn, wwn_t nwwn, | 608 | u16 pdusz, wwn_t pwwn, wwn_t nwwn, |
625 | bfa_boolean_t auth_en); | 609 | bfa_boolean_t auth_en); |
626 | void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, | 610 | void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, |
627 | wwn_t pwwn, wwn_t nwwn); | 611 | wwn_t pwwn, wwn_t nwwn); |
628 | void bfa_lps_flogo(struct bfa_lps_s *lps); | ||
629 | void bfa_lps_fdisclogo(struct bfa_lps_s *lps); | 612 | void bfa_lps_fdisclogo(struct bfa_lps_s *lps); |
630 | u8 bfa_lps_get_tag(struct bfa_lps_s *lps); | 613 | void bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, u32 n2n_pid); |
631 | bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps); | ||
632 | bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps); | ||
633 | bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps); | ||
634 | bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps); | ||
635 | bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps); | ||
636 | u32 bfa_lps_get_pid(struct bfa_lps_s *lps); | ||
637 | u32 bfa_lps_get_base_pid(struct bfa_s *bfa); | 614 | u32 bfa_lps_get_base_pid(struct bfa_s *bfa); |
638 | u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); | 615 | u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid); |
639 | u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps); | ||
640 | wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps); | ||
641 | wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps); | ||
642 | u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps); | ||
643 | u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps); | ||
644 | mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps); | ||
645 | void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); | 616 | void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status); |
646 | void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); | 617 | void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status); |
647 | void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); | 618 | void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg); |
648 | void bfa_cb_lps_cvl_event(void *bfad, void *uarg); | 619 | void bfa_cb_lps_cvl_event(void *bfad, void *uarg); |
649 | 620 | ||
650 | void bfa_trunk_enable_cfg(struct bfa_s *bfa); | ||
651 | bfa_status_t bfa_trunk_enable(struct bfa_s *bfa); | ||
652 | bfa_status_t bfa_trunk_disable(struct bfa_s *bfa); | ||
653 | bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa, | ||
654 | struct bfa_trunk_attr_s *attr); | ||
655 | |||
656 | #endif /* __BFA_SVC_H__ */ | 621 | #endif /* __BFA_SVC_H__ */ |
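The header change above removes a set of one-line wrappers (bfa_rport_delete(), bfa_rport_offline(), bfa_rport_get_stats(), the bfa_trunk_* and several bfa_lps_* accessors) and instead exposes enum bfa_rport_event, so callers post state-machine events directly. A hedged sketch of the caller-side conversion: fcs_rport_cleanup() is hypothetical, while bfa_sm_send_event() and the event names come from the code above.

/* Sketch under the assumptions stated above; not copied from the patch. */
static void fcs_rport_cleanup(struct bfa_rport_s *rport)
{
        /* before: bfa_rport_offline(rport); followed by bfa_rport_delete(rport); */
        bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
        bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}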
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 6797720213b2..44524cf55d33 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c | |||
@@ -32,7 +32,6 @@ | |||
32 | #include "bfad_drv.h" | 32 | #include "bfad_drv.h" |
33 | #include "bfad_im.h" | 33 | #include "bfad_im.h" |
34 | #include "bfa_fcs.h" | 34 | #include "bfa_fcs.h" |
35 | #include "bfa_os_inc.h" | ||
36 | #include "bfa_defs.h" | 35 | #include "bfa_defs.h" |
37 | #include "bfa.h" | 36 | #include "bfa.h" |
38 | 37 | ||
@@ -61,12 +60,12 @@ int msix_disable_cb = 0, msix_disable_ct = 0; | |||
61 | u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; | 60 | u32 bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size; |
62 | u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; | 61 | u32 *bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc; |
63 | 62 | ||
64 | const char *msix_name_ct[] = { | 63 | static const char *msix_name_ct[] = { |
65 | "cpe0", "cpe1", "cpe2", "cpe3", | 64 | "cpe0", "cpe1", "cpe2", "cpe3", |
66 | "rme0", "rme1", "rme2", "rme3", | 65 | "rme0", "rme1", "rme2", "rme3", |
67 | "ctrl" }; | 66 | "ctrl" }; |
68 | 67 | ||
69 | const char *msix_name_cb[] = { | 68 | static const char *msix_name_cb[] = { |
70 | "cpe0", "cpe1", "cpe2", "cpe3", | 69 | "cpe0", "cpe1", "cpe2", "cpe3", |
71 | "rme0", "rme1", "rme2", "rme3", | 70 | "rme0", "rme1", "rme2", "rme3", |
72 | "eemc", "elpu0", "elpu1", "epss", "mlpu" }; | 71 | "eemc", "elpu0", "elpu1", "epss", "mlpu" }; |
@@ -206,7 +205,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event) | |||
206 | } | 205 | } |
207 | 206 | ||
208 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 207 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
209 | bfa_init(&bfad->bfa); | 208 | bfa_iocfc_init(&bfad->bfa); |
210 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 209 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
211 | 210 | ||
212 | /* Set up interrupt handler for each vectors */ | 211 | /* Set up interrupt handler for each vectors */ |
@@ -533,7 +532,7 @@ bfad_hal_mem_release(struct bfad_s *bfad) | |||
533 | (dma_addr_t) meminfo_elem->dma); | 532 | (dma_addr_t) meminfo_elem->dma); |
534 | break; | 533 | break; |
535 | default: | 534 | default: |
536 | bfa_assert(0); | 535 | WARN_ON(1); |
537 | break; | 536 | break; |
538 | } | 537 | } |
539 | } | 538 | } |
@@ -725,7 +724,7 @@ bfad_bfa_tmo(unsigned long data) | |||
725 | 724 | ||
726 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 725 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
727 | 726 | ||
728 | bfa_timer_tick(&bfad->bfa); | 727 | bfa_timer_beat(&bfad->bfa.timer_mod); |
729 | 728 | ||
730 | bfa_comp_deq(&bfad->bfa, &doneq); | 729 | bfa_comp_deq(&bfad->bfa, &doneq); |
731 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 730 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
@@ -882,8 +881,8 @@ bfad_drv_init(struct bfad_s *bfad) | |||
882 | goto out_hal_mem_alloc_failure; | 881 | goto out_hal_mem_alloc_failure; |
883 | } | 882 | } |
884 | 883 | ||
885 | bfa_init_trc(&bfad->bfa, bfad->trcmod); | 884 | bfad->bfa.trcmod = bfad->trcmod; |
886 | bfa_init_plog(&bfad->bfa, &bfad->plog_buf); | 885 | bfad->bfa.plog = &bfad->plog_buf; |
887 | bfa_plog_init(&bfad->plog_buf); | 886 | bfa_plog_init(&bfad->plog_buf); |
888 | bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, | 887 | bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START, |
889 | 0, "Driver Attach"); | 888 | 0, "Driver Attach"); |
@@ -893,9 +892,9 @@ bfad_drv_init(struct bfad_s *bfad) | |||
893 | 892 | ||
894 | /* FCS INIT */ | 893 | /* FCS INIT */ |
895 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 894 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
896 | bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod); | 895 | bfad->bfa_fcs.trcmod = bfad->trcmod; |
897 | bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); | 896 | bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE); |
898 | bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable); | 897 | bfad->bfa_fcs.fdmi_enabled = fdmi_enable; |
899 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 898 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
900 | 899 | ||
901 | bfad->bfad_flags |= BFAD_DRV_INIT_DONE; | 900 | bfad->bfad_flags |= BFAD_DRV_INIT_DONE; |
@@ -913,7 +912,7 @@ bfad_drv_uninit(struct bfad_s *bfad) | |||
913 | 912 | ||
914 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 913 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
915 | init_completion(&bfad->comp); | 914 | init_completion(&bfad->comp); |
916 | bfa_stop(&bfad->bfa); | 915 | bfa_iocfc_stop(&bfad->bfa); |
917 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 916 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
918 | wait_for_completion(&bfad->comp); | 917 | wait_for_completion(&bfad->comp); |
919 | 918 | ||
@@ -932,8 +931,8 @@ bfad_drv_start(struct bfad_s *bfad) | |||
932 | unsigned long flags; | 931 | unsigned long flags; |
933 | 932 | ||
934 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 933 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
935 | bfa_start(&bfad->bfa); | 934 | bfa_iocfc_start(&bfad->bfa); |
936 | bfa_fcs_start(&bfad->bfa_fcs); | 935 | bfa_fcs_fabric_modstart(&bfad->bfa_fcs); |
937 | bfad->bfad_flags |= BFAD_HAL_START_DONE; | 936 | bfad->bfad_flags |= BFAD_HAL_START_DONE; |
938 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 937 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
939 | 938 | ||
@@ -963,7 +962,7 @@ bfad_stop(struct bfad_s *bfad) | |||
963 | 962 | ||
964 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 963 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
965 | init_completion(&bfad->comp); | 964 | init_completion(&bfad->comp); |
966 | bfa_stop(&bfad->bfa); | 965 | bfa_iocfc_stop(&bfad->bfa); |
967 | bfad->bfad_flags &= ~BFAD_HAL_START_DONE; | 966 | bfad->bfad_flags &= ~BFAD_HAL_START_DONE; |
968 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 967 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
969 | wait_for_completion(&bfad->comp); | 968 | wait_for_completion(&bfad->comp); |
@@ -1102,15 +1101,15 @@ bfad_start_ops(struct bfad_s *bfad) { | |||
1102 | 1101 | ||
1103 | /* | 1102 | /* |
1104 | * If bfa_linkup_delay is set to -1 default; try to retrieve the | 1103 | * If bfa_linkup_delay is set to -1 default; try to retrieve the |
1105 | * value using the bfad_os_get_linkup_delay(); else use the | 1104 | * value using the bfad_get_linkup_delay(); else use the |
1106 | * passed in module param value as the bfa_linkup_delay. | 1105 | * passed in module param value as the bfa_linkup_delay. |
1107 | */ | 1106 | */ |
1108 | if (bfa_linkup_delay < 0) { | 1107 | if (bfa_linkup_delay < 0) { |
1109 | bfa_linkup_delay = bfad_os_get_linkup_delay(bfad); | 1108 | bfa_linkup_delay = bfad_get_linkup_delay(bfad); |
1110 | bfad_os_rport_online_wait(bfad); | 1109 | bfad_rport_online_wait(bfad); |
1111 | bfa_linkup_delay = -1; | 1110 | bfa_linkup_delay = -1; |
1112 | } else | 1111 | } else |
1113 | bfad_os_rport_online_wait(bfad); | 1112 | bfad_rport_online_wait(bfad); |
1114 | 1113 | ||
1115 | BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); | 1114 | BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n"); |
1116 | 1115 | ||
@@ -1167,7 +1166,6 @@ bfad_intx(int irq, void *dev_id) | |||
1167 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 1166 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
1168 | bfa_comp_free(&bfad->bfa, &doneq); | 1167 | bfa_comp_free(&bfad->bfa, &doneq); |
1169 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 1168 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
1170 | bfa_trc_fp(bfad, irq); | ||
1171 | } | 1169 | } |
1172 | 1170 | ||
1173 | return IRQ_HANDLED; | 1171 | return IRQ_HANDLED; |
@@ -1524,7 +1522,7 @@ bfad_init(void) | |||
1524 | if (strcmp(FCPI_NAME, " fcpim") == 0) | 1522 | if (strcmp(FCPI_NAME, " fcpim") == 0) |
1525 | supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; | 1523 | supported_fc4s |= BFA_LPORT_ROLE_FCP_IM; |
1526 | 1524 | ||
1527 | bfa_ioc_auto_recover(ioc_auto_recover); | 1525 | bfa_auto_recover = ioc_auto_recover; |
1528 | bfa_fcs_rport_set_del_timeout(rport_del_timeout); | 1526 | bfa_fcs_rport_set_del_timeout(rport_del_timeout); |
1529 | 1527 | ||
1530 | error = pci_register_driver(&bfad_pci_driver); | 1528 | error = pci_register_driver(&bfad_pci_driver); |
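bfad.c follows the same pattern: thin HAL wrappers such as bfa_init(), bfa_start(), bfa_stop(), bfa_timer_tick(), bfa_init_trc() and bfa_fcs_set_fdmi_param() become direct calls into the owning module or plain field assignments. The timer path is representative; the sketch below assumes only what the hunks above show (bfa_timer_beat(), the bfad_lock spinlock and the bfa.timer_mod field), and everything else is illustrative.

/* Illustrative shape of the periodic timeout handler after the change. */
static void bfad_bfa_tmo_sketch(unsigned long data)
{
        struct bfad_s *bfad = (struct bfad_s *) data;
        unsigned long flags;

        spin_lock_irqsave(&bfad->bfad_lock, flags);
        /* drive the HAL timer module directly; the bfa_timer_tick() wrapper is gone */
        bfa_timer_beat(&bfad->bfa.timer_mod);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}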
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index ed9fff440b5c..a94ea4235433 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c | |||
@@ -25,7 +25,7 @@ | |||
25 | /* | 25 | /* |
26 | * FC transport template entry, get SCSI target port ID. | 26 | * FC transport template entry, get SCSI target port ID. |
27 | */ | 27 | */ |
28 | void | 28 | static void |
29 | bfad_im_get_starget_port_id(struct scsi_target *starget) | 29 | bfad_im_get_starget_port_id(struct scsi_target *starget) |
30 | { | 30 | { |
31 | struct Scsi_Host *shost; | 31 | struct Scsi_Host *shost; |
@@ -40,7 +40,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget) | |||
40 | bfad = im_port->bfad; | 40 | bfad = im_port->bfad; |
41 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 41 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
42 | 42 | ||
43 | itnim = bfad_os_get_itnim(im_port, starget->id); | 43 | itnim = bfad_get_itnim(im_port, starget->id); |
44 | if (itnim) | 44 | if (itnim) |
45 | fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); | 45 | fc_id = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim); |
46 | 46 | ||
@@ -51,7 +51,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget) | |||
51 | /* | 51 | /* |
52 | * FC transport template entry, get SCSI target nwwn. | 52 | * FC transport template entry, get SCSI target nwwn. |
53 | */ | 53 | */ |
54 | void | 54 | static void |
55 | bfad_im_get_starget_node_name(struct scsi_target *starget) | 55 | bfad_im_get_starget_node_name(struct scsi_target *starget) |
56 | { | 56 | { |
57 | struct Scsi_Host *shost; | 57 | struct Scsi_Host *shost; |
@@ -66,7 +66,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget) | |||
66 | bfad = im_port->bfad; | 66 | bfad = im_port->bfad; |
67 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 67 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
68 | 68 | ||
69 | itnim = bfad_os_get_itnim(im_port, starget->id); | 69 | itnim = bfad_get_itnim(im_port, starget->id); |
70 | if (itnim) | 70 | if (itnim) |
71 | node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); | 71 | node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); |
72 | 72 | ||
@@ -77,7 +77,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget) | |||
77 | /* | 77 | /* |
78 | * FC transport template entry, get SCSI target pwwn. | 78 | * FC transport template entry, get SCSI target pwwn. |
79 | */ | 79 | */ |
80 | void | 80 | static void |
81 | bfad_im_get_starget_port_name(struct scsi_target *starget) | 81 | bfad_im_get_starget_port_name(struct scsi_target *starget) |
82 | { | 82 | { |
83 | struct Scsi_Host *shost; | 83 | struct Scsi_Host *shost; |
@@ -92,7 +92,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget) | |||
92 | bfad = im_port->bfad; | 92 | bfad = im_port->bfad; |
93 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 93 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
94 | 94 | ||
95 | itnim = bfad_os_get_itnim(im_port, starget->id); | 95 | itnim = bfad_get_itnim(im_port, starget->id); |
96 | if (itnim) | 96 | if (itnim) |
97 | port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); | 97 | port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); |
98 | 98 | ||
@@ -103,7 +103,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget) | |||
103 | /* | 103 | /* |
104 | * FC transport template entry, get SCSI host port ID. | 104 | * FC transport template entry, get SCSI host port ID. |
105 | */ | 105 | */ |
106 | void | 106 | static void |
107 | bfad_im_get_host_port_id(struct Scsi_Host *shost) | 107 | bfad_im_get_host_port_id(struct Scsi_Host *shost) |
108 | { | 108 | { |
109 | struct bfad_im_port_s *im_port = | 109 | struct bfad_im_port_s *im_port = |
@@ -111,7 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost) | |||
111 | struct bfad_port_s *port = im_port->port; | 111 | struct bfad_port_s *port = im_port->port; |
112 | 112 | ||
113 | fc_host_port_id(shost) = | 113 | fc_host_port_id(shost) = |
114 | bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); | 114 | bfa_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); |
115 | } | 115 | } |
116 | 116 | ||
117 | /* | 117 | /* |
@@ -487,7 +487,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport) | |||
487 | wait_for_completion(vport->comp_del); | 487 | wait_for_completion(vport->comp_del); |
488 | 488 | ||
489 | free_scsi_host: | 489 | free_scsi_host: |
490 | bfad_os_scsi_host_free(bfad, im_port); | 490 | bfad_scsi_host_free(bfad, im_port); |
491 | 491 | ||
492 | kfree(vport); | 492 | kfree(vport); |
493 | 493 | ||
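The bfad_attr.c accessors can become static because they are never called by name; they are reached only through the driver's fc_function_template, whose function pointers keep them live without external linkage. The fragment below is a sketch of that wiring, assuming the usual scsi_transport_fc setup; the variable name is hypothetical, and the field names are the standard ones from <scsi/scsi_transport_fc.h>.

#include <scsi/scsi_transport_fc.h>

/* Sketch: how the now-static accessors stay reachable via the transport template. */
static struct fc_function_template bfad_im_fc_function_template_sketch = {
        .get_starget_port_id    = bfad_im_get_starget_port_id,
        .get_starget_node_name  = bfad_im_get_starget_node_name,
        .get_starget_port_name  = bfad_im_get_starget_port_name,
        .get_host_port_id       = bfad_im_get_host_port_id,
};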
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 1fedeeb4ac1f..c66e32eced7b 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c | |||
@@ -90,7 +90,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file) | |||
90 | memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len); | 90 | memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len); |
91 | 91 | ||
92 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 92 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
93 | rc = bfa_debug_fwtrc(&bfad->bfa, | 93 | rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc, |
94 | fw_debug->debug_buffer, | 94 | fw_debug->debug_buffer, |
95 | &fw_debug->buffer_len); | 95 | &fw_debug->buffer_len); |
96 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 96 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
@@ -134,7 +134,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file) | |||
134 | memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len); | 134 | memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len); |
135 | 135 | ||
136 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 136 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
137 | rc = bfa_debug_fwsave(&bfad->bfa, | 137 | rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc, |
138 | fw_debug->debug_buffer, | 138 | fw_debug->debug_buffer, |
139 | &fw_debug->buffer_len); | 139 | &fw_debug->buffer_len); |
140 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 140 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
@@ -208,7 +208,7 @@ bfad_debugfs_read(struct file *file, char __user *buf, | |||
208 | if (!debug || !debug->debug_buffer) | 208 | if (!debug || !debug->debug_buffer) |
209 | return 0; | 209 | return 0; |
210 | 210 | ||
211 | return memory_read_from_buffer(buf, nbytes, pos, | 211 | return simple_read_from_buffer(buf, nbytes, pos, |
212 | debug->debug_buffer, debug->buffer_len); | 212 | debug->debug_buffer, debug->buffer_len); |
213 | } | 213 | } |
214 | 214 | ||
@@ -254,7 +254,7 @@ bfad_debugfs_read_regrd(struct file *file, char __user *buf, | |||
254 | if (!bfad->regdata) | 254 | if (!bfad->regdata) |
255 | return 0; | 255 | return 0; |
256 | 256 | ||
257 | rc = memory_read_from_buffer(buf, nbytes, pos, | 257 | rc = simple_read_from_buffer(buf, nbytes, pos, |
258 | bfad->regdata, bfad->reglen); | 258 | bfad->regdata, bfad->reglen); |
259 | 259 | ||
260 | if ((*pos + nbytes) >= bfad->reglen) { | 260 | if ((*pos + nbytes) >= bfad->reglen) { |
@@ -279,15 +279,31 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, | |||
279 | u32 *regbuf; | 279 | u32 *regbuf; |
280 | void __iomem *rb, *reg_addr; | 280 | void __iomem *rb, *reg_addr; |
281 | unsigned long flags; | 281 | unsigned long flags; |
282 | void *kern_buf; | ||
282 | 283 | ||
283 | rc = sscanf(buf, "%x:%x", &addr, &len); | 284 | kern_buf = kzalloc(nbytes, GFP_KERNEL); |
285 | |||
286 | if (!kern_buf) { | ||
287 | printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n", | ||
288 | bfad->inst_no); | ||
289 | return -ENOMEM; | ||
290 | } | ||
291 | |||
292 | if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { | ||
293 | kfree(kern_buf); | ||
294 | return -ENOMEM; | ||
295 | } | ||
296 | |||
297 | rc = sscanf(kern_buf, "%x:%x", &addr, &len); | ||
284 | if (rc < 2) { | 298 | if (rc < 2) { |
285 | printk(KERN_INFO | 299 | printk(KERN_INFO |
286 | "bfad[%d]: %s failed to read user buf\n", | 300 | "bfad[%d]: %s failed to read user buf\n", |
287 | bfad->inst_no, __func__); | 301 | bfad->inst_no, __func__); |
302 | kfree(kern_buf); | ||
288 | return -EINVAL; | 303 | return -EINVAL; |
289 | } | 304 | } |
290 | 305 | ||
306 | kfree(kern_buf); | ||
291 | kfree(bfad->regdata); | 307 | kfree(bfad->regdata); |
292 | bfad->regdata = NULL; | 308 | bfad->regdata = NULL; |
293 | bfad->reglen = 0; | 309 | bfad->reglen = 0; |
@@ -339,14 +355,30 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, | |||
339 | int addr, val, rc; | 355 | int addr, val, rc; |
340 | void __iomem *reg_addr; | 356 | void __iomem *reg_addr; |
341 | unsigned long flags; | 357 | unsigned long flags; |
358 | void *kern_buf; | ||
359 | |||
360 | kern_buf = kzalloc(nbytes, GFP_KERNEL); | ||
361 | |||
362 | if (!kern_buf) { | ||
363 | printk(KERN_INFO "bfad[%d]: Failed to allocate buffer\n", | ||
364 | bfad->inst_no); | ||
365 | return -ENOMEM; | ||
366 | } | ||
367 | |||
368 | if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) { | ||
369 | kfree(kern_buf); | ||
370 | return -ENOMEM; | ||
371 | } | ||
342 | 372 | ||
343 | rc = sscanf(buf, "%x:%x", &addr, &val); | 373 | rc = sscanf(kern_buf, "%x:%x", &addr, &val); |
344 | if (rc < 2) { | 374 | if (rc < 2) { |
345 | printk(KERN_INFO | 375 | printk(KERN_INFO |
346 | "bfad[%d]: %s failed to read user buf\n", | 376 | "bfad[%d]: %s failed to read user buf\n", |
347 | bfad->inst_no, __func__); | 377 | bfad->inst_no, __func__); |
378 | kfree(kern_buf); | ||
348 | return -EINVAL; | 379 | return -EINVAL; |
349 | } | 380 | } |
381 | kfree(kern_buf); | ||
350 | 382 | ||
351 | addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */ | 383 | addr &= BFA_REG_ADDRMSK(bfa); /* offset only 17 bit and word align */ |
352 | 384 | ||
@@ -359,7 +391,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, | |||
359 | return -EINVAL; | 391 | return -EINVAL; |
360 | } | 392 | } |
361 | 393 | ||
362 | reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr); | 394 | reg_addr = (bfa_ioc_bar0(ioc)) + addr; |
363 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 395 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
364 | writel(val, reg_addr); | 396 | writel(val, reg_addr); |
365 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 397 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
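
The register-write path above drops the double cast and does the offset arithmetic directly on the __iomem pointer returned by bfa_ioc_bar0(), keeping the address-space annotation intact for sparse. A minimal sketch of the same pattern with hypothetical names:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical helper: write a 32-bit register at a byte offset from BAR0. */
static void demo_reg_write(void __iomem *bar0, u32 offset, u32 val,
			   spinlock_t *lock)
{
	void __iomem *reg_addr = bar0 + offset;	/* byte arithmetic, __iomem preserved */
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	writel(val, reg_addr);
	spin_unlock_irqrestore(lock, flags);
}
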
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index d5ce2349ac59..7f9ea90254cd 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h | |||
@@ -26,7 +26,23 @@ | |||
26 | #ifndef __BFAD_DRV_H__ | 26 | #ifndef __BFAD_DRV_H__ |
27 | #define __BFAD_DRV_H__ | 27 | #define __BFAD_DRV_H__ |
28 | 28 | ||
29 | #include "bfa_os_inc.h" | 29 | #include <linux/types.h> |
30 | #include <linux/version.h> | ||
31 | #include <linux/pci.h> | ||
32 | #include <linux/dma-mapping.h> | ||
33 | #include <linux/idr.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/cdev.h> | ||
36 | #include <linux/fs.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/vmalloc.h> | ||
39 | #include <linux/workqueue.h> | ||
40 | #include <linux/bitops.h> | ||
41 | #include <scsi/scsi.h> | ||
42 | #include <scsi/scsi_host.h> | ||
43 | #include <scsi/scsi_tcq.h> | ||
44 | #include <scsi/scsi_transport_fc.h> | ||
45 | #include <scsi/scsi_transport.h> | ||
30 | 46 | ||
31 | #include "bfa_modules.h" | 47 | #include "bfa_modules.h" |
32 | #include "bfa_fcs.h" | 48 | #include "bfa_fcs.h" |
@@ -39,7 +55,7 @@ | |||
39 | #ifdef BFA_DRIVER_VERSION | 55 | #ifdef BFA_DRIVER_VERSION |
40 | #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION | 56 | #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION |
41 | #else | 57 | #else |
42 | #define BFAD_DRIVER_VERSION "2.3.2.0" | 58 | #define BFAD_DRIVER_VERSION "2.3.2.3" |
43 | #endif | 59 | #endif |
44 | 60 | ||
45 | #define BFAD_PROTO_NAME FCPI_NAME | 61 | #define BFAD_PROTO_NAME FCPI_NAME |
@@ -263,28 +279,21 @@ struct bfad_hal_comp { | |||
263 | */ | 279 | */ |
264 | #define nextLowerInt(x) \ | 280 | #define nextLowerInt(x) \ |
265 | do { \ | 281 | do { \ |
266 | int i; \ | 282 | int __i; \ |
267 | (*x)--; \ | 283 | (*x)--; \ |
268 | for (i = 1; i < (sizeof(int)*8); i <<= 1) \ | 284 | for (__i = 1; __i < (sizeof(int)*8); __i <<= 1) \ |
269 | (*x) = (*x) | (*x) >> i; \ | 285 | (*x) = (*x) | (*x) >> __i; \ |
270 | (*x)++; \ | 286 | (*x)++; \ |
271 | (*x) = (*x) >> 1; \ | 287 | (*x) = (*x) >> 1; \ |
272 | } while (0) | 288 | } while (0) |
273 | 289 | ||
274 | 290 | ||
275 | #define list_remove_head(list, entry, type, member) \ | 291 | #define BFA_LOG(level, bfad, mask, fmt, arg...) \ |
276 | do { \ | 292 | do { \ |
277 | entry = NULL; \ | 293 | if (((mask) == 4) || (level[1] <= '4')) \ |
278 | if (!list_empty(list)) { \ | 294 | dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg); \ |
279 | entry = list_entry((list)->next, type, member); \ | ||
280 | list_del_init(&entry->member); \ | ||
281 | } \ | ||
282 | } while (0) | 295 | } while (0) |
283 | 296 | ||
284 | #define list_get_first(list, type, member) \ | ||
285 | ((list_empty(list)) ? NULL : \ | ||
286 | list_entry((list)->next, type, member)) | ||
287 | |||
288 | bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, | 297 | bfa_status_t bfad_vport_create(struct bfad_s *bfad, u16 vf_id, |
289 | struct bfa_lport_cfg_s *port_cfg, | 298 | struct bfa_lport_cfg_s *port_cfg, |
290 | struct device *dev); | 299 | struct device *dev); |
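
The nextLowerInt() macro in the hunk above computes the largest power of two strictly below *x by smearing the top set bit into every lower position, stepping up to the next power of two, then halving; the loop variable is renamed __i so the macro can no longer shadow a caller's local. A standalone, user-space sketch of the same trick (hypothetical function name):

#include <stdio.h>

/* Largest power of two strictly below v (valid for 2 <= v < 1u << 31),
 * using the same bit-smearing trick as the nextLowerInt() macro. */
static unsigned int next_lower_pow2(unsigned int v)
{
	unsigned int i;

	v--;
	for (i = 1; i < sizeof(v) * 8; i <<= 1)
		v |= v >> i;		/* smear the top set bit downward */
	v++;				/* now the smallest power of two >= original v */
	return v >> 1;			/* step back to the next lower power */
}

int main(void)
{
	/* prints: 4 4 512 */
	printf("%u %u %u\n", next_lower_pow2(5), next_lower_pow2(8), next_lower_pow2(1000));
	return 0;
}
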
@@ -316,8 +325,8 @@ void bfad_debugfs_exit(struct bfad_port_s *port); | |||
316 | 325 | ||
317 | void bfad_pci_remove(struct pci_dev *pdev); | 326 | void bfad_pci_remove(struct pci_dev *pdev); |
318 | int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); | 327 | int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid); |
319 | void bfad_os_rport_online_wait(struct bfad_s *bfad); | 328 | void bfad_rport_online_wait(struct bfad_s *bfad); |
320 | int bfad_os_get_linkup_delay(struct bfad_s *bfad); | 329 | int bfad_get_linkup_delay(struct bfad_s *bfad); |
321 | int bfad_install_msix_handler(struct bfad_s *bfad); | 330 | int bfad_install_msix_handler(struct bfad_s *bfad); |
322 | 331 | ||
323 | extern struct idr bfad_im_port_index; | 332 | extern struct idr bfad_im_port_index; |
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index fbad5e9b2402..c2b36179e8e8 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
@@ -21,7 +21,6 @@ | |||
21 | 21 | ||
22 | #include "bfad_drv.h" | 22 | #include "bfad_drv.h" |
23 | #include "bfad_im.h" | 23 | #include "bfad_im.h" |
24 | #include "bfa_cb_ioim.h" | ||
25 | #include "bfa_fcs.h" | 24 | #include "bfa_fcs.h" |
26 | 25 | ||
27 | BFA_TRC_FILE(LDRV, IM); | 26 | BFA_TRC_FILE(LDRV, IM); |
@@ -93,10 +92,10 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, | |||
93 | if (!cmnd->result && itnim && | 92 | if (!cmnd->result && itnim && |
94 | (bfa_lun_queue_depth > cmnd->device->queue_depth)) { | 93 | (bfa_lun_queue_depth > cmnd->device->queue_depth)) { |
95 | /* Queue depth adjustment for good status completion */ | 94 | /* Queue depth adjustment for good status completion */ |
96 | bfad_os_ramp_up_qdepth(itnim, cmnd->device); | 95 | bfad_ramp_up_qdepth(itnim, cmnd->device); |
97 | } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { | 96 | } else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) { |
98 | /* qfull handling */ | 97 | /* qfull handling */ |
99 | bfad_os_handle_qfull(itnim, cmnd->device); | 98 | bfad_handle_qfull(itnim, cmnd->device); |
100 | } | 99 | } |
101 | } | 100 | } |
102 | 101 | ||
@@ -124,7 +123,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio) | |||
124 | if (itnim_data) { | 123 | if (itnim_data) { |
125 | itnim = itnim_data->itnim; | 124 | itnim = itnim_data->itnim; |
126 | if (itnim) | 125 | if (itnim) |
127 | bfad_os_ramp_up_qdepth(itnim, cmnd->device); | 126 | bfad_ramp_up_qdepth(itnim, cmnd->device); |
128 | } | 127 | } |
129 | } | 128 | } |
130 | 129 | ||
@@ -183,7 +182,7 @@ bfad_im_info(struct Scsi_Host *shost) | |||
183 | bfa_get_adapter_model(bfa, model); | 182 | bfa_get_adapter_model(bfa, model); |
184 | 183 | ||
185 | memset(bfa_buf, 0, sizeof(bfa_buf)); | 184 | memset(bfa_buf, 0, sizeof(bfa_buf)); |
186 | if (ioc->ctdev) | 185 | if (ioc->ctdev && !ioc->fcmode) |
187 | snprintf(bfa_buf, sizeof(bfa_buf), | 186 | snprintf(bfa_buf, sizeof(bfa_buf), |
188 | "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", | 187 | "Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s", |
189 | model, bfad->pci_name, BFAD_DRIVER_VERSION); | 188 | model, bfad->pci_name, BFAD_DRIVER_VERSION); |
@@ -258,6 +257,7 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, | |||
258 | struct bfa_tskim_s *tskim; | 257 | struct bfa_tskim_s *tskim; |
259 | struct bfa_itnim_s *bfa_itnim; | 258 | struct bfa_itnim_s *bfa_itnim; |
260 | bfa_status_t rc = BFA_STATUS_OK; | 259 | bfa_status_t rc = BFA_STATUS_OK; |
260 | struct scsi_lun scsilun; | ||
261 | 261 | ||
262 | tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); | 262 | tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd); |
263 | if (!tskim) { | 263 | if (!tskim) { |
@@ -274,7 +274,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd, | |||
274 | cmnd->host_scribble = NULL; | 274 | cmnd->host_scribble = NULL; |
275 | cmnd->SCp.Status = 0; | 275 | cmnd->SCp.Status = 0; |
276 | bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); | 276 | bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); |
277 | bfa_tskim_start(tskim, bfa_itnim, (lun_t)0, | 277 | memset(&scsilun, 0, sizeof(scsilun)); |
278 | bfa_tskim_start(tskim, bfa_itnim, scsilun, | ||
278 | FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); | 279 | FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO); |
279 | out: | 280 | out: |
280 | return rc; | 281 | return rc; |
@@ -301,6 +302,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
301 | int rc = SUCCESS; | 302 | int rc = SUCCESS; |
302 | unsigned long flags; | 303 | unsigned long flags; |
303 | enum bfi_tskim_status task_status; | 304 | enum bfi_tskim_status task_status; |
305 | struct scsi_lun scsilun; | ||
304 | 306 | ||
305 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 307 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
306 | itnim = itnim_data->itnim; | 308 | itnim = itnim_data->itnim; |
@@ -327,8 +329,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) | |||
327 | cmnd->SCp.ptr = (char *)&wq; | 329 | cmnd->SCp.ptr = (char *)&wq; |
328 | cmnd->SCp.Status = 0; | 330 | cmnd->SCp.Status = 0; |
329 | bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); | 331 | bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim); |
330 | bfa_tskim_start(tskim, bfa_itnim, | 332 | int_to_scsilun(cmnd->device->lun, &scsilun); |
331 | bfad_int_to_lun(cmnd->device->lun), | 333 | bfa_tskim_start(tskim, bfa_itnim, scsilun, |
332 | FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); | 334 | FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO); |
333 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 335 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
334 | 336 | ||
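
Both reset paths above now hand bfa_tskim_start() a midlayer struct scsi_lun instead of the driver-private lun_t: a zeroed structure for the target reset, int_to_scsilun() for the LUN reset. A minimal sketch of the conversion, with a hypothetical helper name (struct scsi_lun and int_to_scsilun() come from the SCSI midlayer headers):

#include <linux/string.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Hypothetical helper showing both call-site patterns for filling the
 * 8-byte SAM-format LUN that the firmware interface now expects. */
static void demo_fill_lun(struct scsi_cmnd *cmnd, struct scsi_lun *scsilun)
{
	/* Target reset path simply zeroes the structure (LUN 0): */
	memset(scsilun, 0, sizeof(*scsilun));

	/* LUN reset path encodes the command's LUN into SAM format: */
	int_to_scsilun(cmnd->device->lun, scsilun);
}
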
@@ -364,7 +366,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd) | |||
364 | 366 | ||
365 | spin_lock_irqsave(&bfad->bfad_lock, flags); | 367 | spin_lock_irqsave(&bfad->bfad_lock, flags); |
366 | for (i = 0; i < MAX_FCP_TARGET; i++) { | 368 | for (i = 0; i < MAX_FCP_TARGET; i++) { |
367 | itnim = bfad_os_get_itnim(im_port, i); | 369 | itnim = bfad_get_itnim(im_port, i); |
368 | if (itnim) { | 370 | if (itnim) { |
369 | cmnd->SCp.ptr = (char *)&wq; | 371 | cmnd->SCp.ptr = (char *)&wq; |
370 | rc = bfad_im_target_reset_send(bfad, cmnd, itnim); | 372 | rc = bfad_im_target_reset_send(bfad, cmnd, itnim); |
@@ -447,7 +449,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) | |||
447 | struct bfad_im_s *im = itnim_drv->im; | 449 | struct bfad_im_s *im = itnim_drv->im; |
448 | 450 | ||
449 | /* online to free state transition should not happen */ | 451 | /* online to free state transition should not happen */ |
450 | bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE); | 452 | WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE); |
451 | 453 | ||
452 | itnim_drv->queue_work = 1; | 454 | itnim_drv->queue_work = 1; |
453 | /* offline request is not yet done, use the same request to free */ | 455 | /* offline request is not yet done, use the same request to free */ |
@@ -545,7 +547,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, | |||
545 | 547 | ||
546 | mutex_unlock(&bfad_mutex); | 548 | mutex_unlock(&bfad_mutex); |
547 | 549 | ||
548 | im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad); | 550 | im_port->shost = bfad_scsi_host_alloc(im_port, bfad); |
549 | if (!im_port->shost) { | 551 | if (!im_port->shost) { |
550 | error = 1; | 552 | error = 1; |
551 | goto out_free_idr; | 553 | goto out_free_idr; |
@@ -571,7 +573,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, | |||
571 | } | 573 | } |
572 | 574 | ||
573 | /* setup host fixed attribute if the lk supports */ | 575 | /* setup host fixed attribute if the lk supports */ |
574 | bfad_os_fc_host_init(im_port); | 576 | bfad_fc_host_init(im_port); |
575 | 577 | ||
576 | return 0; | 578 | return 0; |
577 | 579 | ||
@@ -662,7 +664,7 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port) | |||
662 | } | 664 | } |
663 | 665 | ||
664 | /* the itnim_mapped_list must be empty at this time */ | 666 | /* the itnim_mapped_list must be empty at this time */ |
665 | bfa_assert(list_empty(&im_port->itnim_mapped_list)); | 667 | WARN_ON(!list_empty(&im_port->itnim_mapped_list)); |
666 | 668 | ||
667 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 669 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
668 | } | 670 | } |
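
Note that the bfa_assert() → WARN_ON() conversions above invert the expression: bfa_assert(expr) complained when expr was false, while WARN_ON(cond) complains when cond is true. A small sketch of the equivalence (hypothetical function and arguments):

#include <linux/bug.h>
#include <linux/list.h>

/* Hypothetical illustration of the inverted conditions. */
static void demo_assert_to_warn_on(struct list_head *mapped_list,
				   int state, int online_state)
{
	/* was: bfa_assert(state != online_state); */
	WARN_ON(state == online_state);

	/* was: bfa_assert(list_empty(mapped_list)); */
	WARN_ON(!list_empty(mapped_list));
}
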
@@ -682,7 +684,7 @@ bfad_im_probe(struct bfad_s *bfad) | |||
682 | bfad->im = im; | 684 | bfad->im = im; |
683 | im->bfad = bfad; | 685 | im->bfad = bfad; |
684 | 686 | ||
685 | if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) { | 687 | if (bfad_thread_workq(bfad) != BFA_STATUS_OK) { |
686 | kfree(im); | 688 | kfree(im); |
687 | rc = BFA_STATUS_FAILED; | 689 | rc = BFA_STATUS_FAILED; |
688 | } | 690 | } |
@@ -695,14 +697,14 @@ void | |||
695 | bfad_im_probe_undo(struct bfad_s *bfad) | 697 | bfad_im_probe_undo(struct bfad_s *bfad) |
696 | { | 698 | { |
697 | if (bfad->im) { | 699 | if (bfad->im) { |
698 | bfad_os_destroy_workq(bfad->im); | 700 | bfad_destroy_workq(bfad->im); |
699 | kfree(bfad->im); | 701 | kfree(bfad->im); |
700 | bfad->im = NULL; | 702 | bfad->im = NULL; |
701 | } | 703 | } |
702 | } | 704 | } |
703 | 705 | ||
704 | struct Scsi_Host * | 706 | struct Scsi_Host * |
705 | bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) | 707 | bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) |
706 | { | 708 | { |
707 | struct scsi_host_template *sht; | 709 | struct scsi_host_template *sht; |
708 | 710 | ||
@@ -717,7 +719,7 @@ bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) | |||
717 | } | 719 | } |
718 | 720 | ||
719 | void | 721 | void |
720 | bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) | 722 | bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) |
721 | { | 723 | { |
722 | if (!(im_port->flags & BFAD_PORT_DELETE)) | 724 | if (!(im_port->flags & BFAD_PORT_DELETE)) |
723 | flush_workqueue(bfad->im->drv_workq); | 725 | flush_workqueue(bfad->im->drv_workq); |
@@ -727,7 +729,7 @@ bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port) | |||
727 | } | 729 | } |
728 | 730 | ||
729 | void | 731 | void |
730 | bfad_os_destroy_workq(struct bfad_im_s *im) | 732 | bfad_destroy_workq(struct bfad_im_s *im) |
731 | { | 733 | { |
732 | if (im && im->drv_workq) { | 734 | if (im && im->drv_workq) { |
733 | flush_workqueue(im->drv_workq); | 735 | flush_workqueue(im->drv_workq); |
@@ -737,7 +739,7 @@ bfad_os_destroy_workq(struct bfad_im_s *im) | |||
737 | } | 739 | } |
738 | 740 | ||
739 | bfa_status_t | 741 | bfa_status_t |
740 | bfad_os_thread_workq(struct bfad_s *bfad) | 742 | bfad_thread_workq(struct bfad_s *bfad) |
741 | { | 743 | { |
742 | struct bfad_im_s *im = bfad->im; | 744 | struct bfad_im_s *im = bfad->im; |
743 | 745 | ||
@@ -841,7 +843,7 @@ bfad_im_module_exit(void) | |||
841 | } | 843 | } |
842 | 844 | ||
843 | void | 845 | void |
844 | bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) | 846 | bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) |
845 | { | 847 | { |
846 | struct scsi_device *tmp_sdev; | 848 | struct scsi_device *tmp_sdev; |
847 | 849 | ||
@@ -869,7 +871,7 @@ bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev) | |||
869 | } | 871 | } |
870 | 872 | ||
871 | void | 873 | void |
872 | bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) | 874 | bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) |
873 | { | 875 | { |
874 | struct scsi_device *tmp_sdev; | 876 | struct scsi_device *tmp_sdev; |
875 | 877 | ||
@@ -883,7 +885,7 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev) | |||
883 | } | 885 | } |
884 | 886 | ||
885 | struct bfad_itnim_s * | 887 | struct bfad_itnim_s * |
886 | bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id) | 888 | bfad_get_itnim(struct bfad_im_port_s *im_port, int id) |
887 | { | 889 | { |
888 | struct bfad_itnim_s *itnim = NULL; | 890 | struct bfad_itnim_s *itnim = NULL; |
889 | 891 | ||
@@ -922,7 +924,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa) | |||
922 | if (!ioc_attr) | 924 | if (!ioc_attr) |
923 | return 0; | 925 | return 0; |
924 | 926 | ||
925 | bfa_get_attr(bfa, ioc_attr); | 927 | bfa_ioc_get_attr(&bfa->ioc, ioc_attr); |
926 | if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { | 928 | if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { |
927 | if (ioc_attr->adapter_attr.is_mezz) { | 929 | if (ioc_attr->adapter_attr.is_mezz) { |
928 | supported_speed |= FC_PORTSPEED_8GBIT | | 930 | supported_speed |= FC_PORTSPEED_8GBIT | |
@@ -944,7 +946,7 @@ bfad_im_supported_speeds(struct bfa_s *bfa) | |||
944 | } | 946 | } |
945 | 947 | ||
946 | void | 948 | void |
947 | bfad_os_fc_host_init(struct bfad_im_port_s *im_port) | 949 | bfad_fc_host_init(struct bfad_im_port_s *im_port) |
948 | { | 950 | { |
949 | struct Scsi_Host *host = im_port->shost; | 951 | struct Scsi_Host *host = im_port->shost; |
950 | struct bfad_s *bfad = im_port->bfad; | 952 | struct bfad_s *bfad = im_port->bfad; |
@@ -988,7 +990,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim) | |||
988 | rport_ids.port_name = | 990 | rport_ids.port_name = |
989 | cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); | 991 | cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); |
990 | rport_ids.port_id = | 992 | rport_ids.port_id = |
991 | bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); | 993 | bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); |
992 | rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; | 994 | rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; |
993 | 995 | ||
994 | itnim->fc_rport = fc_rport = | 996 | itnim->fc_rport = fc_rport = |
@@ -1109,7 +1111,7 @@ bfad_im_itnim_work_handler(struct work_struct *work) | |||
1109 | kfree(itnim); | 1111 | kfree(itnim); |
1110 | break; | 1112 | break; |
1111 | default: | 1113 | default: |
1112 | bfa_assert(0); | 1114 | WARN_ON(1); |
1113 | break; | 1115 | break; |
1114 | } | 1116 | } |
1115 | 1117 | ||
@@ -1172,7 +1174,6 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd | |||
1172 | } | 1174 | } |
1173 | 1175 | ||
1174 | cmnd->host_scribble = (char *)hal_io; | 1176 | cmnd->host_scribble = (char *)hal_io; |
1175 | bfa_trc_fp(bfad, hal_io->iotag); | ||
1176 | bfa_ioim_start(hal_io); | 1177 | bfa_ioim_start(hal_io); |
1177 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); | 1178 | spin_unlock_irqrestore(&bfad->bfad_lock, flags); |
1178 | 1179 | ||
@@ -1190,7 +1191,7 @@ out_fail_cmd: | |||
1190 | static DEF_SCSI_QCMD(bfad_im_queuecommand) | 1191 | static DEF_SCSI_QCMD(bfad_im_queuecommand) |
1191 | 1192 | ||
1192 | void | 1193 | void |
1193 | bfad_os_rport_online_wait(struct bfad_s *bfad) | 1194 | bfad_rport_online_wait(struct bfad_s *bfad) |
1194 | { | 1195 | { |
1195 | int i; | 1196 | int i; |
1196 | int rport_delay = 10; | 1197 | int rport_delay = 10; |
@@ -1218,7 +1219,7 @@ bfad_os_rport_online_wait(struct bfad_s *bfad) | |||
1218 | } | 1219 | } |
1219 | 1220 | ||
1220 | int | 1221 | int |
1221 | bfad_os_get_linkup_delay(struct bfad_s *bfad) | 1222 | bfad_get_linkup_delay(struct bfad_s *bfad) |
1222 | { | 1223 | { |
1223 | u8 nwwns = 0; | 1224 | u8 nwwns = 0; |
1224 | wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; | 1225 | wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX]; |
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index b038c0e08921..bfee63b16fa9 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h | |||
@@ -117,17 +117,17 @@ struct bfad_im_s { | |||
117 | char drv_workq_name[KOBJ_NAME_LEN]; | 117 | char drv_workq_name[KOBJ_NAME_LEN]; |
118 | }; | 118 | }; |
119 | 119 | ||
120 | struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, | 120 | struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, |
121 | struct bfad_s *); | 121 | struct bfad_s *); |
122 | bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad); | 122 | bfa_status_t bfad_thread_workq(struct bfad_s *bfad); |
123 | void bfad_os_destroy_workq(struct bfad_im_s *im); | 123 | void bfad_destroy_workq(struct bfad_im_s *im); |
124 | void bfad_os_fc_host_init(struct bfad_im_port_s *im_port); | 124 | void bfad_fc_host_init(struct bfad_im_port_s *im_port); |
125 | void bfad_os_scsi_host_free(struct bfad_s *bfad, | 125 | void bfad_scsi_host_free(struct bfad_s *bfad, |
126 | struct bfad_im_port_s *im_port); | 126 | struct bfad_im_port_s *im_port); |
127 | void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, | 127 | void bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, |
128 | struct scsi_device *sdev); | 128 | struct scsi_device *sdev); |
129 | void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); | 129 | void bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev); |
130 | struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id); | 130 | struct bfad_itnim_s *bfad_get_itnim(struct bfad_im_port_s *im_port, int id); |
131 | 131 | ||
132 | extern struct scsi_host_template bfad_im_scsi_host_template; | 132 | extern struct scsi_host_template bfad_im_scsi_host_template; |
133 | extern struct scsi_host_template bfad_im_vport_template; | 133 | extern struct scsi_host_template bfad_im_vport_template; |
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 58796d1284b7..72b69a0c3b51 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h | |||
@@ -95,8 +95,8 @@ enum { | |||
95 | */ | 95 | */ |
96 | union bfi_addr_u { | 96 | union bfi_addr_u { |
97 | struct { | 97 | struct { |
98 | u32 addr_lo; | 98 | __be32 addr_lo; |
99 | u32 addr_hi; | 99 | __be32 addr_hi; |
100 | } a32; | 100 | } a32; |
101 | }; | 101 | }; |
102 | 102 | ||
@@ -104,7 +104,7 @@ union bfi_addr_u { | |||
104 | * Scatter Gather Element | 104 | * Scatter Gather Element |
105 | */ | 105 | */ |
106 | struct bfi_sge_s { | 106 | struct bfi_sge_s { |
107 | #ifdef __BIGENDIAN | 107 | #ifdef __BIG_ENDIAN |
108 | u32 flags:2, | 108 | u32 flags:2, |
109 | rsvd:2, | 109 | rsvd:2, |
110 | sg_len:28; | 110 | sg_len:28; |
@@ -399,7 +399,7 @@ union bfi_ioc_i2h_msg_u { | |||
399 | */ | 399 | */ |
400 | struct bfi_pbc_blun_s { | 400 | struct bfi_pbc_blun_s { |
401 | wwn_t tgt_pwwn; | 401 | wwn_t tgt_pwwn; |
402 | lun_t tgt_lun; | 402 | struct scsi_lun tgt_lun; |
403 | }; | 403 | }; |
404 | 404 | ||
405 | /* | 405 | /* |
diff --git a/drivers/scsi/bfa/bfi_cbreg.h b/drivers/scsi/bfa/bfi_cbreg.h index 6f03ed382c69..39ad42b66b5b 100644 --- a/drivers/scsi/bfa/bfi_cbreg.h +++ b/drivers/scsi/bfa/bfi_cbreg.h | |||
@@ -208,6 +208,7 @@ | |||
208 | #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG | 208 | #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG |
209 | #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG | 209 | #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG |
210 | #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG | 210 | #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG |
211 | #define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG | ||
211 | 212 | ||
212 | #define CPE_Q_DEPTH(__n) \ | 213 | #define CPE_Q_DEPTH(__n) \ |
213 | (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH)) | 214 | (CPE_Q0_DEPTH + (__n) * (CPE_Q1_DEPTH - CPE_Q0_DEPTH)) |
diff --git a/drivers/scsi/bfa/bfi_ctreg.h b/drivers/scsi/bfa/bfi_ctreg.h index 62b86a4b0e4b..fc4ce4a5a183 100644 --- a/drivers/scsi/bfa/bfi_ctreg.h +++ b/drivers/scsi/bfa/bfi_ctreg.h | |||
@@ -522,6 +522,7 @@ enum { | |||
522 | #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG | 522 | #define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG |
523 | #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG | 523 | #define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG |
524 | #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG | 524 | #define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG |
525 | #define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG | ||
525 | 526 | ||
526 | #define CPE_DEPTH_Q(__n) \ | 527 | #define CPE_DEPTH_Q(__n) \ |
527 | (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) | 528 | (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) |
@@ -539,22 +540,30 @@ enum { | |||
539 | (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) | 540 | (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) |
540 | #define RME_CI_PTR_Q(__n) \ | 541 | #define RME_CI_PTR_Q(__n) \ |
541 | (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) | 542 | (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) |
542 | #define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ | 543 | #define HQM_QSET_RXQ_DRBL_P0(__n) \ |
543 | * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) | 544 | (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \ |
544 | #define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ | 545 | (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) |
545 | * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) | 546 | #define HQM_QSET_TXQ_DRBL_P0(__n) \ |
546 | #define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ | 547 | (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \ |
547 | * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) | 548 | (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) |
548 | #define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ | 549 | #define HQM_QSET_IB_DRBL_1_P0(__n) \ |
549 | * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) | 550 | (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \ |
550 | #define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ | 551 | (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) |
551 | * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) | 552 | #define HQM_QSET_IB_DRBL_2_P0(__n) \ |
552 | #define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ | 553 | (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \ |
553 | * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) | 554 | (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) |
554 | #define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ | 555 | #define HQM_QSET_RXQ_DRBL_P1(__n) \ |
555 | * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) | 556 | (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \ |
556 | #define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ | 557 | (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) |
557 | * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) | 558 | #define HQM_QSET_TXQ_DRBL_P1(__n) \ |
559 | (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \ | ||
560 | (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) | ||
561 | #define HQM_QSET_IB_DRBL_1_P1(__n) \ | ||
562 | (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \ | ||
563 | (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) | ||
564 | #define HQM_QSET_IB_DRBL_2_P1(__n) \ | ||
565 | (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \ | ||
566 | (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) | ||
558 | 567 | ||
559 | #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) | 568 | #define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) |
560 | #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) | 569 | #define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) |
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index fa9f6fb9d45b..19e888a57555 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h | |||
@@ -47,10 +47,10 @@ struct bfi_iocfc_cfg_s { | |||
47 | */ | 47 | */ |
48 | union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS]; | 48 | union bfi_addr_u req_cq_ba[BFI_IOC_MAX_CQS]; |
49 | union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS]; | 49 | union bfi_addr_u req_shadow_ci[BFI_IOC_MAX_CQS]; |
50 | u16 req_cq_elems[BFI_IOC_MAX_CQS]; | 50 | __be16 req_cq_elems[BFI_IOC_MAX_CQS]; |
51 | union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS]; | 51 | union bfi_addr_u rsp_cq_ba[BFI_IOC_MAX_CQS]; |
52 | union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS]; | 52 | union bfi_addr_u rsp_shadow_pi[BFI_IOC_MAX_CQS]; |
53 | u16 rsp_cq_elems[BFI_IOC_MAX_CQS]; | 53 | __be16 rsp_cq_elems[BFI_IOC_MAX_CQS]; |
54 | 54 | ||
55 | union bfi_addr_u stats_addr; /* DMA-able address for stats */ | 55 | union bfi_addr_u stats_addr; /* DMA-able address for stats */ |
56 | union bfi_addr_u cfgrsp_addr; /* config response dma address */ | 56 | union bfi_addr_u cfgrsp_addr; /* config response dma address */ |
@@ -102,8 +102,8 @@ struct bfi_iocfc_set_intr_req_s { | |||
102 | struct bfi_mhdr_s mh; /* common msg header */ | 102 | struct bfi_mhdr_s mh; /* common msg header */ |
103 | u8 coalesce; /* enable intr coalescing */ | 103 | u8 coalesce; /* enable intr coalescing */ |
104 | u8 rsvd[3]; | 104 | u8 rsvd[3]; |
105 | u16 delay; /* delay timer 0..1125us */ | 105 | __be16 delay; /* delay timer 0..1125us */ |
106 | u16 latency; /* latency timer 0..225us */ | 106 | __be16 latency; /* latency timer 0..225us */ |
107 | }; | 107 | }; |
108 | 108 | ||
109 | 109 | ||
@@ -188,7 +188,8 @@ struct bfi_fcport_rsp_s { | |||
188 | struct bfi_mhdr_s mh; /* common msg header */ | 188 | struct bfi_mhdr_s mh; /* common msg header */ |
189 | u8 status; /* port enable status */ | 189 | u8 status; /* port enable status */ |
190 | u8 rsvd[3]; | 190 | u8 rsvd[3]; |
191 | u32 msgtag; /* msgtag for reply */ | 191 | struct bfa_port_cfg_s port_cfg;/* port configuration */ |
192 | u32 msgtag; /* msgtag for reply */ | ||
192 | }; | 193 | }; |
193 | 194 | ||
194 | /* | 195 | /* |
@@ -202,7 +203,8 @@ struct bfi_fcport_enable_req_s { | |||
202 | struct bfa_port_cfg_s port_cfg; /* port configuration */ | 203 | struct bfa_port_cfg_s port_cfg; /* port configuration */ |
203 | union bfi_addr_u stats_dma_addr; /* DMA address for stats */ | 204 | union bfi_addr_u stats_dma_addr; /* DMA address for stats */ |
204 | u32 msgtag; /* msgtag for reply */ | 205 | u32 msgtag; /* msgtag for reply */ |
205 | u32 rsvd2; | 206 | u8 use_flash_cfg; /* get prot cfg from flash */ |
207 | u8 rsvd2[3]; | ||
206 | }; | 208 | }; |
207 | 209 | ||
208 | /* | 210 | /* |
@@ -210,7 +212,7 @@ struct bfi_fcport_enable_req_s { | |||
210 | */ | 212 | */ |
211 | struct bfi_fcport_set_svc_params_req_s { | 213 | struct bfi_fcport_set_svc_params_req_s { |
212 | struct bfi_mhdr_s mh; /* msg header */ | 214 | struct bfi_mhdr_s mh; /* msg header */ |
213 | u16 tx_bbcredit; /* Tx credits */ | 215 | __be16 tx_bbcredit; /* Tx credits */ |
214 | u16 rsvd; | 216 | u16 rsvd; |
215 | }; | 217 | }; |
216 | 218 | ||
@@ -231,7 +233,7 @@ struct bfi_fcport_trunk_link_s { | |||
231 | u8 state; /* bfa_trunk_link_state_t */ | 233 | u8 state; /* bfa_trunk_link_state_t */ |
232 | u8 speed; /* bfa_port_speed_t */ | 234 | u8 speed; /* bfa_port_speed_t */ |
233 | u8 rsvd; | 235 | u8 rsvd; |
234 | u32 deskew; | 236 | __be32 deskew; |
235 | }; | 237 | }; |
236 | 238 | ||
237 | #define BFI_FCPORT_MAX_LINKS 2 | 239 | #define BFI_FCPORT_MAX_LINKS 2 |
@@ -284,17 +286,17 @@ enum bfi_fcxp_i2h { | |||
284 | */ | 286 | */ |
285 | struct bfi_fcxp_send_req_s { | 287 | struct bfi_fcxp_send_req_s { |
286 | struct bfi_mhdr_s mh; /* Common msg header */ | 288 | struct bfi_mhdr_s mh; /* Common msg header */ |
287 | u16 fcxp_tag; /* driver request tag */ | 289 | __be16 fcxp_tag; /* driver request tag */ |
288 | u16 max_frmsz; /* max send frame size */ | 290 | __be16 max_frmsz; /* max send frame size */ |
289 | u16 vf_id; /* vsan tag if applicable */ | 291 | __be16 vf_id; /* vsan tag if applicable */ |
290 | u16 rport_fw_hndl; /* FW Handle for the remote port */ | 292 | u16 rport_fw_hndl; /* FW Handle for the remote port */ |
291 | u8 class; /* FC class used for req/rsp */ | 293 | u8 class; /* FC class used for req/rsp */ |
292 | u8 rsp_timeout; /* timeout in secs, 0-no response */ | 294 | u8 rsp_timeout; /* timeout in secs, 0-no response */ |
293 | u8 cts; /* continue sequence */ | 295 | u8 cts; /* continue sequence */ |
294 | u8 lp_tag; /* lport tag */ | 296 | u8 lp_tag; /* lport tag */ |
295 | struct fchs_s fchs; /* request FC header structure */ | 297 | struct fchs_s fchs; /* request FC header structure */ |
296 | u32 req_len; /* request payload length */ | 298 | __be32 req_len; /* request payload length */ |
297 | u32 rsp_maxlen; /* max response length expected */ | 299 | __be32 rsp_maxlen; /* max response length expected */ |
298 | struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ | 300 | struct bfi_sge_s req_sge[BFA_FCXP_MAX_SGES]; /* request buf */ |
299 | struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ | 301 | struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ |
300 | }; | 302 | }; |
@@ -304,11 +306,11 @@ struct bfi_fcxp_send_req_s { | |||
304 | */ | 306 | */ |
305 | struct bfi_fcxp_send_rsp_s { | 307 | struct bfi_fcxp_send_rsp_s { |
306 | struct bfi_mhdr_s mh; /* Common msg header */ | 308 | struct bfi_mhdr_s mh; /* Common msg header */ |
307 | u16 fcxp_tag; /* send request tag */ | 309 | __be16 fcxp_tag; /* send request tag */ |
308 | u8 req_status; /* request status */ | 310 | u8 req_status; /* request status */ |
309 | u8 rsvd; | 311 | u8 rsvd; |
310 | u32 rsp_len; /* actual response length */ | 312 | __be32 rsp_len; /* actual response length */ |
311 | u32 residue_len; /* residual response length */ | 313 | __be32 residue_len; /* residual response length */ |
312 | struct fchs_s fchs; /* response FC header structure */ | 314 | struct fchs_s fchs; /* response FC header structure */ |
313 | }; | 315 | }; |
314 | 316 | ||
@@ -325,7 +327,7 @@ enum bfi_uf_i2h { | |||
325 | struct bfi_uf_buf_post_s { | 327 | struct bfi_uf_buf_post_s { |
326 | struct bfi_mhdr_s mh; /* Common msg header */ | 328 | struct bfi_mhdr_s mh; /* Common msg header */ |
327 | u16 buf_tag; /* buffer tag */ | 329 | u16 buf_tag; /* buffer tag */ |
328 | u16 buf_len; /* total buffer length */ | 330 | __be16 buf_len; /* total buffer length */ |
329 | struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ | 331 | struct bfi_sge_s sge[BFA_UF_MAX_SGES]; /* buffer DMA SGEs */ |
330 | }; | 332 | }; |
331 | 333 | ||
@@ -340,6 +342,7 @@ struct bfi_uf_frm_rcvd_s { | |||
340 | enum bfi_lps_h2i_msgs { | 342 | enum bfi_lps_h2i_msgs { |
341 | BFI_LPS_H2I_LOGIN_REQ = 1, | 343 | BFI_LPS_H2I_LOGIN_REQ = 1, |
342 | BFI_LPS_H2I_LOGOUT_REQ = 2, | 344 | BFI_LPS_H2I_LOGOUT_REQ = 2, |
345 | BFI_LPS_H2I_N2N_PID_REQ = 3, | ||
343 | }; | 346 | }; |
344 | 347 | ||
345 | enum bfi_lps_i2h_msgs { | 348 | enum bfi_lps_i2h_msgs { |
@@ -352,7 +355,7 @@ struct bfi_lps_login_req_s { | |||
352 | struct bfi_mhdr_s mh; /* common msg header */ | 355 | struct bfi_mhdr_s mh; /* common msg header */ |
353 | u8 lp_tag; | 356 | u8 lp_tag; |
354 | u8 alpa; | 357 | u8 alpa; |
355 | u16 pdu_size; | 358 | __be16 pdu_size; |
356 | wwn_t pwwn; | 359 | wwn_t pwwn; |
357 | wwn_t nwwn; | 360 | wwn_t nwwn; |
358 | u8 fdisc; | 361 | u8 fdisc; |
@@ -368,7 +371,7 @@ struct bfi_lps_login_rsp_s { | |||
368 | u8 lsrjt_expl; | 371 | u8 lsrjt_expl; |
369 | wwn_t port_name; | 372 | wwn_t port_name; |
370 | wwn_t node_name; | 373 | wwn_t node_name; |
371 | u16 bb_credit; | 374 | __be16 bb_credit; |
372 | u8 f_port; | 375 | u8 f_port; |
373 | u8 npiv_en; | 376 | u8 npiv_en; |
374 | u32 lp_pid:24; | 377 | u32 lp_pid:24; |
@@ -399,10 +402,17 @@ struct bfi_lps_cvl_event_s { | |||
399 | u8 rsvd[3]; | 402 | u8 rsvd[3]; |
400 | }; | 403 | }; |
401 | 404 | ||
405 | struct bfi_lps_n2n_pid_req_s { | ||
406 | struct bfi_mhdr_s mh; /* common msg header */ | ||
407 | u8 lp_tag; | ||
408 | u32 lp_pid:24; | ||
409 | }; | ||
410 | |||
402 | union bfi_lps_h2i_msg_u { | 411 | union bfi_lps_h2i_msg_u { |
403 | struct bfi_mhdr_s *msg; | 412 | struct bfi_mhdr_s *msg; |
404 | struct bfi_lps_login_req_s *login_req; | 413 | struct bfi_lps_login_req_s *login_req; |
405 | struct bfi_lps_logout_req_s *logout_req; | 414 | struct bfi_lps_logout_req_s *logout_req; |
415 | struct bfi_lps_n2n_pid_req_s *n2n_pid_req; | ||
406 | }; | 416 | }; |
407 | 417 | ||
408 | union bfi_lps_i2h_msg_u { | 418 | union bfi_lps_i2h_msg_u { |
@@ -427,7 +437,7 @@ enum bfi_rport_i2h_msgs { | |||
427 | struct bfi_rport_create_req_s { | 437 | struct bfi_rport_create_req_s { |
428 | struct bfi_mhdr_s mh; /* common msg header */ | 438 | struct bfi_mhdr_s mh; /* common msg header */ |
429 | u16 bfa_handle; /* host rport handle */ | 439 | u16 bfa_handle; /* host rport handle */ |
430 | u16 max_frmsz; /* max rcv pdu size */ | 440 | __be16 max_frmsz; /* max rcv pdu size */ |
431 | u32 pid:24, /* remote port ID */ | 441 | u32 pid:24, /* remote port ID */ |
432 | lp_tag:8; /* local port tag */ | 442 | lp_tag:8; /* local port tag */ |
433 | u32 local_pid:24, /* local port ID */ | 443 | u32 local_pid:24, /* local port ID */ |
@@ -583,7 +593,7 @@ struct bfi_ioim_dif_s { | |||
583 | */ | 593 | */ |
584 | struct bfi_ioim_req_s { | 594 | struct bfi_ioim_req_s { |
585 | struct bfi_mhdr_s mh; /* Common msg header */ | 595 | struct bfi_mhdr_s mh; /* Common msg header */ |
586 | u16 io_tag; /* I/O tag */ | 596 | __be16 io_tag; /* I/O tag */ |
587 | u16 rport_hdl; /* itnim/rport firmware handle */ | 597 | u16 rport_hdl; /* itnim/rport firmware handle */ |
588 | struct fcp_cmnd_s cmnd; /* IO request info */ | 598 | struct fcp_cmnd_s cmnd; /* IO request info */ |
589 | 599 | ||
@@ -689,7 +699,7 @@ enum bfi_ioim_status { | |||
689 | */ | 699 | */ |
690 | struct bfi_ioim_rsp_s { | 700 | struct bfi_ioim_rsp_s { |
691 | struct bfi_mhdr_s mh; /* common msg header */ | 701 | struct bfi_mhdr_s mh; /* common msg header */ |
692 | u16 io_tag; /* completed IO tag */ | 702 | __be16 io_tag; /* completed IO tag */ |
693 | u16 bfa_rport_hndl; /* releated rport handle */ | 703 | u16 bfa_rport_hndl; /* releated rport handle */ |
694 | u8 io_status; /* IO completion status */ | 704 | u8 io_status; /* IO completion status */ |
695 | u8 reuse_io_tag; /* IO tag can be reused */ | 705 | u8 reuse_io_tag; /* IO tag can be reused */ |
@@ -698,13 +708,13 @@ struct bfi_ioim_rsp_s { | |||
698 | u8 sns_len; /* scsi sense length */ | 708 | u8 sns_len; /* scsi sense length */ |
699 | u8 resid_flags; /* IO residue flags */ | 709 | u8 resid_flags; /* IO residue flags */ |
700 | u8 rsvd_a; | 710 | u8 rsvd_a; |
701 | u32 residue; /* IO residual length in bytes */ | 711 | __be32 residue; /* IO residual length in bytes */ |
702 | u32 rsvd_b[3]; | 712 | u32 rsvd_b[3]; |
703 | }; | 713 | }; |
704 | 714 | ||
705 | struct bfi_ioim_abort_req_s { | 715 | struct bfi_ioim_abort_req_s { |
706 | struct bfi_mhdr_s mh; /* Common msg header */ | 716 | struct bfi_mhdr_s mh; /* Common msg header */ |
707 | u16 io_tag; /* I/O tag */ | 717 | __be16 io_tag; /* I/O tag */ |
708 | u16 abort_tag; /* unique request tag */ | 718 | u16 abort_tag; /* unique request tag */ |
709 | }; | 719 | }; |
710 | 720 | ||
@@ -723,9 +733,9 @@ enum bfi_tskim_i2h { | |||
723 | 733 | ||
724 | struct bfi_tskim_req_s { | 734 | struct bfi_tskim_req_s { |
725 | struct bfi_mhdr_s mh; /* Common msg header */ | 735 | struct bfi_mhdr_s mh; /* Common msg header */ |
726 | u16 tsk_tag; /* task management tag */ | 736 | __be16 tsk_tag; /* task management tag */ |
727 | u16 itn_fhdl; /* itn firmware handle */ | 737 | u16 itn_fhdl; /* itn firmware handle */ |
728 | lun_t lun; /* LU number */ | 738 | struct scsi_lun lun; /* LU number */ |
729 | u8 tm_flags; /* see enum fcp_tm_cmnd */ | 739 | u8 tm_flags; /* see enum fcp_tm_cmnd */ |
730 | u8 t_secs; /* Timeout value in seconds */ | 740 | u8 t_secs; /* Timeout value in seconds */ |
731 | u8 rsvd[2]; | 741 | u8 rsvd[2]; |
@@ -733,7 +743,7 @@ struct bfi_tskim_req_s { | |||
733 | 743 | ||
734 | struct bfi_tskim_abortreq_s { | 744 | struct bfi_tskim_abortreq_s { |
735 | struct bfi_mhdr_s mh; /* Common msg header */ | 745 | struct bfi_mhdr_s mh; /* Common msg header */ |
736 | u16 tsk_tag; /* task management tag */ | 746 | __be16 tsk_tag; /* task management tag */ |
737 | u16 rsvd; | 747 | u16 rsvd; |
738 | }; | 748 | }; |
739 | 749 | ||
@@ -755,7 +765,7 @@ enum bfi_tskim_status { | |||
755 | 765 | ||
756 | struct bfi_tskim_rsp_s { | 766 | struct bfi_tskim_rsp_s { |
757 | struct bfi_mhdr_s mh; /* Common msg header */ | 767 | struct bfi_mhdr_s mh; /* Common msg header */ |
758 | u16 tsk_tag; /* task mgmt cmnd tag */ | 768 | __be16 tsk_tag; /* task mgmt cmnd tag */ |
759 | u8 tsk_status; /* @ref bfi_tskim_status */ | 769 | u8 tsk_status; /* @ref bfi_tskim_status */ |
760 | u8 rsvd; | 770 | u8 rsvd; |
761 | }; | 771 | }; |
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h index 1b6f86b2482d..30e6bdbd65af 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h | |||
@@ -1,12 +1,13 @@ | |||
1 | /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI | 1 | /* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI |
2 | * | 2 | * |
3 | * Copyright (c) 2006 - 2009 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2010 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
10 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
10 | */ | 11 | */ |
11 | #ifndef __57XX_ISCSI_CONSTANTS_H_ | 12 | #ifndef __57XX_ISCSI_CONSTANTS_H_ |
12 | #define __57XX_ISCSI_CONSTANTS_H_ | 13 | #define __57XX_ISCSI_CONSTANTS_H_ |
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h index 36af1afef9b6..dad6c8a34317 100644 --- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h | |||
@@ -1,12 +1,13 @@ | |||
1 | /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. | 1 | /* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. |
2 | * | 2 | * |
3 | * Copyright (c) 2006 - 2009 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2010 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
10 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
10 | */ | 11 | */ |
11 | #ifndef __57XX_ISCSI_HSI_LINUX_LE__ | 12 | #ifndef __57XX_ISCSI_HSI_LINUX_LE__ |
12 | #define __57XX_ISCSI_HSI_LINUX_LE__ | 13 | #define __57XX_ISCSI_HSI_LINUX_LE__ |
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index a44b1b33fa18..e1ca5fe7e6bb 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2i.h: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i.h: Broadcom NetXtreme II iSCSI driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006 - 2009 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2010 Broadcom Corporation |
4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
5 | * Copyright (c) 2007, 2008 Mike Christie | 5 | * Copyright (c) 2007, 2008 Mike Christie |
6 | * | 6 | * |
@@ -9,6 +9,7 @@ | |||
9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
10 | * | 10 | * |
11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
12 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
12 | */ | 13 | */ |
13 | 14 | ||
14 | #ifndef _BNX2I_H_ | 15 | #ifndef _BNX2I_H_ |
@@ -649,6 +650,7 @@ enum { | |||
649 | EP_STATE_OFLD_FAILED = 0x8000000, | 650 | EP_STATE_OFLD_FAILED = 0x8000000, |
650 | EP_STATE_CONNECT_FAILED = 0x10000000, | 651 | EP_STATE_CONNECT_FAILED = 0x10000000, |
651 | EP_STATE_DISCONN_TIMEDOUT = 0x20000000, | 652 | EP_STATE_DISCONN_TIMEDOUT = 0x20000000, |
653 | EP_STATE_OFLD_FAILED_CID_BUSY = 0x80000000, | ||
652 | }; | 654 | }; |
653 | 655 | ||
654 | /** | 656 | /** |
@@ -717,14 +719,11 @@ extern struct device_attribute *bnx2i_dev_attributes[]; | |||
717 | * Function Prototypes | 719 | * Function Prototypes |
718 | */ | 720 | */ |
719 | extern void bnx2i_identify_device(struct bnx2i_hba *hba); | 721 | extern void bnx2i_identify_device(struct bnx2i_hba *hba); |
720 | extern void bnx2i_register_device(struct bnx2i_hba *hba); | ||
721 | 722 | ||
722 | extern void bnx2i_ulp_init(struct cnic_dev *dev); | 723 | extern void bnx2i_ulp_init(struct cnic_dev *dev); |
723 | extern void bnx2i_ulp_exit(struct cnic_dev *dev); | 724 | extern void bnx2i_ulp_exit(struct cnic_dev *dev); |
724 | extern void bnx2i_start(void *handle); | 725 | extern void bnx2i_start(void *handle); |
725 | extern void bnx2i_stop(void *handle); | 726 | extern void bnx2i_stop(void *handle); |
726 | extern void bnx2i_reg_dev_all(void); | ||
727 | extern void bnx2i_unreg_dev_all(void); | ||
728 | extern struct bnx2i_hba *get_adapter_list_head(void); | 727 | extern struct bnx2i_hba *get_adapter_list_head(void); |
729 | 728 | ||
730 | struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, | 729 | struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, |
@@ -761,11 +760,11 @@ extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, | |||
761 | struct iscsi_task *mtask); | 760 | struct iscsi_task *mtask); |
762 | extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, | 761 | extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, |
763 | struct bnx2i_cmd *cmd); | 762 | struct bnx2i_cmd *cmd); |
764 | extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, | 763 | extern int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, |
765 | struct bnx2i_endpoint *ep); | ||
766 | extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); | ||
767 | extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, | ||
768 | struct bnx2i_endpoint *ep); | 764 | struct bnx2i_endpoint *ep); |
765 | extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); | ||
766 | extern int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, | ||
767 | struct bnx2i_endpoint *ep); | ||
769 | 768 | ||
770 | extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, | 769 | extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, |
771 | struct bnx2i_endpoint *ep); | 770 | struct bnx2i_endpoint *ep); |
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 2f9622ebbd84..96505e3ab986 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006 - 2009 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2010 Broadcom Corporation |
4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
5 | * Copyright (c) 2007, 2008 Mike Christie | 5 | * Copyright (c) 2007, 2008 Mike Christie |
6 | * | 6 | * |
@@ -9,6 +9,7 @@ | |||
9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
10 | * | 10 | * |
11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
12 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
12 | */ | 13 | */ |
13 | 14 | ||
14 | #include <linux/gfp.h> | 15 | #include <linux/gfp.h> |
@@ -385,6 +386,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, | |||
385 | struct bnx2i_cmd *bnx2i_cmd; | 386 | struct bnx2i_cmd *bnx2i_cmd; |
386 | struct bnx2i_tmf_request *tmfabort_wqe; | 387 | struct bnx2i_tmf_request *tmfabort_wqe; |
387 | u32 dword; | 388 | u32 dword; |
389 | u32 scsi_lun[2]; | ||
388 | 390 | ||
389 | bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; | 391 | bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; |
390 | tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; | 392 | tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; |
@@ -426,7 +428,10 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, | |||
426 | default: | 428 | default: |
427 | tmfabort_wqe->ref_itt = RESERVED_ITT; | 429 | tmfabort_wqe->ref_itt = RESERVED_ITT; |
428 | } | 430 | } |
429 | memcpy(tmfabort_wqe->lun, tmfabort_hdr->lun, sizeof(struct scsi_lun)); | 431 | memcpy(scsi_lun, tmfabort_hdr->lun, sizeof(struct scsi_lun)); |
432 | tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]); | ||
433 | tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]); | ||
434 | |||
430 | tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); | 435 | tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); |
431 | 436 | ||
432 | tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; | 437 | tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; |
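
The TMF path above now copies the 8-byte LUN out of the iSCSI header and byte-swaps it into two host-order words before placing it in the work-queue entry. A standalone sketch of that conversion with a hypothetical helper name:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>		/* struct scsi_lun */

/* Hypothetical helper: unpack an 8-byte SAM LUN (big-endian on the wire)
 * into two host-order 32-bit words for a firmware WQE. */
static void demo_lun_to_words(const struct scsi_lun *lun, u32 out[2])
{
	__be32 tmp[2];

	memcpy(tmp, lun, sizeof(tmp));	/* sizeof(tmp) == sizeof(struct scsi_lun) == 8 */
	out[0] = be32_to_cpu(tmp[0]);
	out[1] = be32_to_cpu(tmp[1]);
}
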
@@ -697,10 +702,11 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) | |||
697 | * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate | 702 | * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate |
698 | * iscsi connection context clean-up process | 703 | * iscsi connection context clean-up process |
699 | */ | 704 | */ |
700 | void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | 705 | int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) |
701 | { | 706 | { |
702 | struct kwqe *kwqe_arr[2]; | 707 | struct kwqe *kwqe_arr[2]; |
703 | struct iscsi_kwqe_conn_destroy conn_cleanup; | 708 | struct iscsi_kwqe_conn_destroy conn_cleanup; |
709 | int rc = -EINVAL; | ||
704 | 710 | ||
705 | memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); | 711 | memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); |
706 | 712 | ||
@@ -717,7 +723,9 @@ void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
717 | 723 | ||
718 | kwqe_arr[0] = (struct kwqe *) &conn_cleanup; | 724 | kwqe_arr[0] = (struct kwqe *) &conn_cleanup; |
719 | if (hba->cnic && hba->cnic->submit_kwqes) | 725 | if (hba->cnic && hba->cnic->submit_kwqes) |
720 | hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); | 726 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); |
727 | |||
728 | return rc; | ||
721 | } | 729 | } |
722 | 730 | ||
723 | 731 | ||
@@ -728,8 +736,8 @@ void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
728 | * | 736 | * |
729 | * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE | 737 | * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE |
730 | */ | 738 | */ |
731 | static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, | 739 | static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, |
732 | struct bnx2i_endpoint *ep) | 740 | struct bnx2i_endpoint *ep) |
733 | { | 741 | { |
734 | struct kwqe *kwqe_arr[2]; | 742 | struct kwqe *kwqe_arr[2]; |
735 | struct iscsi_kwqe_conn_offload1 ofld_req1; | 743 | struct iscsi_kwqe_conn_offload1 ofld_req1; |
@@ -737,6 +745,7 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, | |||
737 | dma_addr_t dma_addr; | 745 | dma_addr_t dma_addr; |
738 | int num_kwqes = 2; | 746 | int num_kwqes = 2; |
739 | u32 *ptbl; | 747 | u32 *ptbl; |
748 | int rc = -EINVAL; | ||
740 | 749 | ||
741 | ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; | 750 | ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; |
742 | ofld_req1.hdr.flags = | 751 | ofld_req1.hdr.flags = |
@@ -774,7 +783,9 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, | |||
774 | ofld_req2.num_additional_wqes = 0; | 783 | ofld_req2.num_additional_wqes = 0; |
775 | 784 | ||
776 | if (hba->cnic && hba->cnic->submit_kwqes) | 785 | if (hba->cnic && hba->cnic->submit_kwqes) |
777 | hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | 786 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); |
787 | |||
788 | return rc; | ||
778 | } | 789 | } |
779 | 790 | ||
780 | 791 | ||
@@ -785,8 +796,8 @@ static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, | |||
785 | * | 796 | * |
786 | * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE | 797 | * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE |
787 | */ | 798 | */ |
788 | static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, | 799 | static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, |
789 | struct bnx2i_endpoint *ep) | 800 | struct bnx2i_endpoint *ep) |
790 | { | 801 | { |
791 | struct kwqe *kwqe_arr[5]; | 802 | struct kwqe *kwqe_arr[5]; |
792 | struct iscsi_kwqe_conn_offload1 ofld_req1; | 803 | struct iscsi_kwqe_conn_offload1 ofld_req1; |
@@ -795,6 +806,7 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, | |||
795 | dma_addr_t dma_addr; | 806 | dma_addr_t dma_addr; |
796 | int num_kwqes = 2; | 807 | int num_kwqes = 2; |
797 | u32 *ptbl; | 808 | u32 *ptbl; |
809 | int rc = -EINVAL; | ||
798 | 810 | ||
799 | ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; | 811 | ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; |
800 | ofld_req1.hdr.flags = | 812 | ofld_req1.hdr.flags = |
@@ -840,7 +852,9 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, | |||
840 | num_kwqes += 1; | 852 | num_kwqes += 1; |
841 | 853 | ||
842 | if (hba->cnic && hba->cnic->submit_kwqes) | 854 | if (hba->cnic && hba->cnic->submit_kwqes) |
843 | hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | 855 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); |
856 | |||
857 | return rc; | ||
844 | } | 858 | } |
845 | 859 | ||
846 | /** | 860 | /** |
@@ -851,12 +865,16 @@ static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, | |||
851 | * | 865 | * |
852 | * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE | 866 | * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE |
853 | */ | 867 | */ |
854 | void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | 868 | int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) |
855 | { | 869 | { |
870 | int rc; | ||
871 | |||
856 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) | 872 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) |
857 | bnx2i_5771x_send_conn_ofld_req(hba, ep); | 873 | rc = bnx2i_5771x_send_conn_ofld_req(hba, ep); |
858 | else | 874 | else |
859 | bnx2i_570x_send_conn_ofld_req(hba, ep); | 875 | rc = bnx2i_570x_send_conn_ofld_req(hba, ep); |
876 | |||
877 | return rc; | ||
860 | } | 878 | } |
861 | 879 | ||
862 | 880 | ||
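
The offload and destroy senders above change from void to int so a failure from the cnic submit_kwqes() hook (or its absence) propagates to the caller instead of being dropped. A minimal sketch of the pattern with hypothetical types:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical cnic-style device with a submit hook that may be unset. */
struct demo_cnic_dev {
	int (*submit_kwqes)(struct demo_cnic_dev *dev, void *wqes[], u32 num);
};

static int demo_send_conn_req(struct demo_cnic_dev *cnic, void *wqes[], u32 num)
{
	int rc = -EINVAL;	/* reported when the hook is not registered */

	if (cnic && cnic->submit_kwqes)
		rc = cnic->submit_kwqes(cnic, wqes, num);

	return rc;
}
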
@@ -1513,7 +1531,7 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, | |||
1513 | task = iscsi_itt_to_task(conn, | 1531 | task = iscsi_itt_to_task(conn, |
1514 | nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); | 1532 | nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); |
1515 | if (task) | 1533 | if (task) |
1516 | iscsi_put_task(task); | 1534 | __iscsi_put_task(task); |
1517 | spin_unlock(&session->lock); | 1535 | spin_unlock(&session->lock); |
1518 | } | 1536 | } |
1519 | 1537 | ||
@@ -1549,11 +1567,9 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, | |||
1549 | struct iscsi_task *task; | 1567 | struct iscsi_task *task; |
1550 | struct bnx2i_nop_in_msg *nop_in; | 1568 | struct bnx2i_nop_in_msg *nop_in; |
1551 | struct iscsi_nopin *hdr; | 1569 | struct iscsi_nopin *hdr; |
1552 | u32 itt; | ||
1553 | int tgt_async_nop = 0; | 1570 | int tgt_async_nop = 0; |
1554 | 1571 | ||
1555 | nop_in = (struct bnx2i_nop_in_msg *)cqe; | 1572 | nop_in = (struct bnx2i_nop_in_msg *)cqe; |
1556 | itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX; | ||
1557 | 1573 | ||
1558 | spin_lock(&session->lock); | 1574 | spin_lock(&session->lock); |
1559 | hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; | 1575 | hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; |
@@ -1563,7 +1579,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, | |||
1563 | hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); | 1579 | hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); |
1564 | hdr->ttt = cpu_to_be32(nop_in->ttt); | 1580 | hdr->ttt = cpu_to_be32(nop_in->ttt); |
1565 | 1581 | ||
1566 | if (itt == (u16) RESERVED_ITT) { | 1582 | if (nop_in->itt == (u16) RESERVED_ITT) { |
1567 | bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); | 1583 | bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); |
1568 | hdr->itt = RESERVED_ITT; | 1584 | hdr->itt = RESERVED_ITT; |
1569 | tgt_async_nop = 1; | 1585 | tgt_async_nop = 1; |
@@ -1571,7 +1587,8 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session, | |||
1571 | } | 1587 | } |
1572 | 1588 | ||
1573 | /* this is a response to one of our nop-outs */ | 1589 | /* this is a response to one of our nop-outs */ |
1574 | task = iscsi_itt_to_task(conn, itt); | 1590 | task = iscsi_itt_to_task(conn, |
1591 | (itt_t) (nop_in->itt & ISCSI_NOP_IN_MSG_INDEX)); | ||
1575 | if (task) { | 1592 | if (task) { |
1576 | hdr->flags = ISCSI_FLAG_CMD_FINAL; | 1593 | hdr->flags = ISCSI_FLAG_CMD_FINAL; |
1577 | hdr->itt = task->hdr->itt; | 1594 | hdr->itt = task->hdr->itt; |
@@ -1721,9 +1738,18 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) | |||
1721 | if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) | 1738 | if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) |
1722 | break; | 1739 | break; |
1723 | 1740 | ||
1724 | if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) | 1741 | if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) { |
1742 | if (nopin->op_code == ISCSI_OP_NOOP_IN && | ||
1743 | nopin->itt == (u16) RESERVED_ITT) { | ||
1744 | printk(KERN_ALERT "bnx2i: Unsolicited " | ||
1745 | "NOP-In detected for suspended " | ||
1746 | "connection dev=%s!\n", | ||
1747 | bnx2i_conn->hba->netdev->name); | ||
1748 | bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); | ||
1749 | goto cqe_out; | ||
1750 | } | ||
1725 | break; | 1751 | break; |
1726 | 1752 | } | |
1727 | tgt_async_msg = 0; | 1753 | tgt_async_msg = 0; |
1728 | 1754 | ||
1729 | switch (nopin->op_code) { | 1755 | switch (nopin->op_code) { |
@@ -1770,10 +1796,9 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) | |||
1770 | printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", | 1796 | printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", |
1771 | nopin->op_code); | 1797 | nopin->op_code); |
1772 | } | 1798 | } |
1773 | |||
1774 | if (!tgt_async_msg) | 1799 | if (!tgt_async_msg) |
1775 | bnx2i_conn->ep->num_active_cmds--; | 1800 | bnx2i_conn->ep->num_active_cmds--; |
1776 | 1801 | cqe_out: | |
1777 | /* clear out in production version only, till beta keep opcode | 1802 | /* clear out in production version only, till beta keep opcode |
1778 | * field intact, will be helpful in debugging (context dump) | 1803 | * field intact, will be helpful in debugging (context dump) |
1779 | * nopin->op_code = 0; | 1804 | * nopin->op_code = 0; |
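Editorial note on the two hunks above: when RX is suspended, an unsolicited target NOP-In (itt equal to the reserved value) still has its RQ slot reclaimed, and control jumps to the new cqe_out label so the CQE is retired without touching per-command accounting. A rough stand-alone sketch of that control flow, with simplified constants and stub functions rather than the driver's structures:

    #include <stdio.h>
    #include <stdint.h>

    #define RESERVED_ITT 0xffffU   /* simplified stand-in for the driver's constant */
    #define OP_NOOP_IN   0x20

    static void adjust_rq(void)   { puts("reclaim RQ slot"); }
    static void consume_cqe(void) { puts("advance CQE index"); }

    static void process_cqe(int rx_suspended, uint8_t op, uint16_t itt)
    {
        if (rx_suspended) {
            if (op == OP_NOOP_IN && itt == (uint16_t)RESERVED_ITT) {
                adjust_rq();
                goto cqe_out;   /* skip command accounting, still retire the CQE */
            }
            return;             /* leave the CQE pending while suspended */
        }
        /* ... normal opcode dispatch would run here ... */
    cqe_out:
        consume_cqe();
    }

    int main(void)
    {
        process_cqe(1, OP_NOOP_IN, RESERVED_ITT);
        process_cqe(1, 0x21, 5);
        return 0;
    }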
@@ -2154,11 +2179,24 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, | |||
2154 | } | 2179 | } |
2155 | 2180 | ||
2156 | if (ofld_kcqe->completion_status) { | 2181 | if (ofld_kcqe->completion_status) { |
2182 | ep->state = EP_STATE_OFLD_FAILED; | ||
2157 | if (ofld_kcqe->completion_status == | 2183 | if (ofld_kcqe->completion_status == |
2158 | ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) | 2184 | ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) |
2159 | printk(KERN_ALERT "bnx2i: unable to allocate" | 2185 | printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - unable " |
2160 | " iSCSI context resources\n"); | 2186 | "to allocate iSCSI context resources\n", |
2161 | ep->state = EP_STATE_OFLD_FAILED; | 2187 | hba->netdev->name); |
2188 | else if (ofld_kcqe->completion_status == | ||
2189 | ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE) | ||
2190 | printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " | ||
2191 | "opcode\n", hba->netdev->name); | ||
2192 | else if (ofld_kcqe->completion_status == | ||
2193 | ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY) | ||
2194 | /* error status code valid only for 5771x chipset */ | ||
2195 | ep->state = EP_STATE_OFLD_FAILED_CID_BUSY; | ||
2196 | else | ||
2197 | printk(KERN_ALERT "bnx2i (%s): ofld1 cmpl - invalid " | ||
2198 | "error code %d\n", hba->netdev->name, | ||
2199 | ofld_kcqe->completion_status); | ||
2162 | } else { | 2200 | } else { |
2163 | ep->state = EP_STATE_OFLD_COMPL; | 2201 | ep->state = EP_STATE_OFLD_COMPL; |
2164 | cid_addr = ofld_kcqe->iscsi_conn_context_id; | 2202 | cid_addr = ofld_kcqe->iscsi_conn_context_id; |
@@ -2339,10 +2377,14 @@ static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk) | |||
2339 | static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) | 2377 | static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) |
2340 | { | 2378 | { |
2341 | struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; | 2379 | struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; |
2380 | u32 old_state = ep->state; | ||
2342 | 2381 | ||
2343 | ep->state = EP_STATE_TCP_RST_RCVD; | 2382 | ep->state = EP_STATE_TCP_RST_RCVD; |
2344 | if (ep->conn) | 2383 | if (old_state == EP_STATE_DISCONN_START) |
2345 | bnx2i_recovery_que_add_conn(ep->hba, ep->conn); | 2384 | wake_up_interruptible(&ep->ofld_wait); |
2385 | else | ||
2386 | if (ep->conn) | ||
2387 | bnx2i_recovery_que_add_conn(ep->hba, ep->conn); | ||
2346 | } | 2388 | } |
2347 | 2389 | ||
2348 | 2390 | ||
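Editorial note on the hunk above: bnx2i_cm_remote_abort now records the state before overwriting it with EP_STATE_TCP_RST_RCVD; if a disconnect was already in flight, the waiter on ofld_wait is woken instead of queueing connection recovery. A small sketch of the "capture previous state, then notify or recover" shape, using plain enums and stubs:

    #include <stdio.h>

    enum ep_state { EP_IDLE, EP_DISCONN_START, EP_TCP_RST_RCVD };

    static void wake_disconnect_waiter(void) { puts("wake up disconnect waiter"); }
    static void queue_recovery(void)         { puts("queue connection recovery"); }

    static void remote_abort(enum ep_state *state, int have_conn)
    {
        enum ep_state old_state = *state;   /* capture before overwriting */

        *state = EP_TCP_RST_RCVD;
        if (old_state == EP_DISCONN_START)
            wake_disconnect_waiter();       /* the disconnect path finishes teardown */
        else if (have_conn)
            queue_recovery();
    }

    int main(void)
    {
        enum ep_state s = EP_DISCONN_START;
        remote_abort(&s, 1);
        s = EP_IDLE;
        remote_abort(&s, 1);
        return 0;
    }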
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 50c2aa3b8eb1..72a7b2d4a439 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2i.c: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i.c: Broadcom NetXtreme II iSCSI driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006 - 2009 Broadcom Corporation | 3 | * Copyright (c) 2006 - 2010 Broadcom Corporation |
4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 4 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
5 | * Copyright (c) 2007, 2008 Mike Christie | 5 | * Copyright (c) 2007, 2008 Mike Christie |
6 | * | 6 | * |
@@ -9,6 +9,7 @@ | |||
9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
10 | * | 10 | * |
11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 11 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
12 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
12 | */ | 13 | */ |
13 | 14 | ||
14 | #include "bnx2i.h" | 15 | #include "bnx2i.h" |
@@ -17,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); | |||
17 | static u32 adapter_count; | 18 | static u32 adapter_count; |
18 | 19 | ||
19 | #define DRV_MODULE_NAME "bnx2i" | 20 | #define DRV_MODULE_NAME "bnx2i" |
20 | #define DRV_MODULE_VERSION "2.1.3" | 21 | #define DRV_MODULE_VERSION "2.6.2.2" |
21 | #define DRV_MODULE_RELDATE "Aug 10, 2010" | 22 | #define DRV_MODULE_RELDATE "Nov 23, 2010" |
22 | 23 | ||
23 | static char version[] __devinitdata = | 24 | static char version[] __devinitdata = |
24 | "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ | 25 | "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ |
@@ -65,8 +66,6 @@ MODULE_PARM_DESC(rq_size, "Configure RQ size"); | |||
65 | 66 | ||
66 | u64 iscsi_error_mask = 0x00; | 67 | u64 iscsi_error_mask = 0x00; |
67 | 68 | ||
68 | static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ; | ||
69 | |||
70 | 69 | ||
71 | /** | 70 | /** |
72 | * bnx2i_identify_device - identifies NetXtreme II device type | 71 | * bnx2i_identify_device - identifies NetXtreme II device type |
@@ -211,13 +210,24 @@ void bnx2i_stop(void *handle) | |||
211 | { | 210 | { |
212 | struct bnx2i_hba *hba = handle; | 211 | struct bnx2i_hba *hba = handle; |
213 | int conns_active; | 212 | int conns_active; |
213 | int wait_delay = 1 * HZ; | ||
214 | 214 | ||
215 | /* check if cleanup happened in GOING_DOWN context */ | 215 | /* check if cleanup happened in GOING_DOWN context */ |
216 | if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, | 216 | if (!test_and_set_bit(ADAPTER_STATE_GOING_DOWN, |
217 | &hba->adapter_state)) | 217 | &hba->adapter_state)) { |
218 | iscsi_host_for_each_session(hba->shost, | 218 | iscsi_host_for_each_session(hba->shost, |
219 | bnx2i_drop_session); | 219 | bnx2i_drop_session); |
220 | 220 | wait_delay = hba->hba_shutdown_tmo; | |
221 | } | ||
222 | /* Wait for inflight offload connection tasks to complete before | ||
223 | * proceeding. Forcefully terminate all connection recovery in | ||
224 | * progress at the earliest, either in bind(), send_pdu(LOGIN), | ||
225 | * or conn_start() | ||
226 | */ | ||
227 | wait_event_interruptible_timeout(hba->eh_wait, | ||
228 | (list_empty(&hba->ep_ofld_list) && | ||
229 | list_empty(&hba->ep_destroy_list)), | ||
230 | 10 * HZ); | ||
221 | /* Wait for all endpoints to be torn down, Chip will be reset once | 231 | /* Wait for all endpoints to be torn down, Chip will be reset once |
222 | * control returns to network driver. So it is required to cleanup and | 232 | * control returns to network driver. So it is required to cleanup and |
223 | * release all connection resources before returning from this routine. | 233 | * release all connection resources before returning from this routine. |
@@ -226,7 +236,7 @@ void bnx2i_stop(void *handle) | |||
226 | conns_active = hba->ofld_conns_active; | 236 | conns_active = hba->ofld_conns_active; |
227 | wait_event_interruptible_timeout(hba->eh_wait, | 237 | wait_event_interruptible_timeout(hba->eh_wait, |
228 | (hba->ofld_conns_active != conns_active), | 238 | (hba->ofld_conns_active != conns_active), |
229 | hba->hba_shutdown_tmo); | 239 | wait_delay); |
230 | if (hba->ofld_conns_active == conns_active) | 240 | if (hba->ofld_conns_active == conns_active) |
231 | break; | 241 | break; |
232 | } | 242 | } |
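Editorial note on the bnx2i_stop hunks above: the function now marks ADAPTER_STATE_GOING_DOWN itself (test_and_set_bit), uses the full hba_shutdown_tmo per iteration only when it performed the session drop, and first waits up to 10*HZ for the offload and destroy lists to drain. The driver uses wait_event_interruptible_timeout() for the drain; the sketch below just models a bounded wait on a predicate with polling.

    #include <stdio.h>
    #include <unistd.h>

    /* Poll an "all lists are empty" predicate for at most max_ticks. */
    static int wait_for_drain(int (*empty)(void), int max_ticks)
    {
        int t;

        for (t = 0; t < max_ticks; t++) {
            if (empty())
                return 1;
            usleep(1000);   /* one "tick" */
        }
        return 0;   /* timed out; the caller proceeds with forced cleanup */
    }

    static int counter = 3;
    static int lists_empty(void) { return --counter <= 0; }

    int main(void)
    {
        printf("drained: %d\n", wait_for_drain(lists_empty, 10));
        return 0;
    }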
@@ -235,88 +245,10 @@ void bnx2i_stop(void *handle) | |||
235 | /* This flag should be cleared last so that ep_disconnect() gracefully | 245 | /* This flag should be cleared last so that ep_disconnect() gracefully |
236 | * cleans up connection context | 246 | * cleans up connection context |
237 | */ | 247 | */ |
248 | clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); | ||
238 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); | 249 | clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); |
239 | } | 250 | } |
240 | 251 | ||
241 | /** | ||
242 | * bnx2i_register_device - register bnx2i adapter instance with the cnic driver | ||
243 | * @hba: Adapter instance to register | ||
244 | * | ||
245 | * registers bnx2i adapter instance with the cnic driver while holding the | ||
246 | * adapter structure lock | ||
247 | */ | ||
248 | void bnx2i_register_device(struct bnx2i_hba *hba) | ||
249 | { | ||
250 | int rc; | ||
251 | |||
252 | if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || | ||
253 | test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { | ||
254 | return; | ||
255 | } | ||
256 | |||
257 | rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); | ||
258 | |||
259 | if (!rc) | ||
260 | set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); | ||
261 | } | ||
262 | |||
263 | |||
264 | /** | ||
265 | * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver | ||
266 | * | ||
267 | * registers all bnx2i adapter instances with the cnic driver while holding | ||
268 | * the global resource lock | ||
269 | */ | ||
270 | void bnx2i_reg_dev_all(void) | ||
271 | { | ||
272 | struct bnx2i_hba *hba, *temp; | ||
273 | |||
274 | mutex_lock(&bnx2i_dev_lock); | ||
275 | list_for_each_entry_safe(hba, temp, &adapter_list, link) | ||
276 | bnx2i_register_device(hba); | ||
277 | mutex_unlock(&bnx2i_dev_lock); | ||
278 | } | ||
279 | |||
280 | |||
281 | /** | ||
282 | * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver | ||
283 | * @hba: Adapter instance to unregister | ||
284 | * | ||
285 | * registers bnx2i adapter instance with the cnic driver while holding | ||
286 | * the adapter structure lock | ||
287 | */ | ||
288 | static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) | ||
289 | { | ||
290 | if (hba->ofld_conns_active || | ||
291 | !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) || | ||
292 | test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) | ||
293 | return; | ||
294 | |||
295 | hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); | ||
296 | |||
297 | /* ep_disconnect could come before NETDEV_DOWN, driver won't | ||
298 | * see NETDEV_DOWN as it already unregistered itself. | ||
299 | */ | ||
300 | hba->adapter_state = 0; | ||
301 | clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); | ||
302 | } | ||
303 | |||
304 | /** | ||
305 | * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver | ||
306 | * | ||
307 | * unregisters all bnx2i adapter instances with the cnic driver while holding | ||
308 | * the global resource lock | ||
309 | */ | ||
310 | void bnx2i_unreg_dev_all(void) | ||
311 | { | ||
312 | struct bnx2i_hba *hba, *temp; | ||
313 | |||
314 | mutex_lock(&bnx2i_dev_lock); | ||
315 | list_for_each_entry_safe(hba, temp, &adapter_list, link) | ||
316 | bnx2i_unreg_one_device(hba); | ||
317 | mutex_unlock(&bnx2i_dev_lock); | ||
318 | } | ||
319 | |||
320 | 252 | ||
321 | /** | 253 | /** |
322 | * bnx2i_init_one - initialize an adapter instance and allocate memory resources | 254 | * bnx2i_init_one - initialize an adapter instance and allocate memory resources |
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index fb50efbce087..f0dce26593eb 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. | 2 | * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2006 - 2009 Broadcom Corporation | 4 | * Copyright (c) 2006 - 2010 Broadcom Corporation |
5 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. | 5 | * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. |
6 | * Copyright (c) 2007, 2008 Mike Christie | 6 | * Copyright (c) 2007, 2008 Mike Christie |
7 | * | 7 | * |
@@ -10,6 +10,7 @@ | |||
10 | * the Free Software Foundation. | 10 | * the Free Software Foundation. |
11 | * | 11 | * |
12 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 12 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
13 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
13 | */ | 14 | */ |
14 | 15 | ||
15 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
@@ -411,7 +412,9 @@ static void bnx2i_free_ep(struct iscsi_endpoint *ep) | |||
411 | bnx2i_ep->state = EP_STATE_IDLE; | 412 | bnx2i_ep->state = EP_STATE_IDLE; |
412 | bnx2i_ep->hba->ofld_conns_active--; | 413 | bnx2i_ep->hba->ofld_conns_active--; |
413 | 414 | ||
414 | bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); | 415 | if (bnx2i_ep->ep_iscsi_cid != (u16) -1) |
416 | bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); | ||
417 | |||
415 | if (bnx2i_ep->conn) { | 418 | if (bnx2i_ep->conn) { |
416 | bnx2i_ep->conn->ep = NULL; | 419 | bnx2i_ep->conn->ep = NULL; |
417 | bnx2i_ep->conn = NULL; | 420 | bnx2i_ep->conn = NULL; |
@@ -1383,6 +1386,12 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, | |||
1383 | ep = iscsi_lookup_endpoint(transport_fd); | 1386 | ep = iscsi_lookup_endpoint(transport_fd); |
1384 | if (!ep) | 1387 | if (!ep) |
1385 | return -EINVAL; | 1388 | return -EINVAL; |
1389 | /* | ||
1390 | * Forcefully terminate all in progress connection recovery at the | ||
1391 | * earliest, either in bind(), send_pdu(LOGIN), or conn_start() | ||
1392 | */ | ||
1393 | if (bnx2i_adapter_ready(hba)) | ||
1394 | return -EIO; | ||
1386 | 1395 | ||
1387 | bnx2i_ep = ep->dd_data; | 1396 | bnx2i_ep = ep->dd_data; |
1388 | if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || | 1397 | if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || |
@@ -1404,7 +1413,6 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, | |||
1404 | hba->netdev->name); | 1413 | hba->netdev->name); |
1405 | return -EEXIST; | 1414 | return -EEXIST; |
1406 | } | 1415 | } |
1407 | |||
1408 | bnx2i_ep->conn = bnx2i_conn; | 1416 | bnx2i_ep->conn = bnx2i_conn; |
1409 | bnx2i_conn->ep = bnx2i_ep; | 1417 | bnx2i_conn->ep = bnx2i_ep; |
1410 | bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; | 1418 | bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; |
@@ -1461,21 +1469,28 @@ static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn, | |||
1461 | struct bnx2i_conn *bnx2i_conn = conn->dd_data; | 1469 | struct bnx2i_conn *bnx2i_conn = conn->dd_data; |
1462 | int len = 0; | 1470 | int len = 0; |
1463 | 1471 | ||
1472 | if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba)) | ||
1473 | goto out; | ||
1474 | |||
1464 | switch (param) { | 1475 | switch (param) { |
1465 | case ISCSI_PARAM_CONN_PORT: | 1476 | case ISCSI_PARAM_CONN_PORT: |
1466 | if (bnx2i_conn->ep) | 1477 | mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock); |
1478 | if (bnx2i_conn->ep->cm_sk) | ||
1467 | len = sprintf(buf, "%hu\n", | 1479 | len = sprintf(buf, "%hu\n", |
1468 | bnx2i_conn->ep->cm_sk->dst_port); | 1480 | bnx2i_conn->ep->cm_sk->dst_port); |
1481 | mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock); | ||
1469 | break; | 1482 | break; |
1470 | case ISCSI_PARAM_CONN_ADDRESS: | 1483 | case ISCSI_PARAM_CONN_ADDRESS: |
1471 | if (bnx2i_conn->ep) | 1484 | mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock); |
1485 | if (bnx2i_conn->ep->cm_sk) | ||
1472 | len = sprintf(buf, "%pI4\n", | 1486 | len = sprintf(buf, "%pI4\n", |
1473 | &bnx2i_conn->ep->cm_sk->dst_ip); | 1487 | &bnx2i_conn->ep->cm_sk->dst_ip); |
1488 | mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock); | ||
1474 | break; | 1489 | break; |
1475 | default: | 1490 | default: |
1476 | return iscsi_conn_get_param(cls_conn, param, buf); | 1491 | return iscsi_conn_get_param(cls_conn, param, buf); |
1477 | } | 1492 | } |
1478 | 1493 | out: | |
1479 | return len; | 1494 | return len; |
1480 | } | 1495 | } |
1481 | 1496 | ||
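Editorial note on the hunk above: bnx2i_conn_get_param now bails out unless the conn, ep, and hba pointers are all populated, and it takes net_dev_lock around the cm_sk dereference so a concurrent disconnect cannot free the socket mid-read. A stripped-down illustration of the guarded-getter shape, with a pthread mutex standing in for the hba mutex and hypothetical struct names:

    #include <stdio.h>
    #include <pthread.h>

    struct sock { unsigned short dst_port; };
    struct ep   { struct sock *sk; pthread_mutex_t lock; };

    static int get_conn_port(struct ep *ep, char *buf, size_t len)
    {
        int n = 0;

        if (!ep)                      /* reject a half-torn-down connection */
            return 0;

        pthread_mutex_lock(&ep->lock);
        if (ep->sk)                   /* the socket may already be gone */
            n = snprintf(buf, len, "%hu\n", ep->sk->dst_port);
        pthread_mutex_unlock(&ep->lock);
        return n;
    }

    int main(void)
    {
        struct sock sk = { .dst_port = 3260 };
        struct ep ep = { .sk = &sk, .lock = PTHREAD_MUTEX_INITIALIZER };
        char buf[16];

        get_conn_port(&ep, buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
    }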
@@ -1599,8 +1614,6 @@ static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) | |||
1599 | struct bnx2i_hba *hba; | 1614 | struct bnx2i_hba *hba; |
1600 | struct cnic_dev *cnic = NULL; | 1615 | struct cnic_dev *cnic = NULL; |
1601 | 1616 | ||
1602 | bnx2i_reg_dev_all(); | ||
1603 | |||
1604 | hba = get_adapter_list_head(); | 1617 | hba = get_adapter_list_head(); |
1605 | if (hba && hba->cnic) | 1618 | if (hba && hba->cnic) |
1606 | cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); | 1619 | cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); |
@@ -1640,18 +1653,26 @@ no_nx2_route: | |||
1640 | static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, | 1653 | static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, |
1641 | struct bnx2i_endpoint *ep) | 1654 | struct bnx2i_endpoint *ep) |
1642 | { | 1655 | { |
1643 | if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) | 1656 | if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk) |
1644 | hba->cnic->cm_destroy(ep->cm_sk); | 1657 | hba->cnic->cm_destroy(ep->cm_sk); |
1645 | 1658 | ||
1646 | if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) | ||
1647 | ep->state = EP_STATE_DISCONN_COMPL; | ||
1648 | |||
1649 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && | 1659 | if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && |
1650 | ep->state == EP_STATE_DISCONN_TIMEDOUT) { | 1660 | ep->state == EP_STATE_DISCONN_TIMEDOUT) { |
1651 | printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump," | 1661 | if (ep->conn && ep->conn->cls_conn && |
1652 | " NW/PCIe trace, driver msgs to developers" | 1662 | ep->conn->cls_conn->dd_data) { |
1653 | " for analysis\n"); | 1663 | struct iscsi_conn *conn = ep->conn->cls_conn->dd_data; |
1654 | return 1; | 1664 | |
1665 | /* Must suspend all rx queue activity for this ep */ | ||
1666 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | ||
1667 | } | ||
1668 | /* CONN_DISCONNECT timeout may or may not be an issue depending | ||
1669 | * on what transpired in the TCP layer; different targets behave | ||
1670 | * differently | ||
1671 | */ | ||
1672 | printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, " | ||
1673 | "please submit GRC Dump, NW/PCIe trace, " | ||
1674 | "driver msgs to developers for analysis\n", | ||
1675 | hba->netdev->name); | ||
1655 | } | 1676 | } |
1656 | 1677 | ||
1657 | ep->state = EP_STATE_CLEANUP_START; | 1678 | ep->state = EP_STATE_CLEANUP_START; |
@@ -1664,7 +1685,9 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, | |||
1664 | bnx2i_ep_destroy_list_add(hba, ep); | 1685 | bnx2i_ep_destroy_list_add(hba, ep); |
1665 | 1686 | ||
1666 | /* destroy iSCSI context, wait for it to complete */ | 1687 | /* destroy iSCSI context, wait for it to complete */ |
1667 | bnx2i_send_conn_destroy(hba, ep); | 1688 | if (bnx2i_send_conn_destroy(hba, ep)) |
1689 | ep->state = EP_STATE_CLEANUP_CMPL; | ||
1690 | |||
1668 | wait_event_interruptible(ep->ofld_wait, | 1691 | wait_event_interruptible(ep->ofld_wait, |
1669 | (ep->state != EP_STATE_CLEANUP_START)); | 1692 | (ep->state != EP_STATE_CLEANUP_START)); |
1670 | 1693 | ||
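Editorial note on the hunk above: with bnx2i_send_conn_destroy now returning a status, a failed destroy request immediately forces the endpoint out of EP_STATE_CLEANUP_START, so the wait below cannot block forever on a completion that will never arrive. A tiny sketch of that "fail fast so the waiter's condition is already true" idea, with simplified states:

    #include <stdio.h>

    enum { CLEANUP_START, CLEANUP_CMPL };

    static int post_destroy_request(int hw_ok) { return hw_ok ? 0 : -1; }

    static void tear_down(int hw_ok)
    {
        int state = CLEANUP_START;

        if (post_destroy_request(hw_ok))
            state = CLEANUP_CMPL;   /* no completion coming; satisfy the wait now */

        /* Stand-in for waiting until state leaves CLEANUP_START; in the driver
         * the IRQ completion path flips the state. */
        while (state == CLEANUP_START)
            state = CLEANUP_CMPL;

        puts("teardown finished");
    }

    int main(void)
    {
        tear_down(0);
        tear_down(1);
        return 0;
    }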
@@ -1711,8 +1734,6 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1711 | if (shost) { | 1734 | if (shost) { |
1712 | /* driver is given scsi host to work with */ | 1735 | /* driver is given scsi host to work with */ |
1713 | hba = iscsi_host_priv(shost); | 1736 | hba = iscsi_host_priv(shost); |
1714 | /* Register the device with cnic if not already done so */ | ||
1715 | bnx2i_register_device(hba); | ||
1716 | } else | 1737 | } else |
1717 | /* | 1738 | /* |
1718 | * check if the given destination can be reached through | 1739 | * check if the given destination can be reached through |
@@ -1720,13 +1741,17 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1720 | */ | 1741 | */ |
1721 | hba = bnx2i_check_route(dst_addr); | 1742 | hba = bnx2i_check_route(dst_addr); |
1722 | 1743 | ||
1723 | if (!hba || test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) { | 1744 | if (!hba) { |
1724 | rc = -EINVAL; | 1745 | rc = -EINVAL; |
1725 | goto nohba; | 1746 | goto nohba; |
1726 | } | 1747 | } |
1748 | mutex_lock(&hba->net_dev_lock); | ||
1727 | 1749 | ||
1750 | if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) { | ||
1751 | rc = -EPERM; | ||
1752 | goto check_busy; | ||
1753 | } | ||
1728 | cnic = hba->cnic; | 1754 | cnic = hba->cnic; |
1729 | mutex_lock(&hba->net_dev_lock); | ||
1730 | ep = bnx2i_alloc_ep(hba); | 1755 | ep = bnx2i_alloc_ep(hba); |
1731 | if (!ep) { | 1756 | if (!ep) { |
1732 | rc = -ENOMEM; | 1757 | rc = -ENOMEM; |
@@ -1734,23 +1759,21 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1734 | } | 1759 | } |
1735 | bnx2i_ep = ep->dd_data; | 1760 | bnx2i_ep = ep->dd_data; |
1736 | 1761 | ||
1737 | if (bnx2i_adapter_ready(hba)) { | ||
1738 | rc = -EPERM; | ||
1739 | goto net_if_down; | ||
1740 | } | ||
1741 | |||
1742 | bnx2i_ep->num_active_cmds = 0; | 1762 | bnx2i_ep->num_active_cmds = 0; |
1743 | iscsi_cid = bnx2i_alloc_iscsi_cid(hba); | 1763 | iscsi_cid = bnx2i_alloc_iscsi_cid(hba); |
1744 | if (iscsi_cid == -1) { | 1764 | if (iscsi_cid == -1) { |
1745 | printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n"); | 1765 | printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate " |
1766 | "iscsi cid\n", hba->netdev->name); | ||
1746 | rc = -ENOMEM; | 1767 | rc = -ENOMEM; |
1747 | goto iscsi_cid_err; | 1768 | bnx2i_free_ep(ep); |
1769 | goto check_busy; | ||
1748 | } | 1770 | } |
1749 | bnx2i_ep->hba_age = hba->age; | 1771 | bnx2i_ep->hba_age = hba->age; |
1750 | 1772 | ||
1751 | rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); | 1773 | rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); |
1752 | if (rc != 0) { | 1774 | if (rc != 0) { |
1753 | printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n"); | 1775 | printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error" |
1776 | "\n", hba->netdev->name); | ||
1754 | rc = -ENOMEM; | 1777 | rc = -ENOMEM; |
1755 | goto qp_resc_err; | 1778 | goto qp_resc_err; |
1756 | } | 1779 | } |
@@ -1765,7 +1788,18 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1765 | bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; | 1788 | bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; |
1766 | add_timer(&bnx2i_ep->ofld_timer); | 1789 | add_timer(&bnx2i_ep->ofld_timer); |
1767 | 1790 | ||
1768 | bnx2i_send_conn_ofld_req(hba, bnx2i_ep); | 1791 | if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { |
1792 | if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { | ||
1793 | printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", | ||
1794 | hba->netdev->name, bnx2i_ep->ep_iscsi_cid); | ||
1795 | rc = -EBUSY; | ||
1796 | } else | ||
1797 | rc = -ENOSPC; | ||
1798 | printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe" | ||
1799 | "\n", hba->netdev->name); | ||
1800 | bnx2i_ep_ofld_list_del(hba, bnx2i_ep); | ||
1801 | goto conn_failed; | ||
1802 | } | ||
1769 | 1803 | ||
1770 | /* Wait for CNIC hardware to setup conn context and return 'cid' */ | 1804 | /* Wait for CNIC hardware to setup conn context and return 'cid' */ |
1771 | wait_event_interruptible(bnx2i_ep->ofld_wait, | 1805 | wait_event_interruptible(bnx2i_ep->ofld_wait, |
@@ -1778,7 +1812,12 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1778 | bnx2i_ep_ofld_list_del(hba, bnx2i_ep); | 1812 | bnx2i_ep_ofld_list_del(hba, bnx2i_ep); |
1779 | 1813 | ||
1780 | if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { | 1814 | if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { |
1781 | rc = -ENOSPC; | 1815 | if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) { |
1816 | printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n", | ||
1817 | hba->netdev->name, bnx2i_ep->ep_iscsi_cid); | ||
1818 | rc = -EBUSY; | ||
1819 | } else | ||
1820 | rc = -ENOSPC; | ||
1782 | goto conn_failed; | 1821 | goto conn_failed; |
1783 | } | 1822 | } |
1784 | 1823 | ||
@@ -1786,7 +1825,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, | |||
1786 | iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); | 1825 | iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); |
1787 | if (rc) { | 1826 | if (rc) { |
1788 | rc = -EINVAL; | 1827 | rc = -EINVAL; |
1789 | goto conn_failed; | 1828 | /* Need to terminate and cleanup the connection */ |
1829 | goto release_ep; | ||
1790 | } | 1830 | } |
1791 | 1831 | ||
1792 | bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; | 1832 | bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; |
@@ -1830,15 +1870,12 @@ release_ep: | |||
1830 | return ERR_PTR(rc); | 1870 | return ERR_PTR(rc); |
1831 | } | 1871 | } |
1832 | conn_failed: | 1872 | conn_failed: |
1833 | net_if_down: | ||
1834 | iscsi_cid_err: | ||
1835 | bnx2i_free_qp_resc(hba, bnx2i_ep); | 1873 | bnx2i_free_qp_resc(hba, bnx2i_ep); |
1836 | qp_resc_err: | 1874 | qp_resc_err: |
1837 | bnx2i_free_ep(ep); | 1875 | bnx2i_free_ep(ep); |
1838 | check_busy: | 1876 | check_busy: |
1839 | mutex_unlock(&hba->net_dev_lock); | 1877 | mutex_unlock(&hba->net_dev_lock); |
1840 | nohba: | 1878 | nohba: |
1841 | bnx2i_unreg_dev_all(); | ||
1842 | return ERR_PTR(rc); | 1879 | return ERR_PTR(rc); |
1843 | } | 1880 | } |
1844 | 1881 | ||
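Editorial note on the bnx2i_ep_connect changes above: the error labels are consolidated (net_if_down and iscsi_cid_err disappear, a CID-allocation failure frees the endpoint and jumps to check_busy, and a cm_create failure now goes through release_ep so the offloaded context is torn down). The general goto-unwind shape, as a self-contained sketch with hypothetical step names; the driver keeps its resources on success, while this sketch frees them either way so it stays leak-free:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical two-resource setup with reverse-order cleanup on failure. */
    static int do_connect(int fail_at)
    {
        void *ep, *qp = NULL;
        int rc = 0;

        ep = malloc(16);
        if (!ep)
            return -1;
        if (fail_at == 1) { rc = -1; goto free_ep; }

        qp = malloc(16);
        if (!qp) { rc = -2; goto free_ep; }
        if (fail_at == 2) { rc = -2; goto free_qp; }

        puts("connected");
    free_qp:
        free(qp);
    free_ep:
        free(ep);
        return rc;
    }

    int main(void)
    {
        printf("%d %d %d\n", do_connect(0), do_connect(1), do_connect(2));
        return 0;
    }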
@@ -1898,12 +1935,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) | |||
1898 | cnic_dev_10g = 1; | 1935 | cnic_dev_10g = 1; |
1899 | 1936 | ||
1900 | switch (bnx2i_ep->state) { | 1937 | switch (bnx2i_ep->state) { |
1901 | case EP_STATE_CONNECT_START: | 1938 | case EP_STATE_CONNECT_FAILED: |
1902 | case EP_STATE_CLEANUP_FAILED: | 1939 | case EP_STATE_CLEANUP_FAILED: |
1903 | case EP_STATE_OFLD_FAILED: | 1940 | case EP_STATE_OFLD_FAILED: |
1904 | case EP_STATE_DISCONN_TIMEDOUT: | 1941 | case EP_STATE_DISCONN_TIMEDOUT: |
1905 | ret = 0; | 1942 | ret = 0; |
1906 | break; | 1943 | break; |
1944 | case EP_STATE_CONNECT_START: | ||
1907 | case EP_STATE_CONNECT_COMPL: | 1945 | case EP_STATE_CONNECT_COMPL: |
1908 | case EP_STATE_ULP_UPDATE_START: | 1946 | case EP_STATE_ULP_UPDATE_START: |
1909 | case EP_STATE_ULP_UPDATE_COMPL: | 1947 | case EP_STATE_ULP_UPDATE_COMPL: |
@@ -1914,13 +1952,10 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) | |||
1914 | ret = 1; | 1952 | ret = 1; |
1915 | break; | 1953 | break; |
1916 | case EP_STATE_TCP_RST_RCVD: | 1954 | case EP_STATE_TCP_RST_RCVD: |
1917 | ret = 0; | ||
1918 | break; | ||
1919 | case EP_STATE_CONNECT_FAILED: | ||
1920 | if (cnic_dev_10g) | 1955 | if (cnic_dev_10g) |
1921 | ret = 1; | ||
1922 | else | ||
1923 | ret = 0; | 1956 | ret = 0; |
1957 | else | ||
1958 | ret = 1; | ||
1924 | break; | 1959 | break; |
1925 | default: | 1960 | default: |
1926 | ret = 0; | 1961 | ret = 0; |
@@ -1953,7 +1988,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) | |||
1953 | if (!cnic) | 1988 | if (!cnic) |
1954 | return 0; | 1989 | return 0; |
1955 | 1990 | ||
1956 | if (bnx2i_ep->state == EP_STATE_IDLE) | 1991 | if (bnx2i_ep->state == EP_STATE_IDLE || |
1992 | bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) | ||
1957 | return 0; | 1993 | return 0; |
1958 | 1994 | ||
1959 | if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) | 1995 | if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) |
@@ -1979,9 +2015,10 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) | |||
1979 | if (session->state == ISCSI_STATE_LOGGING_OUT) { | 2015 | if (session->state == ISCSI_STATE_LOGGING_OUT) { |
1980 | if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { | 2016 | if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { |
1981 | /* Logout sent, but no resp */ | 2017 | /* Logout sent, but no resp */ |
1982 | printk(KERN_ALERT "bnx2i - WARNING " | 2018 | printk(KERN_ALERT "bnx2i (%s): WARNING" |
1983 | "logout response was not " | 2019 | " logout response was not " |
1984 | "received!\n"); | 2020 | "received!\n", |
2021 | bnx2i_ep->hba->netdev->name); | ||
1985 | } else if (bnx2i_ep->state == | 2022 | } else if (bnx2i_ep->state == |
1986 | EP_STATE_LOGOUT_RESP_RCVD) | 2023 | EP_STATE_LOGOUT_RESP_RCVD) |
1987 | close = 1; | 2024 | close = 1; |
@@ -1999,9 +2036,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) | |||
1999 | else | 2036 | else |
2000 | close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); | 2037 | close_ret = cnic->cm_abort(bnx2i_ep->cm_sk); |
2001 | 2038 | ||
2002 | /* No longer allow CFC delete if cm_close/abort fails the request */ | ||
2003 | if (close_ret) | 2039 | if (close_ret) |
2004 | printk(KERN_ALERT "bnx2i: %s close/abort(%d) returned %d\n", | 2040 | printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n", |
2005 | bnx2i_ep->hba->netdev->name, close, close_ret); | 2041 | bnx2i_ep->hba->netdev->name, close, close_ret); |
2006 | else | 2042 | else |
2007 | /* wait for option-2 conn teardown */ | 2043 | /* wait for option-2 conn teardown */ |
@@ -2015,7 +2051,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) | |||
2015 | destroy_conn: | 2051 | destroy_conn: |
2016 | bnx2i_ep_active_list_del(hba, bnx2i_ep); | 2052 | bnx2i_ep_active_list_del(hba, bnx2i_ep); |
2017 | if (bnx2i_tear_down_conn(hba, bnx2i_ep)) | 2053 | if (bnx2i_tear_down_conn(hba, bnx2i_ep)) |
2018 | ret = -EINVAL; | 2054 | return -EINVAL; |
2019 | out: | 2055 | out: |
2020 | bnx2i_ep->state = EP_STATE_IDLE; | 2056 | bnx2i_ep->state = EP_STATE_IDLE; |
2021 | return ret; | 2057 | return ret; |
@@ -2054,14 +2090,17 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) | |||
2054 | 2090 | ||
2055 | mutex_lock(&hba->net_dev_lock); | 2091 | mutex_lock(&hba->net_dev_lock); |
2056 | 2092 | ||
2057 | if (bnx2i_ep->state == EP_STATE_IDLE) | 2093 | if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT) |
2058 | goto return_bnx2i_ep; | 2094 | goto out; |
2059 | 2095 | ||
2060 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) | 2096 | if (bnx2i_ep->state == EP_STATE_IDLE) |
2061 | goto free_resc; | 2097 | goto free_resc; |
2062 | 2098 | ||
2063 | if (bnx2i_ep->hba_age != hba->age) | 2099 | if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || |
2100 | (bnx2i_ep->hba_age != hba->age)) { | ||
2101 | bnx2i_ep_active_list_del(hba, bnx2i_ep); | ||
2064 | goto free_resc; | 2102 | goto free_resc; |
2103 | } | ||
2065 | 2104 | ||
2066 | /* Do all chip cleanup here */ | 2105 | /* Do all chip cleanup here */ |
2067 | if (bnx2i_hw_ep_disconnect(bnx2i_ep)) { | 2106 | if (bnx2i_hw_ep_disconnect(bnx2i_ep)) { |
@@ -2070,14 +2109,13 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) | |||
2070 | } | 2109 | } |
2071 | free_resc: | 2110 | free_resc: |
2072 | bnx2i_free_qp_resc(hba, bnx2i_ep); | 2111 | bnx2i_free_qp_resc(hba, bnx2i_ep); |
2073 | return_bnx2i_ep: | 2112 | |
2074 | if (bnx2i_conn) | 2113 | if (bnx2i_conn) |
2075 | bnx2i_conn->ep = NULL; | 2114 | bnx2i_conn->ep = NULL; |
2076 | 2115 | ||
2077 | bnx2i_free_ep(ep); | 2116 | bnx2i_free_ep(ep); |
2117 | out: | ||
2078 | mutex_unlock(&hba->net_dev_lock); | 2118 | mutex_unlock(&hba->net_dev_lock); |
2079 | if (!hba->ofld_conns_active) | ||
2080 | bnx2i_unreg_dev_all(); | ||
2081 | 2119 | ||
2082 | wake_up_interruptible(&hba->eh_wait); | 2120 | wake_up_interruptible(&hba->eh_wait); |
2083 | } | 2121 | } |
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c index 96426b751eb2..9174196d9033 100644 --- a/drivers/scsi/bnx2i/bnx2i_sysfs.c +++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c | |||
@@ -1,12 +1,13 @@ | |||
1 | /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. | 1 | /* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2004 - 2009 Broadcom Corporation | 3 | * Copyright (c) 2004 - 2010 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation. | 7 | * the Free Software Foundation. |
8 | * | 8 | * |
9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) | 9 | * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) |
10 | * Maintained by: Eddie Wai (eddie.wai@broadcom.com) | ||
10 | */ | 11 | */ |
11 | 12 | ||
12 | #include "bnx2i.h" | 13 | #include "bnx2i.h" |
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 6fae3d285ae7..b837c5b3c8f9 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c | |||
@@ -442,12 +442,19 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) | |||
442 | sdev = q->queuedata; | 442 | sdev = q->queuedata; |
443 | if (sdev && sdev->scsi_dh_data) | 443 | if (sdev && sdev->scsi_dh_data) |
444 | scsi_dh = sdev->scsi_dh_data->scsi_dh; | 444 | scsi_dh = sdev->scsi_dh_data->scsi_dh; |
445 | if (!scsi_dh || !get_device(&sdev->sdev_gendev)) | 445 | if (!scsi_dh || !get_device(&sdev->sdev_gendev) || |
446 | sdev->sdev_state == SDEV_CANCEL || | ||
447 | sdev->sdev_state == SDEV_DEL) | ||
446 | err = SCSI_DH_NOSYS; | 448 | err = SCSI_DH_NOSYS; |
449 | if (sdev->sdev_state == SDEV_OFFLINE) | ||
450 | err = SCSI_DH_DEV_OFFLINED; | ||
447 | spin_unlock_irqrestore(q->queue_lock, flags); | 451 | spin_unlock_irqrestore(q->queue_lock, flags); |
448 | 452 | ||
449 | if (err) | 453 | if (err) { |
454 | if (fn) | ||
455 | fn(data, err); | ||
450 | return err; | 456 | return err; |
457 | } | ||
451 | 458 | ||
452 | if (scsi_dh->activate) | 459 | if (scsi_dh->activate) |
453 | err = scsi_dh->activate(sdev, fn, data); | 460 | err = scsi_dh->activate(sdev, fn, data); |
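Editorial note on the scsi_dh hunk above: scsi_dh_activate now refuses devices in SDEV_CANCEL/SDEV_DEL, reports SDEV_OFFLINE as SCSI_DH_DEV_OFFLINED, and invokes the completion callback fn(data, err) on these early exits so asynchronous callers are still notified. A compact model of "always complete the callback, even on the error path"; the error value here is only a stand-in, not the SCSI_DH constant:

    #include <stdio.h>

    typedef void (*activate_complete)(void *data, int err);

    static int activate(int dev_gone, activate_complete fn, void *data)
    {
        int err = 0;

        if (dev_gone)
            err = -19;          /* stand-in failure code */

        if (err) {
            if (fn)
                fn(data, err);  /* don't leave the async caller hanging */
            return err;
        }
        /* ... hand off to the handler's ->activate() here ... */
        if (fn)
            fn(data, 0);
        return 0;
    }

    static void done(void *data, int err)
    {
        printf("%s finished with %d\n", (const char *)data, err);
    }

    int main(void)
    {
        activate(1, done, "dead device");
        activate(0, done, "live device");
        return 0;
    }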
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d23a538a9dfc..9f9600b67001 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -854,7 +854,6 @@ static void fcoe_if_destroy(struct fc_lport *lport) | |||
854 | 854 | ||
855 | /* Cleanup the fc_lport */ | 855 | /* Cleanup the fc_lport */ |
856 | fc_lport_destroy(lport); | 856 | fc_lport_destroy(lport); |
857 | fc_fcp_destroy(lport); | ||
858 | 857 | ||
859 | /* Stop the transmit retry timer */ | 858 | /* Stop the transmit retry timer */ |
860 | del_timer_sync(&port->timer); | 859 | del_timer_sync(&port->timer); |
@@ -876,6 +875,9 @@ static void fcoe_if_destroy(struct fc_lport *lport) | |||
876 | fc_remove_host(lport->host); | 875 | fc_remove_host(lport->host); |
877 | scsi_remove_host(lport->host); | 876 | scsi_remove_host(lport->host); |
878 | 877 | ||
878 | /* Destroy lport scsi_priv */ | ||
879 | fc_fcp_destroy(lport); | ||
880 | |||
879 | /* There are no more rports or I/O, free the EM */ | 881 | /* There are no more rports or I/O, free the EM */ |
880 | fc_exch_mgr_free(lport); | 882 | fc_exch_mgr_free(lport); |
881 | 883 | ||
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index bc17c7123202..625c6be25396 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c | |||
@@ -54,6 +54,7 @@ MODULE_LICENSE("GPL v2"); | |||
54 | static void fcoe_ctlr_timeout(unsigned long); | 54 | static void fcoe_ctlr_timeout(unsigned long); |
55 | static void fcoe_ctlr_timer_work(struct work_struct *); | 55 | static void fcoe_ctlr_timer_work(struct work_struct *); |
56 | static void fcoe_ctlr_recv_work(struct work_struct *); | 56 | static void fcoe_ctlr_recv_work(struct work_struct *); |
57 | static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *); | ||
57 | 58 | ||
58 | static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); | 59 | static void fcoe_ctlr_vn_start(struct fcoe_ctlr *); |
59 | static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); | 60 | static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *, struct sk_buff *); |
@@ -176,6 +177,7 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) | |||
176 | fip->mode = mode; | 177 | fip->mode = mode; |
177 | INIT_LIST_HEAD(&fip->fcfs); | 178 | INIT_LIST_HEAD(&fip->fcfs); |
178 | mutex_init(&fip->ctlr_mutex); | 179 | mutex_init(&fip->ctlr_mutex); |
180 | spin_lock_init(&fip->ctlr_lock); | ||
179 | fip->flogi_oxid = FC_XID_UNKNOWN; | 181 | fip->flogi_oxid = FC_XID_UNKNOWN; |
180 | setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); | 182 | setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip); |
181 | INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); | 183 | INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work); |
@@ -231,6 +233,49 @@ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip) | |||
231 | EXPORT_SYMBOL(fcoe_ctlr_destroy); | 233 | EXPORT_SYMBOL(fcoe_ctlr_destroy); |
232 | 234 | ||
233 | /** | 235 | /** |
236 | * fcoe_ctlr_announce() - announce new FCF selection | ||
237 | * @fip: The FCoE controller | ||
238 | * | ||
239 | * Also sets the destination MAC for FCoE and control packets | ||
240 | * | ||
241 | * Called with neither ctlr_mutex nor ctlr_lock held. | ||
242 | */ | ||
243 | static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) | ||
244 | { | ||
245 | struct fcoe_fcf *sel; | ||
246 | struct fcoe_fcf *fcf; | ||
247 | |||
248 | mutex_lock(&fip->ctlr_mutex); | ||
249 | spin_lock_bh(&fip->ctlr_lock); | ||
250 | |||
251 | kfree_skb(fip->flogi_req); | ||
252 | fip->flogi_req = NULL; | ||
253 | list_for_each_entry(fcf, &fip->fcfs, list) | ||
254 | fcf->flogi_sent = 0; | ||
255 | |||
256 | spin_unlock_bh(&fip->ctlr_lock); | ||
257 | sel = fip->sel_fcf; | ||
258 | |||
259 | if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) | ||
260 | goto unlock; | ||
261 | if (!is_zero_ether_addr(fip->dest_addr)) { | ||
262 | printk(KERN_NOTICE "libfcoe: host%d: " | ||
263 | "FIP Fibre-Channel Forwarder MAC %pM deselected\n", | ||
264 | fip->lp->host->host_no, fip->dest_addr); | ||
265 | memset(fip->dest_addr, 0, ETH_ALEN); | ||
266 | } | ||
267 | if (sel) { | ||
268 | printk(KERN_INFO "libfcoe: host%d: FIP selected " | ||
269 | "Fibre-Channel Forwarder MAC %pM\n", | ||
270 | fip->lp->host->host_no, sel->fcf_mac); | ||
271 | memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); | ||
272 | fip->map_dest = 0; | ||
273 | } | ||
274 | unlock: | ||
275 | mutex_unlock(&fip->ctlr_mutex); | ||
276 | } | ||
277 | |||
278 | /** | ||
234 | * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port | 279 | * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port |
235 | * @fip: The FCoE controller to get the maximum FCoE size from | 280 | * @fip: The FCoE controller to get the maximum FCoE size from |
236 | * | 281 | * |
@@ -564,6 +609,9 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, | |||
564 | * The caller must check that the length is a multiple of 4. | 609 | * The caller must check that the length is a multiple of 4. |
565 | * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). | 610 | * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). |
566 | * The skb must also be an fc_frame. | 611 | * The skb must also be an fc_frame. |
612 | * | ||
613 | * This is called from the lower-level driver with spinlocks held, | ||
614 | * so we must not take a mutex here. | ||
567 | */ | 615 | */ |
568 | int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, | 616 | int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, |
569 | struct sk_buff *skb) | 617 | struct sk_buff *skb) |
@@ -601,7 +649,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, | |||
601 | switch (op) { | 649 | switch (op) { |
602 | case ELS_FLOGI: | 650 | case ELS_FLOGI: |
603 | op = FIP_DT_FLOGI; | 651 | op = FIP_DT_FLOGI; |
604 | break; | 652 | if (fip->mode == FIP_MODE_VN2VN) |
653 | break; | ||
654 | spin_lock_bh(&fip->ctlr_lock); | ||
655 | kfree_skb(fip->flogi_req); | ||
656 | fip->flogi_req = skb; | ||
657 | fip->flogi_req_send = 1; | ||
658 | spin_unlock_bh(&fip->ctlr_lock); | ||
659 | schedule_work(&fip->timer_work); | ||
660 | return -EINPROGRESS; | ||
605 | case ELS_FDISC: | 661 | case ELS_FDISC: |
606 | if (ntoh24(fh->fh_s_id)) | 662 | if (ntoh24(fh->fh_s_id)) |
607 | return 0; | 663 | return 0; |
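Editorial note on the hunk above: in fabric mode (not VN2VN), fcoe_ctlr_els_send no longer encapsulates FLOGI inline; the frame is parked in fip->flogi_req under ctlr_lock, flogi_req_send is set, the timer work is kicked, and -EINPROGRESS tells the caller that the controller will transmit it later (possibly to a different FCF on retry). A rough deferred-send sketch with a plain mutex and a flag standing in for the kernel spinlock and workqueue:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <pthread.h>

    struct ctlr {
        pthread_mutex_t lock;
        char *pending_req;  /* parked FLOGI payload */
        int   req_send;     /* "please send when ready" flag */
    };

    /* The caller hands over ownership of buf; return an -EINPROGRESS-style marker. */
    static int queue_flogi(struct ctlr *c, char *buf)
    {
        pthread_mutex_lock(&c->lock);
        free(c->pending_req);   /* drop any older, unsent request */
        c->pending_req = buf;
        c->req_send = 1;
        pthread_mutex_unlock(&c->lock);
        /* in the driver: schedule_work(&fip->timer_work); */
        return -115;            /* -EINPROGRESS */
    }

    static void worker(struct ctlr *c)
    {
        pthread_mutex_lock(&c->lock);
        if (c->req_send && c->pending_req)
            printf("sending deferred FLOGI: %s\n", c->pending_req);
        c->req_send = 0;
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct ctlr c = { .lock = PTHREAD_MUTEX_INITIALIZER };
        queue_flogi(&c, strdup("flogi-frame"));
        worker(&c);
        free(c.pending_req);
        return 0;
    }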
@@ -922,11 +978,9 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
922 | } | 978 | } |
923 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); | 979 | mtu_valid = fcoe_ctlr_mtu_valid(fcf); |
924 | fcf->time = jiffies; | 980 | fcf->time = jiffies; |
925 | if (!found) { | 981 | if (!found) |
926 | LIBFCOE_FIP_DBG(fip, "New FCF for fab %16.16llx " | 982 | LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n", |
927 | "map %x val %d\n", | 983 | fcf->fabric_name, fcf->fcf_mac); |
928 | fcf->fabric_name, fcf->fc_map, mtu_valid); | ||
929 | } | ||
930 | 984 | ||
931 | /* | 985 | /* |
932 | * If this advertisement is not solicited and our max receive size | 986 | * If this advertisement is not solicited and our max receive size |
@@ -945,6 +999,17 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
945 | fcoe_ctlr_solicit(fip, NULL); | 999 | fcoe_ctlr_solicit(fip, NULL); |
946 | 1000 | ||
947 | /* | 1001 | /* |
1002 | * Put this FCF at the head of the list for priority among equals. | ||
1003 | * This helps in the case of an NPV switch which insists we use | ||
1004 | * the FCF that answers multicast solicitations, not the others that | ||
1005 | * are sending periodic multicast advertisements. | ||
1006 | */ | ||
1007 | if (mtu_valid) { | ||
1008 | list_del(&fcf->list); | ||
1009 | list_add(&fcf->list, &fip->fcfs); | ||
1010 | } | ||
1011 | |||
1012 | /* | ||
948 | * If this is the first validated FCF, note the time and | 1013 | * If this is the first validated FCF, note the time and |
949 | * set a timer to trigger selection. | 1014 | * set a timer to trigger selection. |
950 | */ | 1015 | */ |
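Editorial note on the hunk above: the list_del()/list_add() pair moves an FCF that answered our multicast solicitation to the front of fip->fcfs, so among otherwise equal candidates the responsive one wins selection (useful behind an NPV switch, as the added comment says). A stand-alone move-to-front equivalent with a tiny singly linked list, not the kernel list API:

    #include <stdio.h>

    struct fcf {
        const char *mac;
        struct fcf *next;
    };

    /* Move node to the head of the list; a no-op if it is already first. */
    static void move_to_front(struct fcf **head, struct fcf *node)
    {
        struct fcf **pp;

        for (pp = head; *pp; pp = &(*pp)->next)
            if (*pp == node) {
                *pp = node->next;   /* unlink (list_del) */
                node->next = *head; /* relink at head (list_add) */
                *head = node;
                return;
            }
    }

    int main(void)
    {
        struct fcf a = { "fcf-a", NULL }, b = { "fcf-b", NULL }, c = { "fcf-c", NULL };
        struct fcf *head = &a;

        a.next = &b; b.next = &c;
        move_to_front(&head, &c);
        for (struct fcf *p = head; p; p = p->next)
            printf("%s ", p->mac);
        putchar('\n');
        return 0;
    }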
@@ -1061,18 +1126,24 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
1061 | els_op = *(u8 *)(fh + 1); | 1126 | els_op = *(u8 *)(fh + 1); |
1062 | 1127 | ||
1063 | if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && | 1128 | if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) && |
1064 | sub == FIP_SC_REP && els_op == ELS_LS_ACC && | 1129 | sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) { |
1065 | fip->mode != FIP_MODE_VN2VN) { | 1130 | if (els_op == ELS_LS_ACC) { |
1066 | if (!is_valid_ether_addr(granted_mac)) { | 1131 | if (!is_valid_ether_addr(granted_mac)) { |
1067 | LIBFCOE_FIP_DBG(fip, | 1132 | LIBFCOE_FIP_DBG(fip, |
1068 | "Invalid MAC address %pM in FIP ELS\n", | 1133 | "Invalid MAC address %pM in FIP ELS\n", |
1069 | granted_mac); | 1134 | granted_mac); |
1070 | goto drop; | 1135 | goto drop; |
1071 | } | 1136 | } |
1072 | memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); | 1137 | memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN); |
1073 | 1138 | ||
1074 | if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) | 1139 | if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) { |
1075 | fip->flogi_oxid = FC_XID_UNKNOWN; | 1140 | fip->flogi_oxid = FC_XID_UNKNOWN; |
1141 | if (els_dtype == FIP_DT_FLOGI) | ||
1142 | fcoe_ctlr_announce(fip); | ||
1143 | } | ||
1144 | } else if (els_dtype == FIP_DT_FLOGI && | ||
1145 | !fcoe_ctlr_flogi_retry(fip)) | ||
1146 | goto drop; /* retrying FLOGI so drop reject */ | ||
1076 | } | 1147 | } |
1077 | 1148 | ||
1078 | if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && | 1149 | if ((desc_cnt == 0) || ((els_op != ELS_LS_RJT) && |
@@ -1326,20 +1397,39 @@ drop: | |||
1326 | * fcoe_ctlr_select() - Select the best FCF (if possible) | 1397 | * fcoe_ctlr_select() - Select the best FCF (if possible) |
1327 | * @fip: The FCoE controller | 1398 | * @fip: The FCoE controller |
1328 | * | 1399 | * |
1400 | * Returns the selected FCF, or NULL if none are usable. | ||
1401 | * | ||
1329 | * If there are conflicting advertisements, no FCF can be chosen. | 1402 | * If there are conflicting advertisements, no FCF can be chosen. |
1330 | * | 1403 | * |
1404 | * If there is already a selected FCF, this will choose a better one or | ||
1405 | * an equivalent one that hasn't already been sent a FLOGI. | ||
1406 | * | ||
1331 | * Called with lock held. | 1407 | * Called with lock held. |
1332 | */ | 1408 | */ |
1333 | static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | 1409 | static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip) |
1334 | { | 1410 | { |
1335 | struct fcoe_fcf *fcf; | 1411 | struct fcoe_fcf *fcf; |
1336 | struct fcoe_fcf *best = NULL; | 1412 | struct fcoe_fcf *best = fip->sel_fcf; |
1413 | struct fcoe_fcf *first; | ||
1414 | |||
1415 | first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list); | ||
1337 | 1416 | ||
1338 | list_for_each_entry(fcf, &fip->fcfs, list) { | 1417 | list_for_each_entry(fcf, &fip->fcfs, list) { |
1339 | LIBFCOE_FIP_DBG(fip, "consider FCF for fab %16.16llx " | 1418 | LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " |
1340 | "VFID %d map %x val %d\n", | 1419 | "VFID %d mac %pM map %x val %d " |
1341 | fcf->fabric_name, fcf->vfid, | 1420 | "sent %u pri %u\n", |
1342 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf)); | 1421 | fcf->fabric_name, fcf->vfid, fcf->fcf_mac, |
1422 | fcf->fc_map, fcoe_ctlr_mtu_valid(fcf), | ||
1423 | fcf->flogi_sent, fcf->pri); | ||
1424 | if (fcf->fabric_name != first->fabric_name || | ||
1425 | fcf->vfid != first->vfid || | ||
1426 | fcf->fc_map != first->fc_map) { | ||
1427 | LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " | ||
1428 | "or FC-MAP\n"); | ||
1429 | return NULL; | ||
1430 | } | ||
1431 | if (fcf->flogi_sent) | ||
1432 | continue; | ||
1343 | if (!fcoe_ctlr_fcf_usable(fcf)) { | 1433 | if (!fcoe_ctlr_fcf_usable(fcf)) { |
1344 | LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " | 1434 | LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " |
1345 | "map %x %svalid %savailable\n", | 1435 | "map %x %svalid %savailable\n", |
@@ -1349,21 +1439,131 @@ static void fcoe_ctlr_select(struct fcoe_ctlr *fip) | |||
1349 | "" : "un"); | 1439 | "" : "un"); |
1350 | continue; | 1440 | continue; |
1351 | } | 1441 | } |
1352 | if (!best) { | 1442 | if (!best || fcf->pri < best->pri || best->flogi_sent) |
1353 | best = fcf; | ||
1354 | continue; | ||
1355 | } | ||
1356 | if (fcf->fabric_name != best->fabric_name || | ||
1357 | fcf->vfid != best->vfid || | ||
1358 | fcf->fc_map != best->fc_map) { | ||
1359 | LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " | ||
1360 | "or FC-MAP\n"); | ||
1361 | return; | ||
1362 | } | ||
1363 | if (fcf->pri < best->pri) | ||
1364 | best = fcf; | 1443 | best = fcf; |
1365 | } | 1444 | } |
1366 | fip->sel_fcf = best; | 1445 | fip->sel_fcf = best; |
1446 | if (best) { | ||
1447 | LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac); | ||
1448 | fip->port_ka_time = jiffies + | ||
1449 | msecs_to_jiffies(FIP_VN_KA_PERIOD); | ||
1450 | fip->ctlr_ka_time = jiffies + best->fka_period; | ||
1451 | if (time_before(fip->ctlr_ka_time, fip->timer.expires)) | ||
1452 | mod_timer(&fip->timer, fip->ctlr_ka_time); | ||
1453 | } | ||
1454 | return best; | ||
1455 | } | ||
1456 | |||
1457 | /** | ||
1458 | * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF | ||
1459 | * @fip: The FCoE controller | ||
1460 | * | ||
1461 | * Returns non-zero error if it could not be sent. | ||
1462 | * | ||
1463 | * Called with ctlr_mutex and ctlr_lock held. | ||
1464 | * Caller must verify that fip->sel_fcf is not NULL. | ||
1465 | */ | ||
1466 | static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) | ||
1467 | { | ||
1468 | struct sk_buff *skb; | ||
1469 | struct sk_buff *skb_orig; | ||
1470 | struct fc_frame_header *fh; | ||
1471 | int error; | ||
1472 | |||
1473 | skb_orig = fip->flogi_req; | ||
1474 | if (!skb_orig) | ||
1475 | return -EINVAL; | ||
1476 | |||
1477 | /* | ||
1478 | * Clone and send the FLOGI request. If clone fails, use original. | ||
1479 | */ | ||
1480 | skb = skb_clone(skb_orig, GFP_ATOMIC); | ||
1481 | if (!skb) { | ||
1482 | skb = skb_orig; | ||
1483 | fip->flogi_req = NULL; | ||
1484 | } | ||
1485 | fh = (struct fc_frame_header *)skb->data; | ||
1486 | error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb, | ||
1487 | ntoh24(fh->fh_d_id)); | ||
1488 | if (error) { | ||
1489 | kfree_skb(skb); | ||
1490 | return error; | ||
1491 | } | ||
1492 | fip->send(fip, skb); | ||
1493 | fip->sel_fcf->flogi_sent = 1; | ||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | /** | ||
1498 | * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible | ||
1499 | * @fip: The FCoE controller | ||
1500 | * | ||
1501 | * Returns non-zero error code if there's no FLOGI request to retry or | ||
1502 | * no alternate FCF available. | ||
1503 | */ | ||
1504 | static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) | ||
1505 | { | ||
1506 | struct fcoe_fcf *fcf; | ||
1507 | int error; | ||
1508 | |||
1509 | mutex_lock(&fip->ctlr_mutex); | ||
1510 | spin_lock_bh(&fip->ctlr_lock); | ||
1511 | LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n"); | ||
1512 | fcf = fcoe_ctlr_select(fip); | ||
1513 | if (!fcf || fcf->flogi_sent) { | ||
1514 | kfree_skb(fip->flogi_req); | ||
1515 | fip->flogi_req = NULL; | ||
1516 | error = -ENOENT; | ||
1517 | } else { | ||
1518 | fcoe_ctlr_solicit(fip, NULL); | ||
1519 | error = fcoe_ctlr_flogi_send_locked(fip); | ||
1520 | } | ||
1521 | spin_unlock_bh(&fip->ctlr_lock); | ||
1522 | mutex_unlock(&fip->ctlr_mutex); | ||
1523 | return error; | ||
1524 | } | ||
1525 | |||
1526 | |||
1527 | /** | ||
1528 | * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI. | ||
1529 | * @fip: The FCoE controller that timed out | ||
1530 | * | ||
1531 | * Done here because fcoe_ctlr_els_send() can't get mutex. | ||
1532 | * | ||
1533 | * Called with ctlr_mutex held. The caller must not hold ctlr_lock. | ||
1534 | */ | ||
1535 | static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) | ||
1536 | { | ||
1537 | struct fcoe_fcf *fcf; | ||
1538 | |||
1539 | spin_lock_bh(&fip->ctlr_lock); | ||
1540 | fcf = fip->sel_fcf; | ||
1541 | if (!fcf || !fip->flogi_req_send) | ||
1542 | goto unlock; | ||
1543 | |||
1544 | LIBFCOE_FIP_DBG(fip, "sending FLOGI\n"); | ||
1545 | |||
1546 | /* | ||
1547 | * If this FLOGI is being sent due to a timeout retry | ||
1548 | * to the same FCF as before, select a different FCF if possible. | ||
1549 | */ | ||
1550 | if (fcf->flogi_sent) { | ||
1551 | LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n"); | ||
1552 | fcf = fcoe_ctlr_select(fip); | ||
1553 | if (!fcf || fcf->flogi_sent) { | ||
1554 | LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n"); | ||
1555 | list_for_each_entry(fcf, &fip->fcfs, list) | ||
1556 | fcf->flogi_sent = 0; | ||
1557 | fcf = fcoe_ctlr_select(fip); | ||
1558 | } | ||
1559 | } | ||
1560 | if (fcf) { | ||
1561 | fcoe_ctlr_flogi_send_locked(fip); | ||
1562 | fip->flogi_req_send = 0; | ||
1563 | } else /* XXX */ | ||
1564 | LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); | ||
1565 | unlock: | ||
1566 | spin_unlock_bh(&fip->ctlr_lock); | ||
1367 | } | 1567 | } |
1368 | 1568 | ||
1369 | /** | 1569 | /** |
@@ -1411,34 +1611,16 @@ static void fcoe_ctlr_timer_work(struct work_struct *work) | |||
1411 | sel = fip->sel_fcf; | 1611 | sel = fip->sel_fcf; |
1412 | if (!sel && fip->sel_time) { | 1612 | if (!sel && fip->sel_time) { |
1413 | if (time_after_eq(jiffies, fip->sel_time)) { | 1613 | if (time_after_eq(jiffies, fip->sel_time)) { |
1414 | fcoe_ctlr_select(fip); | 1614 | sel = fcoe_ctlr_select(fip); |
1415 | sel = fip->sel_fcf; | ||
1416 | fip->sel_time = 0; | 1615 | fip->sel_time = 0; |
1417 | } else if (time_after(next_timer, fip->sel_time)) | 1616 | } else if (time_after(next_timer, fip->sel_time)) |
1418 | next_timer = fip->sel_time; | 1617 | next_timer = fip->sel_time; |
1419 | } | 1618 | } |
1420 | 1619 | ||
1421 | if (sel != fcf) { | 1620 | if (sel && fip->flogi_req_send) |
1422 | fcf = sel; /* the old FCF may have been freed */ | 1621 | fcoe_ctlr_flogi_send(fip); |
1423 | if (sel) { | 1622 | else if (!sel && fcf) |
1424 | printk(KERN_INFO "libfcoe: host%d: FIP selected " | 1623 | reset = 1; |
1425 | "Fibre-Channel Forwarder MAC %pM\n", | ||
1426 | fip->lp->host->host_no, sel->fcf_mac); | ||
1427 | memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN); | ||
1428 | fip->map_dest = 0; | ||
1429 | fip->port_ka_time = jiffies + | ||
1430 | msecs_to_jiffies(FIP_VN_KA_PERIOD); | ||
1431 | fip->ctlr_ka_time = jiffies + sel->fka_period; | ||
1432 | if (time_after(next_timer, fip->ctlr_ka_time)) | ||
1433 | next_timer = fip->ctlr_ka_time; | ||
1434 | } else { | ||
1435 | printk(KERN_NOTICE "libfcoe: host%d: " | ||
1436 | "FIP Fibre-Channel Forwarder timed out. " | ||
1437 | "Starting FCF discovery.\n", | ||
1438 | fip->lp->host->host_no); | ||
1439 | reset = 1; | ||
1440 | } | ||
1441 | } | ||
1442 | 1624 | ||
1443 | if (sel && !sel->fd_flags) { | 1625 | if (sel && !sel->fd_flags) { |
1444 | if (time_after_eq(jiffies, fip->ctlr_ka_time)) { | 1626 | if (time_after_eq(jiffies, fip->ctlr_ka_time)) { |
@@ -2475,7 +2657,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) | |||
2475 | case FIP_ST_LINK_WAIT: | 2657 | case FIP_ST_LINK_WAIT: |
2476 | goto unlock; | 2658 | goto unlock; |
2477 | default: | 2659 | default: |
2478 | WARN(1, "unexpected state %d", fip->state); | 2660 | WARN(1, "unexpected state %d\n", fip->state); |
2479 | goto unlock; | 2661 | goto unlock; |
2480 | } | 2662 | } |
2481 | mod_timer(&fip->timer, next_time); | 2663 | mod_timer(&fip->timer, next_time); |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 76365700e2d5..3242bcabad97 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -4273,8 +4273,10 @@ static int ioc_general(void __user *arg, char *cmnd) | |||
4273 | } | 4273 | } |
4274 | 4274 | ||
4275 | rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info); | 4275 | rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info); |
4276 | if (rval < 0) | 4276 | if (rval < 0) { |
4277 | gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); | ||
4277 | return rval; | 4278 | return rval; |
4279 | } | ||
4278 | gen.status = rval; | 4280 | gen.status = rval; |
4279 | 4281 | ||
4280 | if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, | 4282 | if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, |
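Editorial note on the gdth hunk above: the fix adds the missing gdth_ioctl_free() on the early-return path of ioc_general(), closing a buffer leak when __gdth_execute() fails; the gdth_proc.c hunks below apply the same treatment to each "goto stop_output" exit. The generic shape of the bug and the fix, with plain malloc/free standing in for the DMA scratch-buffer helpers:

    #include <stdio.h>
    #include <stdlib.h>

    static int do_command(int fail) { return fail ? -5 : 0; }

    static int ioctl_general(int fail)
    {
        char *buf = malloc(4096);   /* stand-in for gdth_ioctl_alloc() */
        int rval;

        if (!buf)
            return -12;

        rval = do_command(fail);
        if (rval < 0) {
            free(buf);              /* the fix: release the buffer on early return */
            return rval;
        }

        /* ... copy results back to the caller here ... */
        free(buf);
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", ioctl_general(1), ioctl_general(0));
        return 0;
    }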
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 0572b9bf4bd6..652754319a4b 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c | |||
@@ -365,8 +365,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
365 | len = 0; | 365 | len = 0; |
366 | begin = pos; | 366 | begin = pos; |
367 | } | 367 | } |
368 | if (pos > offset + length) | 368 | if (pos > offset + length) { |
369 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | ||
369 | goto stop_output; | 370 | goto stop_output; |
371 | } | ||
370 | } | 372 | } |
371 | } | 373 | } |
372 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 374 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
@@ -450,8 +452,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
450 | len = 0; | 452 | len = 0; |
451 | begin = pos; | 453 | begin = pos; |
452 | } | 454 | } |
453 | if (pos > offset + length) | 455 | if (pos > offset + length) { |
456 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | ||
454 | goto stop_output; | 457 | goto stop_output; |
458 | } | ||
455 | } while (drv_no != -1); | 459 | } while (drv_no != -1); |
456 | 460 | ||
457 | if (is_mirr) { | 461 | if (is_mirr) { |
@@ -472,8 +476,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
472 | len = 0; | 476 | len = 0; |
473 | begin = pos; | 477 | begin = pos; |
474 | } | 478 | } |
475 | if (pos > offset + length) | 479 | if (pos > offset + length) { |
480 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | ||
476 | goto stop_output; | 481 | goto stop_output; |
482 | } | ||
477 | } | 483 | } |
478 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 484 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
479 | 485 | ||
@@ -542,8 +548,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, | |||
542 | len = 0; | 548 | len = 0; |
543 | begin = pos; | 549 | begin = pos; |
544 | } | 550 | } |
545 | if (pos > offset + length) | 551 | if (pos > offset + length) { |
552 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | ||
546 | goto stop_output; | 553 | goto stop_output; |
554 | } | ||
547 | } | 555 | } |
548 | } | 556 | } |
549 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); | 557 | gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index a6dea08664fc..12deffccb8da 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -641,11 +641,6 @@ static void fixup_botched_add(struct ctlr_info *h, | |||
641 | static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, | 641 | static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, |
642 | struct hpsa_scsi_dev_t *dev2) | 642 | struct hpsa_scsi_dev_t *dev2) |
643 | { | 643 | { |
644 | if ((is_logical_dev_addr_mode(dev1->scsi3addr) || | ||
645 | (dev1->lun != -1 && dev2->lun != -1)) && | ||
646 | dev1->devtype != 0x0C) | ||
647 | return (memcmp(dev1, dev2, sizeof(*dev1)) == 0); | ||
648 | |||
649 | /* we compare everything except lun and target as these | 644 | /* we compare everything except lun and target as these |
650 | * are not yet assigned. Compare parts likely | 645 | * are not yet assigned. Compare parts likely |
651 | * to differ first | 646 | * to differ first |
@@ -660,12 +655,8 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, | |||
660 | return 0; | 655 | return 0; |
661 | if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) | 656 | if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) |
662 | return 0; | 657 | return 0; |
663 | if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0) | ||
664 | return 0; | ||
665 | if (dev1->devtype != dev2->devtype) | 658 | if (dev1->devtype != dev2->devtype) |
666 | return 0; | 659 | return 0; |
667 | if (dev1->raid_level != dev2->raid_level) | ||
668 | return 0; | ||
669 | if (dev1->bus != dev2->bus) | 660 | if (dev1->bus != dev2->bus) |
670 | return 0; | 661 | return 0; |
671 | return 1; | 662 | return 1; |
@@ -1477,8 +1468,6 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
1477 | sizeof(this_device->vendor)); | 1468 | sizeof(this_device->vendor)); |
1478 | memcpy(this_device->model, &inq_buff[16], | 1469 | memcpy(this_device->model, &inq_buff[16], |
1479 | sizeof(this_device->model)); | 1470 | sizeof(this_device->model)); |
1480 | memcpy(this_device->revision, &inq_buff[32], | ||
1481 | sizeof(this_device->revision)); | ||
1482 | memset(this_device->device_id, 0, | 1471 | memset(this_device->device_id, 0, |
1483 | sizeof(this_device->device_id)); | 1472 | sizeof(this_device->device_id)); |
1484 | hpsa_get_device_id(h, scsi3addr, this_device->device_id, | 1473 | hpsa_get_device_id(h, scsi3addr, this_device->device_id, |
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index a203ef65cb50..19586e189f0f 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
@@ -45,7 +45,6 @@ struct hpsa_scsi_dev_t { | |||
45 | unsigned char device_id[16]; /* from inquiry pg. 0x83 */ | 45 | unsigned char device_id[16]; /* from inquiry pg. 0x83 */ |
46 | unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ | 46 | unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ |
47 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ | 47 | unsigned char model[16]; /* bytes 16-31 of inquiry data */ |
48 | unsigned char revision[4]; /* bytes 32-35 of inquiry data */ | ||
49 | unsigned char raid_level; /* from inquiry page 0xC1 */ | 48 | unsigned char raid_level; /* from inquiry page 0xC1 */ |
50 | }; | 49 | }; |
51 | 50 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 57cad7e20caa..b7650613b8c2 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -2493,23 +2493,23 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) | |||
2493 | } | 2493 | } |
2494 | 2494 | ||
2495 | static const struct ibmvfc_async_desc ae_desc [] = { | 2495 | static const struct ibmvfc_async_desc ae_desc [] = { |
2496 | { IBMVFC_AE_ELS_PLOGI, "PLOGI", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, | 2496 | { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, |
2497 | { IBMVFC_AE_ELS_LOGO, "LOGO", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, | 2497 | { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, |
2498 | { IBMVFC_AE_ELS_PRLO, "PRLO", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, | 2498 | { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, |
2499 | { IBMVFC_AE_SCN_NPORT, "N-Port SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, | 2499 | { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, |
2500 | { IBMVFC_AE_SCN_GROUP, "Group SCN", IBMVFC_DEFAULT_LOG_LEVEL + 1 }, | 2500 | { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 }, |
2501 | { IBMVFC_AE_SCN_DOMAIN, "Domain SCN", IBMVFC_DEFAULT_LOG_LEVEL }, | 2501 | { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL }, |
2502 | { IBMVFC_AE_SCN_FABRIC, "Fabric SCN", IBMVFC_DEFAULT_LOG_LEVEL }, | 2502 | { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL }, |
2503 | { IBMVFC_AE_LINK_UP, "Link Up", IBMVFC_DEFAULT_LOG_LEVEL }, | 2503 | { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL }, |
2504 | { IBMVFC_AE_LINK_DOWN, "Link Down", IBMVFC_DEFAULT_LOG_LEVEL }, | 2504 | { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL }, |
2505 | { IBMVFC_AE_LINK_DEAD, "Link Dead", IBMVFC_DEFAULT_LOG_LEVEL }, | 2505 | { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL }, |
2506 | { IBMVFC_AE_HALT, "Halt", IBMVFC_DEFAULT_LOG_LEVEL }, | 2506 | { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL }, |
2507 | { IBMVFC_AE_RESUME, "Resume", IBMVFC_DEFAULT_LOG_LEVEL }, | 2507 | { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL }, |
2508 | { IBMVFC_AE_ADAPTER_FAILED, "Adapter Failed", IBMVFC_DEFAULT_LOG_LEVEL }, | 2508 | { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL }, |
2509 | }; | 2509 | }; |
2510 | 2510 | ||
2511 | static const struct ibmvfc_async_desc unknown_ae = { | 2511 | static const struct ibmvfc_async_desc unknown_ae = { |
2512 | 0, "Unknown async", IBMVFC_DEFAULT_LOG_LEVEL | 2512 | "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL |
2513 | }; | 2513 | }; |
2514 | 2514 | ||
2515 | /** | 2515 | /** |
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index ef663e7c9bbc..834c37fc7ce9 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h | |||
@@ -542,8 +542,8 @@ enum ibmvfc_async_event { | |||
542 | }; | 542 | }; |
543 | 543 | ||
544 | struct ibmvfc_async_desc { | 544 | struct ibmvfc_async_desc { |
545 | enum ibmvfc_async_event ae; | ||
546 | const char *desc; | 545 | const char *desc; |
546 | enum ibmvfc_async_event ae; | ||
547 | int log_level; | 547 | int log_level; |
548 | }; | 548 | }; |
549 | 549 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 5bbaee597e88..de2e09e49a3e 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -146,7 +146,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { | |||
146 | } | 146 | } |
147 | }, | 147 | }, |
148 | { /* CRoC */ | 148 | { /* CRoC */ |
149 | .mailbox = 0x00040, | 149 | .mailbox = 0x00044, |
150 | .cache_line_size = 0x20, | 150 | .cache_line_size = 0x20, |
151 | { | 151 | { |
152 | .set_interrupt_mask_reg = 0x00010, | 152 | .set_interrupt_mask_reg = 0x00010, |
@@ -1048,6 +1048,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, | |||
1048 | sizeof(res->res_path)); | 1048 | sizeof(res->res_path)); |
1049 | 1049 | ||
1050 | res->bus = 0; | 1050 | res->bus = 0; |
1051 | memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, | ||
1052 | sizeof(res->dev_lun.scsi_lun)); | ||
1051 | res->lun = scsilun_to_int(&res->dev_lun); | 1053 | res->lun = scsilun_to_int(&res->dev_lun); |
1052 | 1054 | ||
1053 | if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { | 1055 | if (res->type == IPR_RES_TYPE_GENERIC_SCSI) { |
@@ -1063,9 +1065,6 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res, | |||
1063 | ioa_cfg->max_devs_supported); | 1065 | ioa_cfg->max_devs_supported); |
1064 | set_bit(res->target, ioa_cfg->target_ids); | 1066 | set_bit(res->target, ioa_cfg->target_ids); |
1065 | } | 1067 | } |
1066 | |||
1067 | memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, | ||
1068 | sizeof(res->dev_lun.scsi_lun)); | ||
1069 | } else if (res->type == IPR_RES_TYPE_IOAFP) { | 1068 | } else if (res->type == IPR_RES_TYPE_IOAFP) { |
1070 | res->bus = IPR_IOAFP_VIRTUAL_BUS; | 1069 | res->bus = IPR_IOAFP_VIRTUAL_BUS; |
1071 | res->target = 0; | 1070 | res->target = 0; |
@@ -1116,7 +1115,7 @@ static int ipr_is_same_device(struct ipr_resource_entry *res, | |||
1116 | if (res->ioa_cfg->sis64) { | 1115 | if (res->ioa_cfg->sis64) { |
1117 | if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, | 1116 | if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id, |
1118 | sizeof(cfgtew->u.cfgte64->dev_id)) && | 1117 | sizeof(cfgtew->u.cfgte64->dev_id)) && |
1119 | !memcmp(&res->lun, &cfgtew->u.cfgte64->lun, | 1118 | !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun, |
1120 | sizeof(cfgtew->u.cfgte64->lun))) { | 1119 | sizeof(cfgtew->u.cfgte64->lun))) { |
1121 | return 1; | 1120 | return 1; |
1122 | } | 1121 | } |
@@ -2901,6 +2900,12 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) | |||
2901 | return; | 2900 | return; |
2902 | } | 2901 | } |
2903 | 2902 | ||
2903 | if (ioa_cfg->sis64) { | ||
2904 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
2905 | ssleep(IPR_DUMP_DELAY_SECONDS); | ||
2906 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
2907 | } | ||
2908 | |||
2904 | start_addr = readl(ioa_cfg->ioa_mailbox); | 2909 | start_addr = readl(ioa_cfg->ioa_mailbox); |
2905 | 2910 | ||
2906 | if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { | 2911 | if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) { |
@@ -7473,6 +7478,29 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) | |||
7473 | } | 7478 | } |
7474 | 7479 | ||
7475 | /** | 7480 | /** |
7481 | * ipr_reset_get_unit_check_job - Call to get the unit check buffer. | ||
7482 | * @ipr_cmd: ipr command struct | ||
7483 | * | ||
7484 | * Description: This function retrieves the unit check buffer. | ||
7485 | * | ||
7486 | * Return value: | ||
7487 | * IPR_RC_JOB_RETURN | ||
7488 | **/ | ||
7489 | static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd) | ||
7490 | { | ||
7491 | struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; | ||
7492 | |||
7493 | ENTER; | ||
7494 | ioa_cfg->ioa_unit_checked = 0; | ||
7495 | ipr_get_unit_check_buffer(ioa_cfg); | ||
7496 | ipr_cmd->job_step = ipr_reset_alert; | ||
7497 | ipr_reset_start_timer(ipr_cmd, 0); | ||
7498 | |||
7499 | LEAVE; | ||
7500 | return IPR_RC_JOB_RETURN; | ||
7501 | } | ||
7502 | |||
7503 | /** | ||
7476 | * ipr_reset_restore_cfg_space - Restore PCI config space. | 7504 | * ipr_reset_restore_cfg_space - Restore PCI config space. |
7477 | * @ipr_cmd: ipr command struct | 7505 | * @ipr_cmd: ipr command struct |
7478 | * | 7506 | * |
@@ -7512,11 +7540,17 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) | |||
7512 | } | 7540 | } |
7513 | 7541 | ||
7514 | if (ioa_cfg->ioa_unit_checked) { | 7542 | if (ioa_cfg->ioa_unit_checked) { |
7515 | ioa_cfg->ioa_unit_checked = 0; | 7543 | if (ioa_cfg->sis64) { |
7516 | ipr_get_unit_check_buffer(ioa_cfg); | 7544 | ipr_cmd->job_step = ipr_reset_get_unit_check_job; |
7517 | ipr_cmd->job_step = ipr_reset_alert; | 7545 | ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT); |
7518 | ipr_reset_start_timer(ipr_cmd, 0); | 7546 | return IPR_RC_JOB_RETURN; |
7519 | return IPR_RC_JOB_RETURN; | 7547 | } else { |
7548 | ioa_cfg->ioa_unit_checked = 0; | ||
7549 | ipr_get_unit_check_buffer(ioa_cfg); | ||
7550 | ipr_cmd->job_step = ipr_reset_alert; | ||
7551 | ipr_reset_start_timer(ipr_cmd, 0); | ||
7552 | return IPR_RC_JOB_RETURN; | ||
7553 | } | ||
7520 | } | 7554 | } |
7521 | 7555 | ||
7522 | if (ioa_cfg->in_ioa_bringdown) { | 7556 | if (ioa_cfg->in_ioa_bringdown) { |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index b28a00f1082c..13f425fb8851 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -218,6 +218,8 @@ | |||
218 | #define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) | 218 | #define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) |
219 | #define IPR_PCI_RESET_TIMEOUT (HZ / 2) | 219 | #define IPR_PCI_RESET_TIMEOUT (HZ / 2) |
220 | #define IPR_DUMP_TIMEOUT (15 * HZ) | 220 | #define IPR_DUMP_TIMEOUT (15 * HZ) |
221 | #define IPR_DUMP_DELAY_SECONDS 4 | ||
222 | #define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ) | ||
221 | 223 | ||
222 | /* | 224 | /* |
223 | * SCSI Literals | 225 | * SCSI Literals |
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index ec2a1aec2350..d21367d3305f 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c | |||
@@ -67,6 +67,11 @@ struct workqueue_struct *fc_exch_workqueue; | |||
67 | struct fc_exch_pool { | 67 | struct fc_exch_pool { |
68 | u16 next_index; | 68 | u16 next_index; |
69 | u16 total_exches; | 69 | u16 total_exches; |
70 | |||
71 | /* two caches of free slots in the exch array */ | ||
72 | u16 left; | ||
73 | u16 right; | ||
74 | |||
70 | spinlock_t lock; | 75 | spinlock_t lock; |
71 | struct list_head ex_list; | 76 | struct list_head ex_list; |
72 | }; | 77 | }; |
@@ -108,7 +113,6 @@ struct fc_exch_mgr { | |||
108 | atomic_t non_bls_resp; | 113 | atomic_t non_bls_resp; |
109 | } stats; | 114 | } stats; |
110 | }; | 115 | }; |
111 | #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) | ||
112 | 116 | ||
113 | /** | 117 | /** |
114 | * struct fc_exch_mgr_anchor - primary structure for list of EMs | 118 | * struct fc_exch_mgr_anchor - primary structure for list of EMs |
@@ -397,13 +401,23 @@ static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index, | |||
397 | static void fc_exch_delete(struct fc_exch *ep) | 401 | static void fc_exch_delete(struct fc_exch *ep) |
398 | { | 402 | { |
399 | struct fc_exch_pool *pool; | 403 | struct fc_exch_pool *pool; |
404 | u16 index; | ||
400 | 405 | ||
401 | pool = ep->pool; | 406 | pool = ep->pool; |
402 | spin_lock_bh(&pool->lock); | 407 | spin_lock_bh(&pool->lock); |
403 | WARN_ON(pool->total_exches <= 0); | 408 | WARN_ON(pool->total_exches <= 0); |
404 | pool->total_exches--; | 409 | pool->total_exches--; |
405 | fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order, | 410 | |
406 | NULL); | 411 | /* update cache of free slot */ |
412 | index = (ep->xid - ep->em->min_xid) >> fc_cpu_order; | ||
413 | if (pool->left == FC_XID_UNKNOWN) | ||
414 | pool->left = index; | ||
415 | else if (pool->right == FC_XID_UNKNOWN) | ||
416 | pool->right = index; | ||
417 | else | ||
418 | pool->next_index = index; | ||
419 | |||
420 | fc_exch_ptr_set(pool, index, NULL); | ||
407 | list_del(&ep->ex_list); | 421 | list_del(&ep->ex_list); |
408 | spin_unlock_bh(&pool->lock); | 422 | spin_unlock_bh(&pool->lock); |
409 | fc_exch_release(ep); /* drop hold for exch in mp */ | 423 | fc_exch_release(ep); /* drop hold for exch in mp */ |
@@ -636,10 +650,13 @@ static void fc_exch_timeout(struct work_struct *work) | |||
636 | if (e_stat & ESB_ST_ABNORMAL) | 650 | if (e_stat & ESB_ST_ABNORMAL) |
637 | rc = fc_exch_done_locked(ep); | 651 | rc = fc_exch_done_locked(ep); |
638 | spin_unlock_bh(&ep->ex_lock); | 652 | spin_unlock_bh(&ep->ex_lock); |
639 | if (!rc) | ||
640 | fc_exch_delete(ep); | ||
641 | if (resp) | 653 | if (resp) |
642 | resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); | 654 | resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); |
655 | if (!rc) { | ||
656 | /* delete the exchange if it's already being aborted */ | ||
657 | fc_exch_delete(ep); | ||
658 | return; | ||
659 | } | ||
643 | fc_seq_exch_abort(sp, 2 * ep->r_a_tov); | 660 | fc_seq_exch_abort(sp, 2 * ep->r_a_tov); |
644 | goto done; | 661 | goto done; |
645 | } | 662 | } |
@@ -679,6 +696,19 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, | |||
679 | pool = per_cpu_ptr(mp->pool, cpu); | 696 | pool = per_cpu_ptr(mp->pool, cpu); |
680 | spin_lock_bh(&pool->lock); | 697 | spin_lock_bh(&pool->lock); |
681 | put_cpu(); | 698 | put_cpu(); |
699 | |||
700 | /* peek the cache of free slots */ | ||
701 | if (pool->left != FC_XID_UNKNOWN) { | ||
702 | index = pool->left; | ||
703 | pool->left = FC_XID_UNKNOWN; | ||
704 | goto hit; | ||
705 | } | ||
706 | if (pool->right != FC_XID_UNKNOWN) { | ||
707 | index = pool->right; | ||
708 | pool->right = FC_XID_UNKNOWN; | ||
709 | goto hit; | ||
710 | } | ||
711 | |||
682 | index = pool->next_index; | 712 | index = pool->next_index; |
683 | /* allocate new exch from pool */ | 713 | /* allocate new exch from pool */ |
684 | while (fc_exch_ptr_get(pool, index)) { | 714 | while (fc_exch_ptr_get(pool, index)) { |
@@ -687,7 +717,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, | |||
687 | goto err; | 717 | goto err; |
688 | } | 718 | } |
689 | pool->next_index = index == mp->pool_max_index ? 0 : index + 1; | 719 | pool->next_index = index == mp->pool_max_index ? 0 : index + 1; |
690 | 720 | hit: | |
691 | fc_exch_hold(ep); /* hold for exch in mp */ | 721 | fc_exch_hold(ep); /* hold for exch in mp */ |
692 | spin_lock_init(&ep->ex_lock); | 722 | spin_lock_init(&ep->ex_lock); |
693 | /* | 723 | /* |
@@ -1247,7 +1277,7 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) | |||
1247 | 1277 | ||
1248 | list_for_each_entry(ema, &lport->ema_list, ema_list) | 1278 | list_for_each_entry(ema, &lport->ema_list, ema_list) |
1249 | if ((!ema->match || ema->match(fp)) && | 1279 | if ((!ema->match || ema->match(fp)) && |
1250 | fc_seq_lookup_recip(lport, ema->mp, fp) != FC_RJT_NONE) | 1280 | fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE) |
1251 | break; | 1281 | break; |
1252 | return fr_seq(fp); | 1282 | return fr_seq(fp); |
1253 | } | 1283 | } |
@@ -1343,7 +1373,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) | |||
1343 | } | 1373 | } |
1344 | if (ep->esb_stat & ESB_ST_COMPLETE) { | 1374 | if (ep->esb_stat & ESB_ST_COMPLETE) { |
1345 | atomic_inc(&mp->stats.xid_not_found); | 1375 | atomic_inc(&mp->stats.xid_not_found); |
1346 | goto out; | 1376 | goto rel; |
1347 | } | 1377 | } |
1348 | if (ep->rxid == FC_XID_UNKNOWN) | 1378 | if (ep->rxid == FC_XID_UNKNOWN) |
1349 | ep->rxid = ntohs(fh->fh_rx_id); | 1379 | ep->rxid = ntohs(fh->fh_rx_id); |
@@ -2181,6 +2211,8 @@ struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport, | |||
2181 | goto free_mempool; | 2211 | goto free_mempool; |
2182 | for_each_possible_cpu(cpu) { | 2212 | for_each_possible_cpu(cpu) { |
2183 | pool = per_cpu_ptr(mp->pool, cpu); | 2213 | pool = per_cpu_ptr(mp->pool, cpu); |
2214 | pool->left = FC_XID_UNKNOWN; | ||
2215 | pool->right = FC_XID_UNKNOWN; | ||
2184 | spin_lock_init(&pool->lock); | 2216 | spin_lock_init(&pool->lock); |
2185 | INIT_LIST_HEAD(&pool->ex_list); | 2217 | INIT_LIST_HEAD(&pool->ex_list); |
2186 | } | 2218 | } |
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 2924363d142b..cdc06cda76e5 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c | |||
@@ -57,6 +57,9 @@ struct kmem_cache *scsi_pkt_cachep; | |||
57 | #define FC_SRB_READ (1 << 1) | 57 | #define FC_SRB_READ (1 << 1) |
58 | #define FC_SRB_WRITE (1 << 0) | 58 | #define FC_SRB_WRITE (1 << 0) |
59 | 59 | ||
60 | /* constant added to e_d_tov timeout to get rec_tov value */ | ||
61 | #define REC_TOV_CONST 1 | ||
62 | |||
60 | /* | 63 | /* |
61 | * The SCp.ptr should be tested and set under the scsi_pkt_queue lock | 64 | * The SCp.ptr should be tested and set under the scsi_pkt_queue lock |
62 | */ | 65 | */ |
@@ -96,7 +99,7 @@ static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *); | |||
96 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *); | 99 | static void fc_fcp_complete_locked(struct fc_fcp_pkt *); |
97 | static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); | 100 | static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); |
98 | static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); | 101 | static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); |
99 | static void fc_fcp_recovery(struct fc_fcp_pkt *); | 102 | static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code); |
100 | static void fc_fcp_timeout(unsigned long); | 103 | static void fc_fcp_timeout(unsigned long); |
101 | static void fc_fcp_rec(struct fc_fcp_pkt *); | 104 | static void fc_fcp_rec(struct fc_fcp_pkt *); |
102 | static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); | 105 | static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); |
@@ -120,14 +123,13 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *); | |||
120 | #define FC_DATA_UNDRUN 7 | 123 | #define FC_DATA_UNDRUN 7 |
121 | #define FC_ERROR 8 | 124 | #define FC_ERROR 8 |
122 | #define FC_HRD_ERROR 9 | 125 | #define FC_HRD_ERROR 9 |
123 | #define FC_CMD_RECOVERY 10 | 126 | #define FC_CRC_ERROR 10 |
127 | #define FC_TIMED_OUT 11 | ||
124 | 128 | ||
125 | /* | 129 | /* |
126 | * Error recovery timeout values. | 130 | * Error recovery timeout values. |
127 | */ | 131 | */ |
128 | #define FC_SCSI_ER_TIMEOUT (10 * HZ) | ||
129 | #define FC_SCSI_TM_TOV (10 * HZ) | 132 | #define FC_SCSI_TM_TOV (10 * HZ) |
130 | #define FC_SCSI_REC_TOV (2 * HZ) | ||
131 | #define FC_HOST_RESET_TIMEOUT (30 * HZ) | 133 | #define FC_HOST_RESET_TIMEOUT (30 * HZ) |
132 | #define FC_CAN_QUEUE_PERIOD (60 * HZ) | 134 | #define FC_CAN_QUEUE_PERIOD (60 * HZ) |
133 | 135 | ||
@@ -438,6 +440,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
438 | void *buf; | 440 | void *buf; |
439 | struct scatterlist *sg; | 441 | struct scatterlist *sg; |
440 | u32 nents; | 442 | u32 nents; |
443 | u8 host_bcode = FC_COMPLETE; | ||
441 | 444 | ||
442 | fh = fc_frame_header_get(fp); | 445 | fh = fc_frame_header_get(fp); |
443 | offset = ntohl(fh->fh_parm_offset); | 446 | offset = ntohl(fh->fh_parm_offset); |
@@ -446,13 +449,16 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
446 | buf = fc_frame_payload_get(fp, 0); | 449 | buf = fc_frame_payload_get(fp, 0); |
447 | 450 | ||
448 | /* | 451 | /* |
449 | * if this I/O is ddped then clear it | 452 | * if this I/O is ddped then clear it and initiate recovery since data |
450 | * and initiate recovery since data | 453 | * frames are expected to be placed directly in that case. |
451 | * frames are expected to be placed | 454 | * |
452 | * directly in that case. | 455 | * Indicate error to scsi-ml because something went wrong with the |
456 | * ddp handling to get us here. | ||
453 | */ | 457 | */ |
454 | if (fsp->xfer_ddp != FC_XID_UNKNOWN) { | 458 | if (fsp->xfer_ddp != FC_XID_UNKNOWN) { |
455 | fc_fcp_ddp_done(fsp); | 459 | fc_fcp_ddp_done(fsp); |
460 | FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n"); | ||
461 | host_bcode = FC_ERROR; | ||
456 | goto err; | 462 | goto err; |
457 | } | 463 | } |
458 | if (offset + len > fsp->data_len) { | 464 | if (offset + len > fsp->data_len) { |
@@ -462,6 +468,9 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
462 | goto crc_err; | 468 | goto crc_err; |
463 | FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " | 469 | FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx " |
464 | "data_len %x\n", len, offset, fsp->data_len); | 470 | "data_len %x\n", len, offset, fsp->data_len); |
471 | |||
472 | /* Data is corrupted; indicate scsi-ml should retry */ | ||
473 | host_bcode = FC_DATA_OVRRUN; | ||
465 | goto err; | 474 | goto err; |
466 | } | 475 | } |
467 | if (offset != fsp->xfer_len) | 476 | if (offset != fsp->xfer_len) |
@@ -498,8 +507,10 @@ crc_err: | |||
498 | * If so, we need to retry the entire operation. | 507 | * If so, we need to retry the entire operation. |
499 | * Otherwise, ignore it. | 508 | * Otherwise, ignore it. |
500 | */ | 509 | */ |
501 | if (fsp->state & FC_SRB_DISCONTIG) | 510 | if (fsp->state & FC_SRB_DISCONTIG) { |
511 | host_bcode = FC_CRC_ERROR; | ||
502 | goto err; | 512 | goto err; |
513 | } | ||
503 | return; | 514 | return; |
504 | } | 515 | } |
505 | } | 516 | } |
@@ -517,7 +528,7 @@ crc_err: | |||
517 | fc_fcp_complete_locked(fsp); | 528 | fc_fcp_complete_locked(fsp); |
518 | return; | 529 | return; |
519 | err: | 530 | err: |
520 | fc_fcp_recovery(fsp); | 531 | fc_fcp_recovery(fsp, host_bcode); |
521 | } | 532 | } |
522 | 533 | ||
523 | /** | 534 | /** |
@@ -962,7 +973,13 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp) | |||
962 | } | 973 | } |
963 | lport->tt.exch_done(seq); | 974 | lport->tt.exch_done(seq); |
964 | } | 975 | } |
965 | fc_io_compl(fsp); | 976 | /* |
977 | * Some resets driven by SCSI are not I/Os and do not have | ||
978 | * SCSI commands associated with the requests. We should not | ||
979 | * call I/O completion if we do not have a SCSI command. | ||
980 | */ | ||
981 | if (fsp->cmd) | ||
982 | fc_io_compl(fsp); | ||
966 | } | 983 | } |
967 | 984 | ||
968 | /** | 985 | /** |
@@ -1073,6 +1090,21 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp) | |||
1073 | } | 1090 | } |
1074 | 1091 | ||
1075 | /** | 1092 | /** |
1093 | * get_fsp_rec_tov() - Helper function to get REC_TOV | ||
1094 | * @fsp: the FCP packet | ||
1095 | */ | ||
1096 | static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp) | ||
1097 | { | ||
1098 | struct fc_rport *rport; | ||
1099 | struct fc_rport_libfc_priv *rpriv; | ||
1100 | |||
1101 | rport = fsp->rport; | ||
1102 | rpriv = rport->dd_data; | ||
1103 | |||
1104 | return rpriv->e_d_tov + REC_TOV_CONST; | ||
1105 | } | ||
1106 | |||
1107 | /** | ||
1076 | * fc_fcp_cmd_send() - Send a FCP command | 1108 | * fc_fcp_cmd_send() - Send a FCP command |
1077 | * @lport: The local port to send the command on | 1109 | * @lport: The local port to send the command on |
1078 | * @fsp: The FCP packet the command is on | 1110 | * @fsp: The FCP packet the command is on |
@@ -1089,6 +1121,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, | |||
1089 | struct fc_rport_libfc_priv *rpriv; | 1121 | struct fc_rport_libfc_priv *rpriv; |
1090 | const size_t len = sizeof(fsp->cdb_cmd); | 1122 | const size_t len = sizeof(fsp->cdb_cmd); |
1091 | int rc = 0; | 1123 | int rc = 0; |
1124 | unsigned int rec_tov; | ||
1092 | 1125 | ||
1093 | if (fc_fcp_lock_pkt(fsp)) | 1126 | if (fc_fcp_lock_pkt(fsp)) |
1094 | return 0; | 1127 | return 0; |
@@ -1119,10 +1152,13 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, | |||
1119 | fsp->seq_ptr = seq; | 1152 | fsp->seq_ptr = seq; |
1120 | fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ | 1153 | fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ |
1121 | 1154 | ||
1155 | rec_tov = get_fsp_rec_tov(fsp); | ||
1156 | |||
1122 | setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); | 1157 | setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); |
1123 | fc_fcp_timer_set(fsp, | 1158 | |
1124 | (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ? | 1159 | if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) |
1125 | FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT); | 1160 | fc_fcp_timer_set(fsp, rec_tov); |
1161 | |||
1126 | unlock: | 1162 | unlock: |
1127 | fc_fcp_unlock_pkt(fsp); | 1163 | fc_fcp_unlock_pkt(fsp); |
1128 | return rc; | 1164 | return rc; |
@@ -1197,13 +1233,16 @@ static void fc_lun_reset_send(unsigned long data) | |||
1197 | { | 1233 | { |
1198 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; | 1234 | struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; |
1199 | struct fc_lport *lport = fsp->lp; | 1235 | struct fc_lport *lport = fsp->lp; |
1236 | unsigned int rec_tov; | ||
1237 | |||
1200 | if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { | 1238 | if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { |
1201 | if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) | 1239 | if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY) |
1202 | return; | 1240 | return; |
1203 | if (fc_fcp_lock_pkt(fsp)) | 1241 | if (fc_fcp_lock_pkt(fsp)) |
1204 | return; | 1242 | return; |
1243 | rec_tov = get_fsp_rec_tov(fsp); | ||
1205 | setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); | 1244 | setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); |
1206 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); | 1245 | fc_fcp_timer_set(fsp, rec_tov); |
1207 | fc_fcp_unlock_pkt(fsp); | 1246 | fc_fcp_unlock_pkt(fsp); |
1208 | } | 1247 | } |
1209 | } | 1248 | } |
@@ -1282,27 +1321,27 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1282 | * | 1321 | * |
1283 | * scsi-eh will escalate for when either happens. | 1322 | * scsi-eh will escalate for when either happens. |
1284 | */ | 1323 | */ |
1285 | return; | 1324 | goto out; |
1286 | } | 1325 | } |
1287 | 1326 | ||
1288 | if (fc_fcp_lock_pkt(fsp)) | 1327 | if (fc_fcp_lock_pkt(fsp)) |
1289 | return; | 1328 | goto out; |
1290 | 1329 | ||
1291 | /* | 1330 | /* |
1292 | * raced with eh timeout handler. | 1331 | * raced with eh timeout handler. |
1293 | */ | 1332 | */ |
1294 | if (!fsp->seq_ptr || !fsp->wait_for_comp) { | 1333 | if (!fsp->seq_ptr || !fsp->wait_for_comp) |
1295 | spin_unlock_bh(&fsp->scsi_pkt_lock); | 1334 | goto out_unlock; |
1296 | return; | ||
1297 | } | ||
1298 | 1335 | ||
1299 | fh = fc_frame_header_get(fp); | 1336 | fh = fc_frame_header_get(fp); |
1300 | if (fh->fh_type != FC_TYPE_BLS) | 1337 | if (fh->fh_type != FC_TYPE_BLS) |
1301 | fc_fcp_resp(fsp, fp); | 1338 | fc_fcp_resp(fsp, fp); |
1302 | fsp->seq_ptr = NULL; | 1339 | fsp->seq_ptr = NULL; |
1303 | fsp->lp->tt.exch_done(seq); | 1340 | fsp->lp->tt.exch_done(seq); |
1304 | fc_frame_free(fp); | 1341 | out_unlock: |
1305 | fc_fcp_unlock_pkt(fsp); | 1342 | fc_fcp_unlock_pkt(fsp); |
1343 | out: | ||
1344 | fc_frame_free(fp); | ||
1306 | } | 1345 | } |
1307 | 1346 | ||
1308 | /** | 1347 | /** |
@@ -1341,13 +1380,10 @@ static void fc_fcp_timeout(unsigned long data) | |||
1341 | 1380 | ||
1342 | if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) | 1381 | if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) |
1343 | fc_fcp_rec(fsp); | 1382 | fc_fcp_rec(fsp); |
1344 | else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2), | ||
1345 | jiffies)) | ||
1346 | fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); | ||
1347 | else if (fsp->state & FC_SRB_RCV_STATUS) | 1383 | else if (fsp->state & FC_SRB_RCV_STATUS) |
1348 | fc_fcp_complete_locked(fsp); | 1384 | fc_fcp_complete_locked(fsp); |
1349 | else | 1385 | else |
1350 | fc_fcp_recovery(fsp); | 1386 | fc_fcp_recovery(fsp, FC_TIMED_OUT); |
1351 | fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; | 1387 | fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO; |
1352 | unlock: | 1388 | unlock: |
1353 | fc_fcp_unlock_pkt(fsp); | 1389 | fc_fcp_unlock_pkt(fsp); |
@@ -1373,6 +1409,7 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) | |||
1373 | fc_fcp_complete_locked(fsp); | 1409 | fc_fcp_complete_locked(fsp); |
1374 | return; | 1410 | return; |
1375 | } | 1411 | } |
1412 | |||
1376 | fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); | 1413 | fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec)); |
1377 | if (!fp) | 1414 | if (!fp) |
1378 | goto retry; | 1415 | goto retry; |
@@ -1383,15 +1420,15 @@ static void fc_fcp_rec(struct fc_fcp_pkt *fsp) | |||
1383 | FC_FCTL_REQ, 0); | 1420 | FC_FCTL_REQ, 0); |
1384 | if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, | 1421 | if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC, |
1385 | fc_fcp_rec_resp, fsp, | 1422 | fc_fcp_rec_resp, fsp, |
1386 | jiffies_to_msecs(FC_SCSI_REC_TOV))) { | 1423 | 2 * lport->r_a_tov)) { |
1387 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ | 1424 | fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */ |
1388 | return; | 1425 | return; |
1389 | } | 1426 | } |
1390 | retry: | 1427 | retry: |
1391 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | 1428 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) |
1392 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); | 1429 | fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); |
1393 | else | 1430 | else |
1394 | fc_fcp_recovery(fsp); | 1431 | fc_fcp_recovery(fsp, FC_TIMED_OUT); |
1395 | } | 1432 | } |
1396 | 1433 | ||
1397 | /** | 1434 | /** |
@@ -1445,7 +1482,6 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1445 | * making progress. | 1482 | * making progress. |
1446 | */ | 1483 | */ |
1447 | rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; | 1484 | rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED; |
1448 | fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT); | ||
1449 | break; | 1485 | break; |
1450 | case ELS_RJT_LOGIC: | 1486 | case ELS_RJT_LOGIC: |
1451 | case ELS_RJT_UNAB: | 1487 | case ELS_RJT_UNAB: |
@@ -1460,7 +1496,7 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1460 | fc_fcp_retry_cmd(fsp); | 1496 | fc_fcp_retry_cmd(fsp); |
1461 | break; | 1497 | break; |
1462 | } | 1498 | } |
1463 | fc_fcp_recovery(fsp); | 1499 | fc_fcp_recovery(fsp, FC_ERROR); |
1464 | break; | 1500 | break; |
1465 | } | 1501 | } |
1466 | } else if (opcode == ELS_LS_ACC) { | 1502 | } else if (opcode == ELS_LS_ACC) { |
@@ -1498,12 +1534,12 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1498 | } | 1534 | } |
1499 | fc_fcp_srr(fsp, r_ctl, offset); | 1535 | fc_fcp_srr(fsp, r_ctl, offset); |
1500 | } else if (e_stat & ESB_ST_SEQ_INIT) { | 1536 | } else if (e_stat & ESB_ST_SEQ_INIT) { |
1501 | 1537 | unsigned int rec_tov = get_fsp_rec_tov(fsp); | |
1502 | /* | 1538 | /* |
1503 | * The remote port has the initiative, so just | 1539 | * The remote port has the initiative, so just |
1504 | * keep waiting for it to complete. | 1540 | * keep waiting for it to complete. |
1505 | */ | 1541 | */ |
1506 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); | 1542 | fc_fcp_timer_set(fsp, rec_tov); |
1507 | } else { | 1543 | } else { |
1508 | 1544 | ||
1509 | /* | 1545 | /* |
@@ -1575,7 +1611,7 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1575 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | 1611 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) |
1576 | fc_fcp_rec(fsp); | 1612 | fc_fcp_rec(fsp); |
1577 | else | 1613 | else |
1578 | fc_fcp_recovery(fsp); | 1614 | fc_fcp_recovery(fsp, FC_ERROR); |
1579 | break; | 1615 | break; |
1580 | } | 1616 | } |
1581 | fc_fcp_unlock_pkt(fsp); | 1617 | fc_fcp_unlock_pkt(fsp); |
@@ -1587,9 +1623,9 @@ out: | |||
1587 | * fc_fcp_recovery() - Handler for fcp_pkt recovery | 1623 | * fc_fcp_recovery() - Handler for fcp_pkt recovery |
1588 | * @fsp: The FCP pkt that needs to be aborted | 1624 | * @fsp: The FCP pkt that needs to be aborted |
1589 | */ | 1625 | */ |
1590 | static void fc_fcp_recovery(struct fc_fcp_pkt *fsp) | 1626 | static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code) |
1591 | { | 1627 | { |
1592 | fsp->status_code = FC_CMD_RECOVERY; | 1628 | fsp->status_code = code; |
1593 | fsp->cdb_status = 0; | 1629 | fsp->cdb_status = 0; |
1594 | fsp->io_status = 0; | 1630 | fsp->io_status = 0; |
1595 | /* | 1631 | /* |
@@ -1616,6 +1652,7 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | |||
1616 | struct fcp_srr *srr; | 1652 | struct fcp_srr *srr; |
1617 | struct fc_frame *fp; | 1653 | struct fc_frame *fp; |
1618 | u8 cdb_op; | 1654 | u8 cdb_op; |
1655 | unsigned int rec_tov; | ||
1619 | 1656 | ||
1620 | rport = fsp->rport; | 1657 | rport = fsp->rport; |
1621 | rpriv = rport->dd_data; | 1658 | rpriv = rport->dd_data; |
@@ -1640,8 +1677,9 @@ static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset) | |||
1640 | rpriv->local_port->port_id, FC_TYPE_FCP, | 1677 | rpriv->local_port->port_id, FC_TYPE_FCP, |
1641 | FC_FCTL_REQ, 0); | 1678 | FC_FCTL_REQ, 0); |
1642 | 1679 | ||
1680 | rec_tov = get_fsp_rec_tov(fsp); | ||
1643 | seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, | 1681 | seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL, |
1644 | fsp, jiffies_to_msecs(FC_SCSI_REC_TOV)); | 1682 | fsp, jiffies_to_msecs(rec_tov)); |
1645 | if (!seq) | 1683 | if (!seq) |
1646 | goto retry; | 1684 | goto retry; |
1647 | 1685 | ||
@@ -1665,6 +1703,7 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1665 | { | 1703 | { |
1666 | struct fc_fcp_pkt *fsp = arg; | 1704 | struct fc_fcp_pkt *fsp = arg; |
1667 | struct fc_frame_header *fh; | 1705 | struct fc_frame_header *fh; |
1706 | unsigned int rec_tov; | ||
1668 | 1707 | ||
1669 | if (IS_ERR(fp)) { | 1708 | if (IS_ERR(fp)) { |
1670 | fc_fcp_srr_error(fsp, fp); | 1709 | fc_fcp_srr_error(fsp, fp); |
@@ -1691,11 +1730,12 @@ static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) | |||
1691 | switch (fc_frame_payload_op(fp)) { | 1730 | switch (fc_frame_payload_op(fp)) { |
1692 | case ELS_LS_ACC: | 1731 | case ELS_LS_ACC: |
1693 | fsp->recov_retry = 0; | 1732 | fsp->recov_retry = 0; |
1694 | fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV); | 1733 | rec_tov = get_fsp_rec_tov(fsp); |
1734 | fc_fcp_timer_set(fsp, rec_tov); | ||
1695 | break; | 1735 | break; |
1696 | case ELS_LS_RJT: | 1736 | case ELS_LS_RJT: |
1697 | default: | 1737 | default: |
1698 | fc_fcp_recovery(fsp); | 1738 | fc_fcp_recovery(fsp, FC_ERROR); |
1699 | break; | 1739 | break; |
1700 | } | 1740 | } |
1701 | fc_fcp_unlock_pkt(fsp); | 1741 | fc_fcp_unlock_pkt(fsp); |
@@ -1721,7 +1761,7 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp) | |||
1721 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) | 1761 | if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY) |
1722 | fc_fcp_rec(fsp); | 1762 | fc_fcp_rec(fsp); |
1723 | else | 1763 | else |
1724 | fc_fcp_recovery(fsp); | 1764 | fc_fcp_recovery(fsp, FC_TIMED_OUT); |
1725 | break; | 1765 | break; |
1726 | case -FC_EX_CLOSED: /* e.g., link failure */ | 1766 | case -FC_EX_CLOSED: /* e.g., link failure */ |
1727 | /* fall through */ | 1767 | /* fall through */ |
@@ -1820,19 +1860,17 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs | |||
1820 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { | 1860 | if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { |
1821 | fsp->req_flags = FC_SRB_READ; | 1861 | fsp->req_flags = FC_SRB_READ; |
1822 | stats->InputRequests++; | 1862 | stats->InputRequests++; |
1823 | stats->InputMegabytes = fsp->data_len; | 1863 | stats->InputBytes += fsp->data_len; |
1824 | } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { | 1864 | } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { |
1825 | fsp->req_flags = FC_SRB_WRITE; | 1865 | fsp->req_flags = FC_SRB_WRITE; |
1826 | stats->OutputRequests++; | 1866 | stats->OutputRequests++; |
1827 | stats->OutputMegabytes = fsp->data_len; | 1867 | stats->OutputBytes += fsp->data_len; |
1828 | } else { | 1868 | } else { |
1829 | fsp->req_flags = 0; | 1869 | fsp->req_flags = 0; |
1830 | stats->ControlRequests++; | 1870 | stats->ControlRequests++; |
1831 | } | 1871 | } |
1832 | put_cpu(); | 1872 | put_cpu(); |
1833 | 1873 | ||
1834 | fsp->tgt_flags = rpriv->flags; | ||
1835 | |||
1836 | init_timer(&fsp->timer); | 1874 | init_timer(&fsp->timer); |
1837 | fsp->timer.data = (unsigned long)fsp; | 1875 | fsp->timer.data = (unsigned long)fsp; |
1838 | 1876 | ||
@@ -1946,18 +1984,29 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) | |||
1946 | break; | 1984 | break; |
1947 | case FC_CMD_ABORTED: | 1985 | case FC_CMD_ABORTED: |
1948 | FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " | 1986 | FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " |
1949 | "due to FC_CMD_ABORTED\n"); | 1987 | "due to FC_CMD_ABORTED\n"); |
1950 | sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; | 1988 | sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; |
1951 | break; | 1989 | break; |
1952 | case FC_CMD_RECOVERY: | ||
1953 | sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; | ||
1954 | break; | ||
1955 | case FC_CMD_RESET: | 1990 | case FC_CMD_RESET: |
1991 | FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml " | ||
1992 | "due to FC_CMD_RESET\n"); | ||
1956 | sc_cmd->result = (DID_RESET << 16); | 1993 | sc_cmd->result = (DID_RESET << 16); |
1957 | break; | 1994 | break; |
1958 | case FC_HRD_ERROR: | 1995 | case FC_HRD_ERROR: |
1996 | FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml " | ||
1997 | "due to FC_HRD_ERROR\n"); | ||
1959 | sc_cmd->result = (DID_NO_CONNECT << 16); | 1998 | sc_cmd->result = (DID_NO_CONNECT << 16); |
1960 | break; | 1999 | break; |
2000 | case FC_CRC_ERROR: | ||
2001 | FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml " | ||
2002 | "due to FC_CRC_ERROR\n"); | ||
2003 | sc_cmd->result = (DID_PARITY << 16); | ||
2004 | break; | ||
2005 | case FC_TIMED_OUT: | ||
2006 | FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml " | ||
2007 | "due to FC_TIMED_OUT\n"); | ||
2008 | sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; | ||
2009 | break; | ||
1961 | default: | 2010 | default: |
1962 | FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " | 2011 | FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml " |
1963 | "due to unknown error\n"); | 2012 | "due to unknown error\n"); |
@@ -2004,7 +2053,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) | |||
2004 | fsp = CMD_SP(sc_cmd); | 2053 | fsp = CMD_SP(sc_cmd); |
2005 | if (!fsp) { | 2054 | if (!fsp) { |
2006 | /* command completed while scsi eh was setting up */ | 2055 | /* command completed while scsi eh was setting up */ |
2007 | spin_unlock_irqrestore(lport->host->host_lock, flags); | 2056 | spin_unlock_irqrestore(&si->scsi_queue_lock, flags); |
2008 | return SUCCESS; | 2057 | return SUCCESS; |
2009 | } | 2058 | } |
2010 | /* grab a ref so the fsp and sc_cmd cannot be released from under us */ | 2059 | /* grab a ref so the fsp and sc_cmd cannot be released from under us */ |
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index 16d2162dda1f..eea0c3541b71 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h | |||
@@ -66,9 +66,21 @@ extern unsigned int fc_debug_logging; | |||
66 | 66 | ||
67 | #define FC_FCP_DBG(pkt, fmt, args...) \ | 67 | #define FC_FCP_DBG(pkt, fmt, args...) \ |
68 | FC_CHECK_LOGGING(FC_FCP_LOGGING, \ | 68 | FC_CHECK_LOGGING(FC_FCP_LOGGING, \ |
69 | printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \ | 69 | { \ |
70 | if ((pkt)->seq_ptr) { \ | ||
71 | struct fc_exch *_ep = NULL; \ | ||
72 | _ep = fc_seq_exch((pkt)->seq_ptr); \ | ||
73 | printk(KERN_INFO "host%u: fcp: %6.6x: " \ | ||
74 | "xid %04x-%04x: " fmt, \ | ||
70 | (pkt)->lp->host->host_no, \ | 75 | (pkt)->lp->host->host_no, \ |
71 | pkt->rport->port_id, ##args)) | 76 | (pkt)->rport->port_id, \ |
77 | (_ep)->oxid, (_ep)->rxid, ##args); \ | ||
78 | } else { \ | ||
79 | printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \ | ||
80 | (pkt)->lp->host->host_no, \ | ||
81 | (pkt)->rport->port_id, ##args); \ | ||
82 | } \ | ||
83 | }) | ||
72 | 84 | ||
73 | #define FC_EXCH_DBG(exch, fmt, args...) \ | 85 | #define FC_EXCH_DBG(exch, fmt, args...) \ |
74 | FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ | 86 | FC_CHECK_LOGGING(FC_EXCH_LOGGING, \ |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 9be63edbf8fb..c5a10f94f845 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -288,6 +288,8 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) | |||
288 | struct fc_lport *lport = shost_priv(shost); | 288 | struct fc_lport *lport = shost_priv(shost); |
289 | struct timespec v0, v1; | 289 | struct timespec v0, v1; |
290 | unsigned int cpu; | 290 | unsigned int cpu; |
291 | u64 fcp_in_bytes = 0; | ||
292 | u64 fcp_out_bytes = 0; | ||
291 | 293 | ||
292 | fcoe_stats = &lport->host_stats; | 294 | fcoe_stats = &lport->host_stats; |
293 | memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); | 295 | memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); |
@@ -310,10 +312,12 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) | |||
310 | fcoe_stats->fcp_input_requests += stats->InputRequests; | 312 | fcoe_stats->fcp_input_requests += stats->InputRequests; |
311 | fcoe_stats->fcp_output_requests += stats->OutputRequests; | 313 | fcoe_stats->fcp_output_requests += stats->OutputRequests; |
312 | fcoe_stats->fcp_control_requests += stats->ControlRequests; | 314 | fcoe_stats->fcp_control_requests += stats->ControlRequests; |
313 | fcoe_stats->fcp_input_megabytes += stats->InputMegabytes; | 315 | fcp_in_bytes += stats->InputBytes; |
314 | fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes; | 316 | fcp_out_bytes += stats->OutputBytes; |
315 | fcoe_stats->link_failure_count += stats->LinkFailureCount; | 317 | fcoe_stats->link_failure_count += stats->LinkFailureCount; |
316 | } | 318 | } |
319 | fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); | ||
320 | fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); | ||
317 | fcoe_stats->lip_count = -1; | 321 | fcoe_stats->lip_count = -1; |
318 | fcoe_stats->nos_count = -1; | 322 | fcoe_stats->nos_count = -1; |
319 | fcoe_stats->loss_of_sync_count = -1; | 323 | fcoe_stats->loss_of_sync_count = -1; |
@@ -1703,8 +1707,10 @@ static int fc_lport_els_request(struct fc_bsg_job *job, | |||
1703 | info->sg = job->reply_payload.sg_list; | 1707 | info->sg = job->reply_payload.sg_list; |
1704 | 1708 | ||
1705 | if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, | 1709 | if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, |
1706 | NULL, info, tov)) | 1710 | NULL, info, tov)) { |
1711 | kfree(info); | ||
1707 | return -ECOMM; | 1712 | return -ECOMM; |
1713 | } | ||
1708 | return 0; | 1714 | return 0; |
1709 | } | 1715 | } |
1710 | 1716 | ||
@@ -1762,8 +1768,10 @@ static int fc_lport_ct_request(struct fc_bsg_job *job, | |||
1762 | info->sg = job->reply_payload.sg_list; | 1768 | info->sg = job->reply_payload.sg_list; |
1763 | 1769 | ||
1764 | if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, | 1770 | if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, |
1765 | NULL, info, tov)) | 1771 | NULL, info, tov)) { |
1772 | kfree(info); | ||
1766 | return -ECOMM; | 1773 | return -ECOMM; |
1774 | } | ||
1767 | return 0; | 1775 | return 0; |
1768 | } | 1776 | } |
1769 | 1777 | ||
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a84ef13ed74a..a7175adab32d 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -652,7 +652,7 @@ void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
652 | FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp)); | 652 | FC_RPORT_DBG(rdata, "Received a FLOGI %s\n", fc_els_resp_type(fp)); |
653 | 653 | ||
654 | if (fp == ERR_PTR(-FC_EX_CLOSED)) | 654 | if (fp == ERR_PTR(-FC_EX_CLOSED)) |
655 | return; | 655 | goto put; |
656 | 656 | ||
657 | mutex_lock(&rdata->rp_mutex); | 657 | mutex_lock(&rdata->rp_mutex); |
658 | 658 | ||
@@ -689,6 +689,7 @@ out: | |||
689 | fc_frame_free(fp); | 689 | fc_frame_free(fp); |
690 | err: | 690 | err: |
691 | mutex_unlock(&rdata->rp_mutex); | 691 | mutex_unlock(&rdata->rp_mutex); |
692 | put: | ||
692 | kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); | 693 | kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); |
693 | return; | 694 | return; |
694 | bad: | 695 | bad: |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c15fde808c33..da8b61543ee4 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -505,6 +505,7 @@ static void iscsi_free_task(struct iscsi_task *task) | |||
505 | struct iscsi_conn *conn = task->conn; | 505 | struct iscsi_conn *conn = task->conn; |
506 | struct iscsi_session *session = conn->session; | 506 | struct iscsi_session *session = conn->session; |
507 | struct scsi_cmnd *sc = task->sc; | 507 | struct scsi_cmnd *sc = task->sc; |
508 | int oldstate = task->state; | ||
508 | 509 | ||
509 | ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", | 510 | ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", |
510 | task->itt, task->state, task->sc); | 511 | task->itt, task->state, task->sc); |
@@ -525,10 +526,10 @@ static void iscsi_free_task(struct iscsi_task *task) | |||
525 | /* SCSI eh reuses commands to verify us */ | 526 | /* SCSI eh reuses commands to verify us */ |
526 | sc->SCp.ptr = NULL; | 527 | sc->SCp.ptr = NULL; |
527 | /* | 528 | /* |
528 | * queue command may call this to free the task, but | 529 | * queue command may call this to free the task, so |
529 | * not have setup the sc callback | 530 | * it will decide how to return sc to scsi-ml. |
530 | */ | 531 | */ |
531 | if (sc->scsi_done) | 532 | if (oldstate != ISCSI_TASK_REQUEUE_SCSIQ) |
532 | sc->scsi_done(sc); | 533 | sc->scsi_done(sc); |
533 | } | 534 | } |
534 | } | 535 | } |
@@ -539,11 +540,12 @@ void __iscsi_get_task(struct iscsi_task *task) | |||
539 | } | 540 | } |
540 | EXPORT_SYMBOL_GPL(__iscsi_get_task); | 541 | EXPORT_SYMBOL_GPL(__iscsi_get_task); |
541 | 542 | ||
542 | static void __iscsi_put_task(struct iscsi_task *task) | 543 | void __iscsi_put_task(struct iscsi_task *task) |
543 | { | 544 | { |
544 | if (atomic_dec_and_test(&task->refcount)) | 545 | if (atomic_dec_and_test(&task->refcount)) |
545 | iscsi_free_task(task); | 546 | iscsi_free_task(task); |
546 | } | 547 | } |
548 | EXPORT_SYMBOL_GPL(__iscsi_put_task); | ||
547 | 549 | ||
548 | void iscsi_put_task(struct iscsi_task *task) | 550 | void iscsi_put_task(struct iscsi_task *task) |
549 | { | 551 | { |
@@ -571,7 +573,8 @@ static void iscsi_complete_task(struct iscsi_task *task, int state) | |||
571 | task->itt, task->state, task->sc); | 573 | task->itt, task->state, task->sc); |
572 | if (task->state == ISCSI_TASK_COMPLETED || | 574 | if (task->state == ISCSI_TASK_COMPLETED || |
573 | task->state == ISCSI_TASK_ABRT_TMF || | 575 | task->state == ISCSI_TASK_ABRT_TMF || |
574 | task->state == ISCSI_TASK_ABRT_SESS_RECOV) | 576 | task->state == ISCSI_TASK_ABRT_SESS_RECOV || |
577 | task->state == ISCSI_TASK_REQUEUE_SCSIQ) | ||
575 | return; | 578 | return; |
576 | WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); | 579 | WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); |
577 | task->state = state; | 580 | task->state = state; |
@@ -1335,17 +1338,16 @@ void iscsi_session_failure(struct iscsi_session *session, | |||
1335 | { | 1338 | { |
1336 | struct iscsi_conn *conn; | 1339 | struct iscsi_conn *conn; |
1337 | struct device *dev; | 1340 | struct device *dev; |
1338 | unsigned long flags; | ||
1339 | 1341 | ||
1340 | spin_lock_irqsave(&session->lock, flags); | 1342 | spin_lock_bh(&session->lock); |
1341 | conn = session->leadconn; | 1343 | conn = session->leadconn; |
1342 | if (session->state == ISCSI_STATE_TERMINATE || !conn) { | 1344 | if (session->state == ISCSI_STATE_TERMINATE || !conn) { |
1343 | spin_unlock_irqrestore(&session->lock, flags); | 1345 | spin_unlock_bh(&session->lock); |
1344 | return; | 1346 | return; |
1345 | } | 1347 | } |
1346 | 1348 | ||
1347 | dev = get_device(&conn->cls_conn->dev); | 1349 | dev = get_device(&conn->cls_conn->dev); |
1348 | spin_unlock_irqrestore(&session->lock, flags); | 1350 | spin_unlock_bh(&session->lock); |
1349 | if (!dev) | 1351 | if (!dev) |
1350 | return; | 1352 | return; |
1351 | /* | 1353 | /* |
@@ -1364,17 +1366,16 @@ EXPORT_SYMBOL_GPL(iscsi_session_failure); | |||
1364 | void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) | 1366 | void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err) |
1365 | { | 1367 | { |
1366 | struct iscsi_session *session = conn->session; | 1368 | struct iscsi_session *session = conn->session; |
1367 | unsigned long flags; | ||
1368 | 1369 | ||
1369 | spin_lock_irqsave(&session->lock, flags); | 1370 | spin_lock_bh(&session->lock); |
1370 | if (session->state == ISCSI_STATE_FAILED) { | 1371 | if (session->state == ISCSI_STATE_FAILED) { |
1371 | spin_unlock_irqrestore(&session->lock, flags); | 1372 | spin_unlock_bh(&session->lock); |
1372 | return; | 1373 | return; |
1373 | } | 1374 | } |
1374 | 1375 | ||
1375 | if (conn->stop_stage == 0) | 1376 | if (conn->stop_stage == 0) |
1376 | session->state = ISCSI_STATE_FAILED; | 1377 | session->state = ISCSI_STATE_FAILED; |
1377 | spin_unlock_irqrestore(&session->lock, flags); | 1378 | spin_unlock_bh(&session->lock); |
1378 | 1379 | ||
1379 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); | 1380 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); |
1380 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); | 1381 | set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); |
@@ -1599,27 +1600,23 @@ enum { | |||
1599 | FAILURE_SESSION_NOT_READY, | 1600 | FAILURE_SESSION_NOT_READY, |
1600 | }; | 1601 | }; |
1601 | 1602 | ||
1602 | static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) | 1603 | int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) |
1603 | { | 1604 | { |
1604 | struct iscsi_cls_session *cls_session; | 1605 | struct iscsi_cls_session *cls_session; |
1605 | struct Scsi_Host *host; | ||
1606 | struct iscsi_host *ihost; | 1606 | struct iscsi_host *ihost; |
1607 | int reason = 0; | 1607 | int reason = 0; |
1608 | struct iscsi_session *session; | 1608 | struct iscsi_session *session; |
1609 | struct iscsi_conn *conn; | 1609 | struct iscsi_conn *conn; |
1610 | struct iscsi_task *task = NULL; | 1610 | struct iscsi_task *task = NULL; |
1611 | 1611 | ||
1612 | sc->scsi_done = done; | ||
1613 | sc->result = 0; | 1612 | sc->result = 0; |
1614 | sc->SCp.ptr = NULL; | 1613 | sc->SCp.ptr = NULL; |
1615 | 1614 | ||
1616 | host = sc->device->host; | ||
1617 | ihost = shost_priv(host); | 1615 | ihost = shost_priv(host); |
1618 | spin_unlock(host->host_lock); | ||
1619 | 1616 | ||
1620 | cls_session = starget_to_session(scsi_target(sc->device)); | 1617 | cls_session = starget_to_session(scsi_target(sc->device)); |
1621 | session = cls_session->dd_data; | 1618 | session = cls_session->dd_data; |
1622 | spin_lock(&session->lock); | 1619 | spin_lock_bh(&session->lock); |
1623 | 1620 | ||
1624 | reason = iscsi_session_chkready(cls_session); | 1621 | reason = iscsi_session_chkready(cls_session); |
1625 | if (reason) { | 1622 | if (reason) { |
@@ -1705,25 +1702,21 @@ static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi | |||
1705 | } | 1702 | } |
1706 | 1703 | ||
1707 | session->queued_cmdsn++; | 1704 | session->queued_cmdsn++; |
1708 | spin_unlock(&session->lock); | 1705 | spin_unlock_bh(&session->lock); |
1709 | spin_lock(host->host_lock); | ||
1710 | return 0; | 1706 | return 0; |
1711 | 1707 | ||
1712 | prepd_reject: | 1708 | prepd_reject: |
1713 | sc->scsi_done = NULL; | 1709 | iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); |
1714 | iscsi_complete_task(task, ISCSI_TASK_COMPLETED); | ||
1715 | reject: | 1710 | reject: |
1716 | spin_unlock(&session->lock); | 1711 | spin_unlock_bh(&session->lock); |
1717 | ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", | 1712 | ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", |
1718 | sc->cmnd[0], reason); | 1713 | sc->cmnd[0], reason); |
1719 | spin_lock(host->host_lock); | ||
1720 | return SCSI_MLQUEUE_TARGET_BUSY; | 1714 | return SCSI_MLQUEUE_TARGET_BUSY; |
1721 | 1715 | ||
1722 | prepd_fault: | 1716 | prepd_fault: |
1723 | sc->scsi_done = NULL; | 1717 | iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); |
1724 | iscsi_complete_task(task, ISCSI_TASK_COMPLETED); | ||
1725 | fault: | 1718 | fault: |
1726 | spin_unlock(&session->lock); | 1719 | spin_unlock_bh(&session->lock); |
1727 | ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", | 1720 | ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", |
1728 | sc->cmnd[0], reason); | 1721 | sc->cmnd[0], reason); |
1729 | if (!scsi_bidi_cmnd(sc)) | 1722 | if (!scsi_bidi_cmnd(sc)) |
@@ -1732,12 +1725,9 @@ fault: | |||
1732 | scsi_out(sc)->resid = scsi_out(sc)->length; | 1725 | scsi_out(sc)->resid = scsi_out(sc)->length; |
1733 | scsi_in(sc)->resid = scsi_in(sc)->length; | 1726 | scsi_in(sc)->resid = scsi_in(sc)->length; |
1734 | } | 1727 | } |
1735 | done(sc); | 1728 | sc->scsi_done(sc); |
1736 | spin_lock(host->host_lock); | ||
1737 | return 0; | 1729 | return 0; |
1738 | } | 1730 | } |
1739 | |||
1740 | DEF_SCSI_QCMD(iscsi_queuecommand) | ||
1741 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); | 1731 | EXPORT_SYMBOL_GPL(iscsi_queuecommand); |
1742 | 1732 | ||
1743 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) | 1733 | int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) |
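Note: with DEF_SCSI_QCMD dropped above, iscsi_queuecommand is now entered without the host lock and irqs enabled, so the session lock only needs bh-disabling protection and completion goes through sc->scsi_done. A minimal sketch of the same calling convention for a hypothetical driver follows; everything except the queuecommand signature, shost_priv(), SCSI_MLQUEUE_TARGET_BUSY and sc->scsi_done is illustrative, not from this patch.

static int foo_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
{
        struct foo_session *sess = shost_priv(host);    /* per-host private data */

        spin_lock_bh(&sess->lock);              /* no host_lock held, no irq disable */
        if (!foo_session_ready(sess)) {
                spin_unlock_bh(&sess->lock);
                return SCSI_MLQUEUE_TARGET_BUSY;        /* midlayer will requeue */
        }
        foo_start_io(sess, sc);                 /* sc->scsi_done(sc) runs at completion */
        spin_unlock_bh(&sess->lock);
        return 0;
}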
@@ -1795,9 +1785,9 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | |||
1795 | NULL, 0); | 1785 | NULL, 0); |
1796 | if (!task) { | 1786 | if (!task) { |
1797 | spin_unlock_bh(&session->lock); | 1787 | spin_unlock_bh(&session->lock); |
1788 | iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n"); | ||
1798 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 1789 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); |
1799 | spin_lock_bh(&session->lock); | 1790 | spin_lock_bh(&session->lock); |
1800 | ISCSI_DBG_EH(session, "tmf exec failure\n"); | ||
1801 | return -EPERM; | 1791 | return -EPERM; |
1802 | } | 1792 | } |
1803 | conn->tmfcmd_pdus_cnt++; | 1793 | conn->tmfcmd_pdus_cnt++; |
@@ -2202,7 +2192,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) | |||
2202 | goto success_unlocked; | 2192 | goto success_unlocked; |
2203 | case TMF_TIMEDOUT: | 2193 | case TMF_TIMEDOUT: |
2204 | spin_unlock_bh(&session->lock); | 2194 | spin_unlock_bh(&session->lock); |
2205 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 2195 | iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); |
2206 | goto failed_unlocked; | 2196 | goto failed_unlocked; |
2207 | case TMF_NOT_FOUND: | 2197 | case TMF_NOT_FOUND: |
2208 | if (!sc->SCp.ptr) { | 2198 | if (!sc->SCp.ptr) { |
@@ -2289,7 +2279,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
2289 | break; | 2279 | break; |
2290 | case TMF_TIMEDOUT: | 2280 | case TMF_TIMEDOUT: |
2291 | spin_unlock_bh(&session->lock); | 2281 | spin_unlock_bh(&session->lock); |
2292 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 2282 | iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); |
2293 | goto done; | 2283 | goto done; |
2294 | default: | 2284 | default: |
2295 | conn->tmf_state = TMF_INITIAL; | 2285 | conn->tmf_state = TMF_INITIAL; |
@@ -2370,7 +2360,7 @@ failed: | |||
2370 | * we drop the lock here but the leadconn cannot be destroyed while | 2360 | * we drop the lock here but the leadconn cannot be destroyed while |
2371 | * we are in the scsi eh | 2361 | * we are in the scsi eh |
2372 | */ | 2362 | */ |
2373 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 2363 | iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); |
2374 | 2364 | ||
2375 | ISCSI_DBG_EH(session, "wait for relogin\n"); | 2365 | ISCSI_DBG_EH(session, "wait for relogin\n"); |
2376 | wait_event_interruptible(conn->ehwait, | 2366 | wait_event_interruptible(conn->ehwait, |
@@ -2452,7 +2442,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc) | |||
2452 | break; | 2442 | break; |
2453 | case TMF_TIMEDOUT: | 2443 | case TMF_TIMEDOUT: |
2454 | spin_unlock_bh(&session->lock); | 2444 | spin_unlock_bh(&session->lock); |
2455 | iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); | 2445 | iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST); |
2456 | goto done; | 2446 | goto done; |
2457 | default: | 2447 | default: |
2458 | conn->tmf_state = TMF_INITIAL; | 2448 | conn->tmf_state = TMF_INITIAL; |
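Note: the TMF-timeout paths above switch from the generic ISCSI_ERR_CONN_FAILED to ISCSI_ERR_SCSI_EH_SESSION_RST, so userspace can tell an error-handler-driven session reset from an ordinary connection failure. A hedged sketch of how a consumer of these reason codes might distinguish them; the helper itself is illustrative and not part of the patch.

static const char *example_iscsi_err_name(enum iscsi_err err)
{
        switch (err) {
        case ISCSI_ERR_CONN_FAILED:
                return "connection failed";
        case ISCSI_ERR_SCSI_EH_SESSION_RST:
                return "session reset requested by SCSI error handling";
        default:
                return "other iSCSI error";
        }
}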
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index fe8b74c706d2..5257fdfe699a 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -28,6 +28,17 @@ | |||
28 | #include <scsi/scsi_transport_sas.h> | 28 | #include <scsi/scsi_transport_sas.h> |
29 | #include "../scsi_sas_internal.h" | 29 | #include "../scsi_sas_internal.h" |
30 | 30 | ||
31 | static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy) | ||
32 | { | ||
33 | struct sas_ha_struct *sas_ha = phy->ha; | ||
34 | |||
35 | if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, | ||
36 | SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports && | ||
37 | memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0)) | ||
38 | return false; | ||
39 | return true; | ||
40 | } | ||
41 | |||
31 | /** | 42 | /** |
32 | * sas_form_port -- add this phy to a port | 43 | * sas_form_port -- add this phy to a port |
33 | * @phy: the phy of interest | 44 | * @phy: the phy of interest |
@@ -45,8 +56,7 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
45 | unsigned long flags; | 56 | unsigned long flags; |
46 | 57 | ||
47 | if (port) { | 58 | if (port) { |
48 | if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, | 59 | if (!phy_is_wideport_member(port, phy)) |
49 | SAS_ADDR_SIZE) != 0) | ||
50 | sas_deform_port(phy); | 60 | sas_deform_port(phy); |
51 | else { | 61 | else { |
52 | SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", | 62 | SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", |
@@ -62,9 +72,7 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
62 | port = sas_ha->sas_port[i]; | 72 | port = sas_ha->sas_port[i]; |
63 | spin_lock(&port->phy_list_lock); | 73 | spin_lock(&port->phy_list_lock); |
64 | if (*(u64 *) port->sas_addr && | 74 | if (*(u64 *) port->sas_addr && |
65 | memcmp(port->attached_sas_addr, | 75 | phy_is_wideport_member(port, phy) && port->num_phys > 0) { |
66 | phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 && | ||
67 | port->num_phys > 0) { | ||
68 | /* wide port */ | 76 | /* wide port */ |
69 | SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, | 77 | SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, |
70 | port->id); | 78 | port->id); |
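Note: phy_is_wideport_member() centralizes the wide-port membership test used in both hunks above: the attached SAS addresses must match, and when the LLDD sets strict_wide_ports the local phy SAS address must match the port's address as well. A minimal sketch of an LLDD opting in; the function name is illustrative, strict_wide_ports is the field consulted above.

static void example_enable_strict_wide_ports(struct sas_ha_struct *sas_ha)
{
        /* Require both the attached and the local SAS address to match
         * before a phy is merged into an existing wide port. */
        sas_ha->strict_wide_ports = 1;
}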
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 196de40b906c..746dd3d7a092 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -464,12 +464,29 @@ struct unsol_rcv_ct_ctx { | |||
464 | #define UNSOL_VALID 0x00000001 | 464 | #define UNSOL_VALID 0x00000001 |
465 | }; | 465 | }; |
466 | 466 | ||
467 | #define LPFC_USER_LINK_SPEED_AUTO 0 /* auto select (default)*/ | ||
468 | #define LPFC_USER_LINK_SPEED_1G 1 /* 1 Gigabaud */ | ||
469 | #define LPFC_USER_LINK_SPEED_2G 2 /* 2 Gigabaud */ | ||
470 | #define LPFC_USER_LINK_SPEED_4G 4 /* 4 Gigabaud */ | ||
471 | #define LPFC_USER_LINK_SPEED_8G 8 /* 8 Gigabaud */ | ||
472 | #define LPFC_USER_LINK_SPEED_10G 10 /* 10 Gigabaud */ | ||
473 | #define LPFC_USER_LINK_SPEED_16G 16 /* 16 Gigabaud */ | ||
474 | #define LPFC_USER_LINK_SPEED_MAX LPFC_USER_LINK_SPEED_16G | ||
475 | #define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \ | ||
476 | (1 << LPFC_USER_LINK_SPEED_10G) | \ | ||
477 | (1 << LPFC_USER_LINK_SPEED_8G) | \ | ||
478 | (1 << LPFC_USER_LINK_SPEED_4G) | \ | ||
479 | (1 << LPFC_USER_LINK_SPEED_2G) | \ | ||
480 | (1 << LPFC_USER_LINK_SPEED_1G) | \ | ||
481 | (1 << LPFC_USER_LINK_SPEED_AUTO)) | ||
482 | #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16" | ||
483 | |||
467 | struct lpfc_hba { | 484 | struct lpfc_hba { |
468 | /* SCSI interface function jump table entries */ | 485 | /* SCSI interface function jump table entries */ |
469 | int (*lpfc_new_scsi_buf) | 486 | int (*lpfc_new_scsi_buf) |
470 | (struct lpfc_vport *, int); | 487 | (struct lpfc_vport *, int); |
471 | struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) | 488 | struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) |
472 | (struct lpfc_hba *); | 489 | (struct lpfc_hba *, struct lpfc_nodelist *); |
473 | int (*lpfc_scsi_prep_dma_buf) | 490 | int (*lpfc_scsi_prep_dma_buf) |
474 | (struct lpfc_hba *, struct lpfc_scsi_buf *); | 491 | (struct lpfc_hba *, struct lpfc_scsi_buf *); |
475 | void (*lpfc_scsi_unprep_dma_buf) | 492 | void (*lpfc_scsi_unprep_dma_buf) |
@@ -545,7 +562,7 @@ struct lpfc_hba { | |||
545 | uint32_t hba_flag; /* hba generic flags */ | 562 | uint32_t hba_flag; /* hba generic flags */ |
546 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 563 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
547 | #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ | 564 | #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ |
548 | #define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ | 565 | #define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ |
549 | #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ | 566 | #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ |
550 | #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ | 567 | #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ |
551 | #define FCP_XRI_ABORT_EVENT 0x20 | 568 | #define FCP_XRI_ABORT_EVENT 0x20 |
@@ -557,6 +574,7 @@ struct lpfc_hba { | |||
557 | #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ | 574 | #define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ |
558 | #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ | 575 | #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ |
559 | #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ | 576 | #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ |
577 | #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ | ||
560 | uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ | 578 | uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ |
561 | struct lpfc_dmabuf slim2p; | 579 | struct lpfc_dmabuf slim2p; |
562 | 580 | ||
@@ -606,6 +624,7 @@ struct lpfc_hba { | |||
606 | /* HBA Config Parameters */ | 624 | /* HBA Config Parameters */ |
607 | uint32_t cfg_ack0; | 625 | uint32_t cfg_ack0; |
608 | uint32_t cfg_enable_npiv; | 626 | uint32_t cfg_enable_npiv; |
627 | uint32_t cfg_enable_rrq; | ||
609 | uint32_t cfg_topology; | 628 | uint32_t cfg_topology; |
610 | uint32_t cfg_link_speed; | 629 | uint32_t cfg_link_speed; |
611 | uint32_t cfg_cr_delay; | 630 | uint32_t cfg_cr_delay; |
@@ -716,6 +735,7 @@ struct lpfc_hba { | |||
716 | uint32_t total_scsi_bufs; | 735 | uint32_t total_scsi_bufs; |
717 | struct list_head lpfc_iocb_list; | 736 | struct list_head lpfc_iocb_list; |
718 | uint32_t total_iocbq_bufs; | 737 | uint32_t total_iocbq_bufs; |
738 | struct list_head active_rrq_list; | ||
719 | spinlock_t hbalock; | 739 | spinlock_t hbalock; |
720 | 740 | ||
721 | /* pci_mem_pools */ | 741 | /* pci_mem_pools */ |
@@ -728,6 +748,7 @@ struct lpfc_hba { | |||
728 | 748 | ||
729 | mempool_t *mbox_mem_pool; | 749 | mempool_t *mbox_mem_pool; |
730 | mempool_t *nlp_mem_pool; | 750 | mempool_t *nlp_mem_pool; |
751 | mempool_t *rrq_pool; | ||
731 | 752 | ||
732 | struct fc_host_statistics link_stats; | 753 | struct fc_host_statistics link_stats; |
733 | enum intr_type_t intr_type; | 754 | enum intr_type_t intr_type; |
@@ -784,6 +805,7 @@ struct lpfc_hba { | |||
784 | unsigned long skipped_hb; | 805 | unsigned long skipped_hb; |
785 | struct timer_list hb_tmofunc; | 806 | struct timer_list hb_tmofunc; |
786 | uint8_t hb_outstanding; | 807 | uint8_t hb_outstanding; |
808 | struct timer_list rrq_tmr; | ||
787 | enum hba_temp_state over_temp_state; | 809 | enum hba_temp_state over_temp_state; |
788 | /* ndlp reference management */ | 810 | /* ndlp reference management */ |
789 | spinlock_t ndlp_lock; | 811 | spinlock_t ndlp_lock; |
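Note: the new LPFC_USER_LINK_SPEED_* values keep the user-visible speed numbers (0, 1, 2, 4, 8, 10, 16), and LPFC_USER_LINK_SPEED_BITMAP encodes exactly which of those numbers are legal, so validation reduces to a range check plus a bit test. A small sketch using the constants defined above; the helper name is illustrative.

static bool lpfc_user_speed_is_valid(int val)
{
        /* e.g. val = 16: bit 16 is set in the bitmap -> valid;
         *      val = 3:  bit 3 is clear              -> rejected */
        return val >= 0 && val <= LPFC_USER_LINK_SPEED_MAX &&
               (LPFC_USER_LINK_SPEED_BITMAP & (1 << val));
}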
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c1cbec01345d..c06491b5862f 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -52,10 +52,6 @@ | |||
52 | #define LPFC_MIN_DEVLOSS_TMO 1 | 52 | #define LPFC_MIN_DEVLOSS_TMO 1 |
53 | #define LPFC_MAX_DEVLOSS_TMO 255 | 53 | #define LPFC_MAX_DEVLOSS_TMO 255 |
54 | 54 | ||
55 | #define LPFC_MAX_LINK_SPEED 8 | ||
56 | #define LPFC_LINK_SPEED_BITMAP 0x00000117 | ||
57 | #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8" | ||
58 | |||
59 | /** | 55 | /** |
60 | * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules | 56 | * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules |
61 | * @incr: integer to convert. | 57 | * @incr: integer to convert. |
@@ -463,7 +459,7 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr, | |||
463 | if (phba->sli.sli_flag & LPFC_MENLO_MAINT) | 459 | if (phba->sli.sli_flag & LPFC_MENLO_MAINT) |
464 | len += snprintf(buf + len, PAGE_SIZE-len, | 460 | len += snprintf(buf + len, PAGE_SIZE-len, |
465 | " Menlo Maint Mode\n"); | 461 | " Menlo Maint Mode\n"); |
466 | else if (phba->fc_topology == TOPOLOGY_LOOP) { | 462 | else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
467 | if (vport->fc_flag & FC_PUBLIC_LOOP) | 463 | if (vport->fc_flag & FC_PUBLIC_LOOP) |
468 | len += snprintf(buf + len, PAGE_SIZE-len, | 464 | len += snprintf(buf + len, PAGE_SIZE-len, |
469 | " Public Loop\n"); | 465 | " Public Loop\n"); |
@@ -1981,6 +1977,13 @@ lpfc_param_show(enable_npiv); | |||
1981 | lpfc_param_init(enable_npiv, 1, 0, 1); | 1977 | lpfc_param_init(enable_npiv, 1, 0, 1); |
1982 | static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); | 1978 | static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); |
1983 | 1979 | ||
1980 | int lpfc_enable_rrq; | ||
1981 | module_param(lpfc_enable_rrq, int, 0); | ||
1982 | MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); | ||
1983 | lpfc_param_show(enable_rrq); | ||
1984 | lpfc_param_init(enable_rrq, 0, 0, 1); | ||
1985 | static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL); | ||
1986 | |||
1984 | /* | 1987 | /* |
1985 | # lpfc_suppress_link_up: Bring link up at initialization | 1988 | # lpfc_suppress_link_up: Bring link up at initialization |
1986 | # 0x0 = bring link up (issue MBX_INIT_LINK) | 1989 | # 0x0 = bring link up (issue MBX_INIT_LINK) |
@@ -2837,14 +2840,8 @@ static struct bin_attribute sysfs_drvr_stat_data_attr = { | |||
2837 | /* | 2840 | /* |
2838 | # lpfc_link_speed: Link speed selection for initializing the Fibre Channel | 2841 | # lpfc_link_speed: Link speed selection for initializing the Fibre Channel |
2839 | # connection. | 2842 | # connection. |
2840 | # 0 = auto select (default) | 2843 | # Value range is [0,16]. Default value is 0. |
2841 | # 1 = 1 Gigabaud | ||
2842 | # 2 = 2 Gigabaud | ||
2843 | # 4 = 4 Gigabaud | ||
2844 | # 8 = 8 Gigabaud | ||
2845 | # Value range is [0,8]. Default value is 0. | ||
2846 | */ | 2844 | */ |
2847 | |||
2848 | /** | 2845 | /** |
2849 | * lpfc_link_speed_set - Set the adapters link speed | 2846 | * lpfc_link_speed_set - Set the adapters link speed |
2850 | * @phba: lpfc_hba pointer. | 2847 | * @phba: lpfc_hba pointer. |
@@ -2869,7 +2866,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, | |||
2869 | struct Scsi_Host *shost = class_to_shost(dev); | 2866 | struct Scsi_Host *shost = class_to_shost(dev); |
2870 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | 2867 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; |
2871 | struct lpfc_hba *phba = vport->phba; | 2868 | struct lpfc_hba *phba = vport->phba; |
2872 | int val = 0; | 2869 | int val = LPFC_USER_LINK_SPEED_AUTO; |
2873 | int nolip = 0; | 2870 | int nolip = 0; |
2874 | const char *val_buf = buf; | 2871 | const char *val_buf = buf; |
2875 | int err; | 2872 | int err; |
@@ -2885,15 +2882,20 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, | |||
2885 | if (sscanf(val_buf, "%i", &val) != 1) | 2882 | if (sscanf(val_buf, "%i", &val) != 1) |
2886 | return -EINVAL; | 2883 | return -EINVAL; |
2887 | 2884 | ||
2888 | if (((val == LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || | 2885 | if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) || |
2889 | ((val == LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || | 2886 | ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) || |
2890 | ((val == LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || | 2887 | ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) || |
2891 | ((val == LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || | 2888 | ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) || |
2892 | ((val == LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb))) | 2889 | ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) || |
2890 | ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) { | ||
2891 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2892 | "2879 lpfc_link_speed attribute cannot be set " | ||
2893 | "to %d. Speed is not supported by this port.\n", | ||
2894 | val); | ||
2893 | return -EINVAL; | 2895 | return -EINVAL; |
2894 | 2896 | } | |
2895 | if ((val >= 0 && val <= 8) | 2897 | if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) && |
2896 | && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { | 2898 | (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) { |
2897 | prev_val = phba->cfg_link_speed; | 2899 | prev_val = phba->cfg_link_speed; |
2898 | phba->cfg_link_speed = val; | 2900 | phba->cfg_link_speed = val; |
2899 | if (nolip) | 2901 | if (nolip) |
@@ -2906,11 +2908,9 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, | |||
2906 | } else | 2908 | } else |
2907 | return strlen(buf); | 2909 | return strlen(buf); |
2908 | } | 2910 | } |
2909 | |||
2910 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2911 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2911 | "%d:0469 lpfc_link_speed attribute cannot be set to %d, " | 2912 | "0469 lpfc_link_speed attribute cannot be set to %d, " |
2912 | "allowed range is [0, 8]\n", | 2913 | "allowed values are ["LPFC_LINK_SPEED_STRING"]\n", val); |
2913 | phba->brd_no, val); | ||
2914 | return -EINVAL; | 2914 | return -EINVAL; |
2915 | } | 2915 | } |
2916 | 2916 | ||
@@ -2938,8 +2938,8 @@ lpfc_param_show(link_speed) | |||
2938 | static int | 2938 | static int |
2939 | lpfc_link_speed_init(struct lpfc_hba *phba, int val) | 2939 | lpfc_link_speed_init(struct lpfc_hba *phba, int val) |
2940 | { | 2940 | { |
2941 | if ((val >= 0 && val <= LPFC_MAX_LINK_SPEED) | 2941 | if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) && |
2942 | && (LPFC_LINK_SPEED_BITMAP & (1 << val))) { | 2942 | (LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) { |
2943 | phba->cfg_link_speed = val; | 2943 | phba->cfg_link_speed = val; |
2944 | return 0; | 2944 | return 0; |
2945 | } | 2945 | } |
@@ -2947,12 +2947,12 @@ lpfc_link_speed_init(struct lpfc_hba *phba, int val) | |||
2947 | "0405 lpfc_link_speed attribute cannot " | 2947 | "0405 lpfc_link_speed attribute cannot " |
2948 | "be set to %d, allowed values are " | 2948 | "be set to %d, allowed values are " |
2949 | "["LPFC_LINK_SPEED_STRING"]\n", val); | 2949 | "["LPFC_LINK_SPEED_STRING"]\n", val); |
2950 | phba->cfg_link_speed = 0; | 2950 | phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; |
2951 | return -EINVAL; | 2951 | return -EINVAL; |
2952 | } | 2952 | } |
2953 | 2953 | ||
2954 | static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, | 2954 | static DEVICE_ATTR(lpfc_link_speed, S_IRUGO | S_IWUSR, |
2955 | lpfc_link_speed_show, lpfc_link_speed_store); | 2955 | lpfc_link_speed_show, lpfc_link_speed_store); |
2956 | 2956 | ||
2957 | /* | 2957 | /* |
2958 | # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) | 2958 | # lpfc_aer_support: Support PCIe device Advanced Error Reporting (AER) |
@@ -3305,12 +3305,12 @@ LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, | |||
3305 | LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); | 3305 | LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver."); |
3306 | 3306 | ||
3307 | /* | 3307 | /* |
3308 | # lpfc_enable_hba_heartbeat: Enable HBA heartbeat timer.. | 3308 | # lpfc_enable_hba_heartbeat: Disable HBA heartbeat timer.. |
3309 | # 0 = HBA Heartbeat disabled | 3309 | # 0 = HBA Heartbeat disabled |
3310 | # 1 = HBA Heartbeat enabled (default) | 3310 | # 1 = HBA Heartbeat enabled (default) |
3311 | # Value range is [0,1]. Default value is 1. | 3311 | # Value range is [0,1]. Default value is 1. |
3312 | */ | 3312 | */ |
3313 | LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); | 3313 | LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat."); |
3314 | 3314 | ||
3315 | /* | 3315 | /* |
3316 | # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) | 3316 | # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) |
@@ -3401,6 +3401,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
3401 | &dev_attr_lpfc_fdmi_on, | 3401 | &dev_attr_lpfc_fdmi_on, |
3402 | &dev_attr_lpfc_max_luns, | 3402 | &dev_attr_lpfc_max_luns, |
3403 | &dev_attr_lpfc_enable_npiv, | 3403 | &dev_attr_lpfc_enable_npiv, |
3404 | &dev_attr_lpfc_enable_rrq, | ||
3404 | &dev_attr_nport_evt_cnt, | 3405 | &dev_attr_nport_evt_cnt, |
3405 | &dev_attr_board_mode, | 3406 | &dev_attr_board_mode, |
3406 | &dev_attr_max_vpi, | 3407 | &dev_attr_max_vpi, |
@@ -3798,8 +3799,7 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj, | |||
3798 | } | 3799 | } |
3799 | break; | 3800 | break; |
3800 | case MBX_READ_SPARM64: | 3801 | case MBX_READ_SPARM64: |
3801 | case MBX_READ_LA: | 3802 | case MBX_READ_TOPOLOGY: |
3802 | case MBX_READ_LA64: | ||
3803 | case MBX_REG_LOGIN: | 3803 | case MBX_REG_LOGIN: |
3804 | case MBX_REG_LOGIN64: | 3804 | case MBX_REG_LOGIN64: |
3805 | case MBX_CONFIG_PORT: | 3805 | case MBX_CONFIG_PORT: |
@@ -3989,7 +3989,7 @@ lpfc_get_host_port_type(struct Scsi_Host *shost) | |||
3989 | if (vport->port_type == LPFC_NPIV_PORT) { | 3989 | if (vport->port_type == LPFC_NPIV_PORT) { |
3990 | fc_host_port_type(shost) = FC_PORTTYPE_NPIV; | 3990 | fc_host_port_type(shost) = FC_PORTTYPE_NPIV; |
3991 | } else if (lpfc_is_link_up(phba)) { | 3991 | } else if (lpfc_is_link_up(phba)) { |
3992 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 3992 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
3993 | if (vport->fc_flag & FC_PUBLIC_LOOP) | 3993 | if (vport->fc_flag & FC_PUBLIC_LOOP) |
3994 | fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; | 3994 | fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; |
3995 | else | 3995 | else |
@@ -4058,23 +4058,26 @@ lpfc_get_host_speed(struct Scsi_Host *shost) | |||
4058 | 4058 | ||
4059 | if (lpfc_is_link_up(phba)) { | 4059 | if (lpfc_is_link_up(phba)) { |
4060 | switch(phba->fc_linkspeed) { | 4060 | switch(phba->fc_linkspeed) { |
4061 | case LA_1GHZ_LINK: | 4061 | case LPFC_LINK_SPEED_1GHZ: |
4062 | fc_host_speed(shost) = FC_PORTSPEED_1GBIT; | 4062 | fc_host_speed(shost) = FC_PORTSPEED_1GBIT; |
4063 | break; | 4063 | break; |
4064 | case LA_2GHZ_LINK: | 4064 | case LPFC_LINK_SPEED_2GHZ: |
4065 | fc_host_speed(shost) = FC_PORTSPEED_2GBIT; | 4065 | fc_host_speed(shost) = FC_PORTSPEED_2GBIT; |
4066 | break; | 4066 | break; |
4067 | case LA_4GHZ_LINK: | 4067 | case LPFC_LINK_SPEED_4GHZ: |
4068 | fc_host_speed(shost) = FC_PORTSPEED_4GBIT; | 4068 | fc_host_speed(shost) = FC_PORTSPEED_4GBIT; |
4069 | break; | 4069 | break; |
4070 | case LA_8GHZ_LINK: | 4070 | case LPFC_LINK_SPEED_8GHZ: |
4071 | fc_host_speed(shost) = FC_PORTSPEED_8GBIT; | 4071 | fc_host_speed(shost) = FC_PORTSPEED_8GBIT; |
4072 | break; | 4072 | break; |
4073 | case LA_10GHZ_LINK: | 4073 | case LPFC_LINK_SPEED_10GHZ: |
4074 | fc_host_speed(shost) = FC_PORTSPEED_10GBIT; | 4074 | fc_host_speed(shost) = FC_PORTSPEED_10GBIT; |
4075 | break; | 4075 | break; |
4076 | default: | 4076 | case LPFC_LINK_SPEED_16GHZ: |
4077 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | 4077 | fc_host_speed(shost) = FC_PORTSPEED_16GBIT; |
4078 | break; | ||
4079 | default: | ||
4080 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | ||
4078 | break; | 4081 | break; |
4079 | } | 4082 | } |
4080 | } else | 4083 | } else |
@@ -4097,7 +4100,7 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost) | |||
4097 | spin_lock_irq(shost->host_lock); | 4100 | spin_lock_irq(shost->host_lock); |
4098 | 4101 | ||
4099 | if ((vport->fc_flag & FC_FABRIC) || | 4102 | if ((vport->fc_flag & FC_FABRIC) || |
4100 | ((phba->fc_topology == TOPOLOGY_LOOP) && | 4103 | ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && |
4101 | (vport->fc_flag & FC_PUBLIC_LOOP))) | 4104 | (vport->fc_flag & FC_PUBLIC_LOOP))) |
4102 | node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); | 4105 | node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); |
4103 | else | 4106 | else |
@@ -4208,11 +4211,11 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
4208 | hs->invalid_crc_count -= lso->invalid_crc_count; | 4211 | hs->invalid_crc_count -= lso->invalid_crc_count; |
4209 | hs->error_frames -= lso->error_frames; | 4212 | hs->error_frames -= lso->error_frames; |
4210 | 4213 | ||
4211 | if (phba->hba_flag & HBA_FCOE_SUPPORT) { | 4214 | if (phba->hba_flag & HBA_FCOE_MODE) { |
4212 | hs->lip_count = -1; | 4215 | hs->lip_count = -1; |
4213 | hs->nos_count = (phba->link_events >> 1); | 4216 | hs->nos_count = (phba->link_events >> 1); |
4214 | hs->nos_count -= lso->link_events; | 4217 | hs->nos_count -= lso->link_events; |
4215 | } else if (phba->fc_topology == TOPOLOGY_LOOP) { | 4218 | } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
4216 | hs->lip_count = (phba->fc_eventTag >> 1); | 4219 | hs->lip_count = (phba->fc_eventTag >> 1); |
4217 | hs->lip_count -= lso->link_events; | 4220 | hs->lip_count -= lso->link_events; |
4218 | hs->nos_count = -1; | 4221 | hs->nos_count = -1; |
@@ -4303,7 +4306,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) | |||
4303 | lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; | 4306 | lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord; |
4304 | lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; | 4307 | lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt; |
4305 | lso->error_frames = pmb->un.varRdLnk.crcCnt; | 4308 | lso->error_frames = pmb->un.varRdLnk.crcCnt; |
4306 | if (phba->hba_flag & HBA_FCOE_SUPPORT) | 4309 | if (phba->hba_flag & HBA_FCOE_MODE) |
4307 | lso->link_events = (phba->link_events >> 1); | 4310 | lso->link_events = (phba->link_events >> 1); |
4308 | else | 4311 | else |
4309 | lso->link_events = (phba->fc_eventTag >> 1); | 4312 | lso->link_events = (phba->fc_eventTag >> 1); |
@@ -4615,6 +4618,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
4615 | lpfc_link_speed_init(phba, lpfc_link_speed); | 4618 | lpfc_link_speed_init(phba, lpfc_link_speed); |
4616 | lpfc_poll_tmo_init(phba, lpfc_poll_tmo); | 4619 | lpfc_poll_tmo_init(phba, lpfc_poll_tmo); |
4617 | lpfc_enable_npiv_init(phba, lpfc_enable_npiv); | 4620 | lpfc_enable_npiv_init(phba, lpfc_enable_npiv); |
4621 | lpfc_enable_rrq_init(phba, lpfc_enable_rrq); | ||
4618 | lpfc_use_msi_init(phba, lpfc_use_msi); | 4622 | lpfc_use_msi_init(phba, lpfc_use_msi); |
4619 | lpfc_fcp_imax_init(phba, lpfc_fcp_imax); | 4623 | lpfc_fcp_imax_init(phba, lpfc_fcp_imax); |
4620 | lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); | 4624 | lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); |
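Note: lpfc_enable_rrq follows the usual lpfc config-parameter flow: a module parameter, lpfc_param_init() to validate it, and lpfc_get_cfgparam() at probe time to copy it into phba->cfg_enable_rrq. A hedged sketch of how a runtime path could gate on it; the helper is illustrative, and RRQ handling is assumed here to apply only to SLI-4 ports.

static bool example_rrq_enabled(struct lpfc_hba *phba)
{
        /* cfg_enable_rrq is filled in by lpfc_get_cfgparam() above */
        return phba->sli_rev == LPFC_SLI_REV4 && phba->cfg_enable_rrq;
}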
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 7260c3af555a..0dd43bb91618 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -162,7 +162,6 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | |||
162 | struct lpfc_iocbq *cmdiocbq, | 162 | struct lpfc_iocbq *cmdiocbq, |
163 | struct lpfc_iocbq *rspiocbq) | 163 | struct lpfc_iocbq *rspiocbq) |
164 | { | 164 | { |
165 | unsigned long iflags; | ||
166 | struct bsg_job_data *dd_data; | 165 | struct bsg_job_data *dd_data; |
167 | struct fc_bsg_job *job; | 166 | struct fc_bsg_job *job; |
168 | IOCB_t *rsp; | 167 | IOCB_t *rsp; |
@@ -173,9 +172,10 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | |||
173 | int rc = 0; | 172 | int rc = 0; |
174 | 173 | ||
175 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | 174 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
176 | dd_data = cmdiocbq->context1; | 175 | dd_data = cmdiocbq->context2; |
177 | if (!dd_data) { | 176 | if (!dd_data) { |
178 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | 177 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
178 | lpfc_sli_release_iocbq(phba, cmdiocbq); | ||
179 | return; | 179 | return; |
180 | } | 180 | } |
181 | 181 | ||
@@ -183,17 +183,9 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | |||
183 | job = iocb->set_job; | 183 | job = iocb->set_job; |
184 | job->dd_data = NULL; /* so timeout handler does not reply */ | 184 | job->dd_data = NULL; /* so timeout handler does not reply */ |
185 | 185 | ||
186 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
187 | cmdiocbq->iocb_flag |= LPFC_IO_WAKE; | ||
188 | if (cmdiocbq->context2 && rspiocbq) | ||
189 | memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, | ||
190 | &rspiocbq->iocb, sizeof(IOCB_t)); | ||
191 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
192 | |||
193 | bmp = iocb->bmp; | 186 | bmp = iocb->bmp; |
194 | rspiocbq = iocb->rspiocbq; | ||
195 | rsp = &rspiocbq->iocb; | 187 | rsp = &rspiocbq->iocb; |
196 | ndlp = iocb->ndlp; | 188 | ndlp = cmdiocbq->context1; |
197 | 189 | ||
198 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, | 190 | pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, |
199 | job->request_payload.sg_cnt, DMA_TO_DEVICE); | 191 | job->request_payload.sg_cnt, DMA_TO_DEVICE); |
@@ -220,7 +212,6 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | |||
220 | rsp->un.genreq64.bdl.bdeSize; | 212 | rsp->un.genreq64.bdl.bdeSize; |
221 | 213 | ||
222 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 214 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
223 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
224 | lpfc_sli_release_iocbq(phba, cmdiocbq); | 215 | lpfc_sli_release_iocbq(phba, cmdiocbq); |
225 | lpfc_nlp_put(ndlp); | 216 | lpfc_nlp_put(ndlp); |
226 | kfree(bmp); | 217 | kfree(bmp); |
@@ -247,9 +238,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
247 | struct ulp_bde64 *bpl = NULL; | 238 | struct ulp_bde64 *bpl = NULL; |
248 | uint32_t timeout; | 239 | uint32_t timeout; |
249 | struct lpfc_iocbq *cmdiocbq = NULL; | 240 | struct lpfc_iocbq *cmdiocbq = NULL; |
250 | struct lpfc_iocbq *rspiocbq = NULL; | ||
251 | IOCB_t *cmd; | 241 | IOCB_t *cmd; |
252 | IOCB_t *rsp; | ||
253 | struct lpfc_dmabuf *bmp = NULL; | 242 | struct lpfc_dmabuf *bmp = NULL; |
254 | int request_nseg; | 243 | int request_nseg; |
255 | int reply_nseg; | 244 | int reply_nseg; |
@@ -296,17 +285,10 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
296 | } | 285 | } |
297 | 286 | ||
298 | cmd = &cmdiocbq->iocb; | 287 | cmd = &cmdiocbq->iocb; |
299 | rspiocbq = lpfc_sli_get_iocbq(phba); | ||
300 | if (!rspiocbq) { | ||
301 | rc = -ENOMEM; | ||
302 | goto free_cmdiocbq; | ||
303 | } | ||
304 | |||
305 | rsp = &rspiocbq->iocb; | ||
306 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); | 288 | bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); |
307 | if (!bmp->virt) { | 289 | if (!bmp->virt) { |
308 | rc = -ENOMEM; | 290 | rc = -ENOMEM; |
309 | goto free_rspiocbq; | 291 | goto free_cmdiocbq; |
310 | } | 292 | } |
311 | 293 | ||
312 | INIT_LIST_HEAD(&bmp->list); | 294 | INIT_LIST_HEAD(&bmp->list); |
@@ -358,14 +340,12 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
358 | cmd->ulpTimeout = timeout; | 340 | cmd->ulpTimeout = timeout; |
359 | 341 | ||
360 | cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; | 342 | cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp; |
361 | cmdiocbq->context1 = dd_data; | 343 | cmdiocbq->context1 = ndlp; |
362 | cmdiocbq->context2 = rspiocbq; | 344 | cmdiocbq->context2 = dd_data; |
363 | dd_data->type = TYPE_IOCB; | 345 | dd_data->type = TYPE_IOCB; |
364 | dd_data->context_un.iocb.cmdiocbq = cmdiocbq; | 346 | dd_data->context_un.iocb.cmdiocbq = cmdiocbq; |
365 | dd_data->context_un.iocb.rspiocbq = rspiocbq; | ||
366 | dd_data->context_un.iocb.set_job = job; | 347 | dd_data->context_un.iocb.set_job = job; |
367 | dd_data->context_un.iocb.bmp = bmp; | 348 | dd_data->context_un.iocb.bmp = bmp; |
368 | dd_data->context_un.iocb.ndlp = ndlp; | ||
369 | 349 | ||
370 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { | 350 | if (phba->cfg_poll & DISABLE_FCP_RING_INT) { |
371 | creg_val = readl(phba->HCregaddr); | 351 | creg_val = readl(phba->HCregaddr); |
@@ -391,8 +371,6 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
391 | 371 | ||
392 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); | 372 | lpfc_mbuf_free(phba, bmp->virt, bmp->phys); |
393 | 373 | ||
394 | free_rspiocbq: | ||
395 | lpfc_sli_release_iocbq(phba, rspiocbq); | ||
396 | free_cmdiocbq: | 374 | free_cmdiocbq: |
397 | lpfc_sli_release_iocbq(phba, cmdiocbq); | 375 | lpfc_sli_release_iocbq(phba, cmdiocbq); |
398 | free_bmp: | 376 | free_bmp: |
@@ -1220,7 +1198,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | |||
1220 | int rc = 0; | 1198 | int rc = 0; |
1221 | 1199 | ||
1222 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | 1200 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
1223 | dd_data = cmdiocbq->context1; | 1201 | dd_data = cmdiocbq->context2; |
1224 | /* normal completion and timeout crossed paths, already done */ | 1202 | /* normal completion and timeout crossed paths, already done */ |
1225 | if (!dd_data) { | 1203 | if (!dd_data) { |
1226 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | 1204 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
@@ -1369,8 +1347,8 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | |||
1369 | ctiocb->context3 = bmp; | 1347 | ctiocb->context3 = bmp; |
1370 | 1348 | ||
1371 | ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; | 1349 | ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; |
1372 | ctiocb->context1 = dd_data; | 1350 | ctiocb->context2 = dd_data; |
1373 | ctiocb->context2 = NULL; | 1351 | ctiocb->context1 = ndlp; |
1374 | dd_data->type = TYPE_IOCB; | 1352 | dd_data->type = TYPE_IOCB; |
1375 | dd_data->context_un.iocb.cmdiocbq = ctiocb; | 1353 | dd_data->context_un.iocb.cmdiocbq = ctiocb; |
1376 | dd_data->context_un.iocb.rspiocbq = NULL; | 1354 | dd_data->context_un.iocb.rspiocbq = NULL; |
@@ -1641,7 +1619,7 @@ job_error: | |||
1641 | * This function obtains a remote port login id so the diag loopback test | 1619 | * This function obtains a remote port login id so the diag loopback test |
1642 | * can send and receive its own unsolicited CT command. | 1620 | * can send and receive its own unsolicited CT command. |
1643 | **/ | 1621 | **/ |
1644 | static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) | 1622 | static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi) |
1645 | { | 1623 | { |
1646 | LPFC_MBOXQ_t *mbox; | 1624 | LPFC_MBOXQ_t *mbox; |
1647 | struct lpfc_dmabuf *dmabuff; | 1625 | struct lpfc_dmabuf *dmabuff; |
@@ -1651,10 +1629,14 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) | |||
1651 | if (!mbox) | 1629 | if (!mbox) |
1652 | return -ENOMEM; | 1630 | return -ENOMEM; |
1653 | 1631 | ||
1632 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1633 | *rpi = lpfc_sli4_alloc_rpi(phba); | ||
1654 | status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, | 1634 | status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID, |
1655 | (uint8_t *)&phba->pport->fc_sparam, mbox, 0); | 1635 | (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi); |
1656 | if (status) { | 1636 | if (status) { |
1657 | mempool_free(mbox, phba->mbox_mem_pool); | 1637 | mempool_free(mbox, phba->mbox_mem_pool); |
1638 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1639 | lpfc_sli4_free_rpi(phba, *rpi); | ||
1658 | return -ENOMEM; | 1640 | return -ENOMEM; |
1659 | } | 1641 | } |
1660 | 1642 | ||
@@ -1668,6 +1650,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi) | |||
1668 | kfree(dmabuff); | 1650 | kfree(dmabuff); |
1669 | if (status != MBX_TIMEOUT) | 1651 | if (status != MBX_TIMEOUT) |
1670 | mempool_free(mbox, phba->mbox_mem_pool); | 1652 | mempool_free(mbox, phba->mbox_mem_pool); |
1653 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1654 | lpfc_sli4_free_rpi(phba, *rpi); | ||
1671 | return -ENODEV; | 1655 | return -ENODEV; |
1672 | } | 1656 | } |
1673 | 1657 | ||
@@ -1704,8 +1688,9 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) | |||
1704 | mempool_free(mbox, phba->mbox_mem_pool); | 1688 | mempool_free(mbox, phba->mbox_mem_pool); |
1705 | return -EIO; | 1689 | return -EIO; |
1706 | } | 1690 | } |
1707 | |||
1708 | mempool_free(mbox, phba->mbox_mem_pool); | 1691 | mempool_free(mbox, phba->mbox_mem_pool); |
1692 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1693 | lpfc_sli4_free_rpi(phba, rpi); | ||
1709 | return 0; | 1694 | return 0; |
1710 | } | 1695 | } |
1711 | 1696 | ||
@@ -2102,7 +2087,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) | |||
2102 | uint32_t size; | 2087 | uint32_t size; |
2103 | uint32_t full_size; | 2088 | uint32_t full_size; |
2104 | size_t segment_len = 0, segment_offset = 0, current_offset = 0; | 2089 | size_t segment_len = 0, segment_offset = 0, current_offset = 0; |
2105 | uint16_t rpi; | 2090 | uint16_t rpi = 0; |
2106 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; | 2091 | struct lpfc_iocbq *cmdiocbq, *rspiocbq; |
2107 | IOCB_t *cmd, *rsp; | 2092 | IOCB_t *cmd, *rsp; |
2108 | struct lpfc_sli_ct_request *ctreq; | 2093 | struct lpfc_sli_ct_request *ctreq; |
@@ -2162,7 +2147,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) | |||
2162 | goto loopback_test_exit; | 2147 | goto loopback_test_exit; |
2163 | } | 2148 | } |
2164 | 2149 | ||
2165 | if (size >= BUF_SZ_4K) { | 2150 | if (full_size >= BUF_SZ_4K) { |
2166 | /* | 2151 | /* |
2167 | * Allocate memory for ioctl data. If buffer is bigger than 64k, | 2152 | * Allocate memory for ioctl data. If buffer is bigger than 64k, |
2168 | * then we allocate 64k and re-use that buffer over and over to | 2153 | * then we allocate 64k and re-use that buffer over and over to |
@@ -2171,7 +2156,7 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) | |||
2171 | * problem with GET_FCPTARGETMAPPING... | 2156 | * problem with GET_FCPTARGETMAPPING... |
2172 | */ | 2157 | */ |
2173 | if (size <= (64 * 1024)) | 2158 | if (size <= (64 * 1024)) |
2174 | total_mem = size; | 2159 | total_mem = full_size; |
2175 | else | 2160 | else |
2176 | total_mem = 64 * 1024; | 2161 | total_mem = 64 * 1024; |
2177 | } else | 2162 | } else |
@@ -2189,7 +2174,6 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) | |||
2189 | sg_copy_to_buffer(job->request_payload.sg_list, | 2174 | sg_copy_to_buffer(job->request_payload.sg_list, |
2190 | job->request_payload.sg_cnt, | 2175 | job->request_payload.sg_cnt, |
2191 | ptr, size); | 2176 | ptr, size); |
2192 | |||
2193 | rc = lpfcdiag_loop_self_reg(phba, &rpi); | 2177 | rc = lpfcdiag_loop_self_reg(phba, &rpi); |
2194 | if (rc) | 2178 | if (rc) |
2195 | goto loopback_test_exit; | 2179 | goto loopback_test_exit; |
@@ -2601,12 +2585,11 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, | |||
2601 | phba->wait_4_mlo_maint_flg = 1; | 2585 | phba->wait_4_mlo_maint_flg = 1; |
2602 | } else if (mb->un.varWords[0] == SETVAR_MLORST) { | 2586 | } else if (mb->un.varWords[0] == SETVAR_MLORST) { |
2603 | phba->link_flag &= ~LS_LOOPBACK_MODE; | 2587 | phba->link_flag &= ~LS_LOOPBACK_MODE; |
2604 | phba->fc_topology = TOPOLOGY_PT_PT; | 2588 | phba->fc_topology = LPFC_TOPOLOGY_PT_PT; |
2605 | } | 2589 | } |
2606 | break; | 2590 | break; |
2607 | case MBX_READ_SPARM64: | 2591 | case MBX_READ_SPARM64: |
2608 | case MBX_READ_LA: | 2592 | case MBX_READ_TOPOLOGY: |
2609 | case MBX_READ_LA64: | ||
2610 | case MBX_REG_LOGIN: | 2593 | case MBX_REG_LOGIN: |
2611 | case MBX_REG_LOGIN64: | 2594 | case MBX_REG_LOGIN64: |
2612 | case MBX_CONFIG_PORT: | 2595 | case MBX_CONFIG_PORT: |
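Note: the BSG send paths above now keep the remote node in context1 and the per-job tracking data in context2, and no longer allocate a separate response IOCB; the rspiocbq handed to the completion routine is used directly. A hedged sketch of the completion side under that layout; the function is illustrative, the field usage mirrors the hunks above.

static void example_bsg_cmd_cmpl(struct lpfc_hba *phba,
                                 struct lpfc_iocbq *cmdiocbq,
                                 struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data = cmdiocbq->context2;      /* job tracking */
        struct lpfc_nodelist *ndlp = cmdiocbq->context1;        /* remote node */

        if (!dd_data) {
                /* timeout handler already finished the job; just free the iocb */
                lpfc_sli_release_iocbq(phba, cmdiocbq);
                return;
        }
        /* ... complete the fc_bsg_job using rspiocbq->iocb, then clean up ... */
        lpfc_sli_release_iocbq(phba, cmdiocbq);
        lpfc_nlp_put(ndlp);
}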
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index a5f5a093a8a4..17fde522c84a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -31,7 +31,7 @@ void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
31 | void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 31 | void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
32 | 32 | ||
33 | void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); | 33 | void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *); |
34 | int lpfc_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *); | 34 | int lpfc_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *, struct lpfc_dmabuf *); |
35 | void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); | 35 | void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); |
36 | void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *); | 36 | void lpfc_issue_clear_la(struct lpfc_hba *, struct lpfc_vport *); |
37 | void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); | 37 | void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); |
@@ -40,7 +40,7 @@ int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); | |||
40 | void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); | 40 | void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); |
41 | void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); | 41 | void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); |
42 | int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, | 42 | int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, |
43 | LPFC_MBOXQ_t *, uint32_t); | 43 | LPFC_MBOXQ_t *, uint16_t); |
44 | void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); | 44 | void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); |
45 | void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 45 | void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); |
46 | void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 46 | void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); |
@@ -64,7 +64,7 @@ void lpfc_cleanup_pending_mbox(struct lpfc_vport *); | |||
64 | int lpfc_linkdown(struct lpfc_hba *); | 64 | int lpfc_linkdown(struct lpfc_hba *); |
65 | void lpfc_linkdown_port(struct lpfc_vport *); | 65 | void lpfc_linkdown_port(struct lpfc_vport *); |
66 | void lpfc_port_link_failure(struct lpfc_vport *); | 66 | void lpfc_port_link_failure(struct lpfc_vport *); |
67 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); | 67 | void lpfc_mbx_cmpl_read_topology(struct lpfc_hba *, LPFC_MBOXQ_t *); |
68 | void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 68 | void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
69 | void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); | 69 | void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); |
70 | void lpfc_retry_pport_discovery(struct lpfc_hba *); | 70 | void lpfc_retry_pport_discovery(struct lpfc_hba *); |
@@ -121,6 +121,7 @@ void lpfc_end_rscn(struct lpfc_vport *); | |||
121 | int lpfc_els_chk_latt(struct lpfc_vport *); | 121 | int lpfc_els_chk_latt(struct lpfc_vport *); |
122 | int lpfc_els_abort_flogi(struct lpfc_hba *); | 122 | int lpfc_els_abort_flogi(struct lpfc_hba *); |
123 | int lpfc_initial_flogi(struct lpfc_vport *); | 123 | int lpfc_initial_flogi(struct lpfc_vport *); |
124 | void lpfc_issue_init_vfi(struct lpfc_vport *); | ||
124 | int lpfc_initial_fdisc(struct lpfc_vport *); | 125 | int lpfc_initial_fdisc(struct lpfc_vport *); |
125 | int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); | 126 | int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t); |
126 | int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); | 127 | int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); |
@@ -415,5 +416,13 @@ struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *, | |||
415 | int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, | 416 | int __lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, |
416 | struct lpfc_iocbq *, uint32_t); | 417 | struct lpfc_iocbq *, uint32_t); |
417 | uint32_t lpfc_drain_txq(struct lpfc_hba *); | 418 | uint32_t lpfc_drain_txq(struct lpfc_hba *); |
418 | 419 | void lpfc_clr_rrq_active(struct lpfc_hba *, uint16_t, struct lpfc_node_rrq *); | |
419 | 420 | int lpfc_test_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t); | |
421 | void lpfc_handle_rrq_active(struct lpfc_hba *); | ||
422 | int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); | ||
423 | int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, | ||
424 | uint16_t, uint16_t, uint16_t); | ||
425 | void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); | ||
426 | void lpfc_cleanup_vports_rrqs(struct lpfc_vport *); | ||
427 | struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, | ||
428 | uint32_t); | ||
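Note: the new declarations add the RRQ (Reinstate Recovery Qualifier) machinery: an XRI belonging to an aborted exchange is kept quarantined per remote node until an RRQ ELS, or the rrq timer, releases it. A hedged sketch of the intended call pattern; the wrapper is illustrative, the prototypes are those declared above, and the last argument is assumed here to request that an RRQ ELS actually be sent.

static void example_quarantine_xri(struct lpfc_hba *phba,
                                   struct lpfc_nodelist *ndlp,
                                   uint16_t xritag, uint16_t rxid)
{
        if (lpfc_test_rrq_active(phba, ndlp, xritag))
                return;                 /* already being recovered */
        /* mark the XRI busy for this node until the RRQ resolves it */
        lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);
}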
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 463b74902ac4..c004fa9a681e 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -48,14 +48,14 @@ | |||
48 | #include "lpfc_vport.h" | 48 | #include "lpfc_vport.h" |
49 | #include "lpfc_debugfs.h" | 49 | #include "lpfc_debugfs.h" |
50 | 50 | ||
51 | #define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver | 51 | /* FDMI Port Speed definitions */ |
52 | * incapable of reporting */ | 52 | #define HBA_PORTSPEED_1GBIT 0x0001 /* 1 GBit/sec */ |
53 | #define HBA_PORTSPEED_1GBIT 1 /* 1 GBit/sec */ | 53 | #define HBA_PORTSPEED_2GBIT 0x0002 /* 2 GBit/sec */ |
54 | #define HBA_PORTSPEED_2GBIT 2 /* 2 GBit/sec */ | 54 | #define HBA_PORTSPEED_4GBIT 0x0008 /* 4 GBit/sec */ |
55 | #define HBA_PORTSPEED_4GBIT 8 /* 4 GBit/sec */ | 55 | #define HBA_PORTSPEED_10GBIT 0x0004 /* 10 GBit/sec */ |
56 | #define HBA_PORTSPEED_8GBIT 16 /* 8 GBit/sec */ | 56 | #define HBA_PORTSPEED_8GBIT 0x0010 /* 8 GBit/sec */ |
57 | #define HBA_PORTSPEED_10GBIT 4 /* 10 GBit/sec */ | 57 | #define HBA_PORTSPEED_16GBIT 0x0020 /* 16 GBit/sec */ |
58 | #define HBA_PORTSPEED_NOT_NEGOTIATED 5 /* Speed not established */ | 58 | #define HBA_PORTSPEED_UNKNOWN 0x0800 /* Unknown */ |
59 | 59 | ||
60 | #define FOURBYTES 4 | 60 | #define FOURBYTES 4 |
61 | 61 | ||
@@ -1593,8 +1593,10 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) | |||
1593 | ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); | 1593 | ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); |
1594 | 1594 | ||
1595 | ae->un.SupportSpeed = 0; | 1595 | ae->un.SupportSpeed = 0; |
1596 | if (phba->lmt & LMT_16Gb) | ||
1597 | ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT; | ||
1596 | if (phba->lmt & LMT_10Gb) | 1598 | if (phba->lmt & LMT_10Gb) |
1597 | ae->un.SupportSpeed = HBA_PORTSPEED_10GBIT; | 1599 | ae->un.SupportSpeed |= HBA_PORTSPEED_10GBIT; |
1598 | if (phba->lmt & LMT_8Gb) | 1600 | if (phba->lmt & LMT_8Gb) |
1599 | ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT; | 1601 | ae->un.SupportSpeed |= HBA_PORTSPEED_8GBIT; |
1600 | if (phba->lmt & LMT_4Gb) | 1602 | if (phba->lmt & LMT_4Gb) |
@@ -1612,24 +1614,26 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) | |||
1612 | ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED); | 1614 | ae->ad.bits.AttrType = be16_to_cpu(PORT_SPEED); |
1613 | ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); | 1615 | ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); |
1614 | switch(phba->fc_linkspeed) { | 1616 | switch(phba->fc_linkspeed) { |
1615 | case LA_1GHZ_LINK: | 1617 | case LPFC_LINK_SPEED_1GHZ: |
1616 | ae->un.PortSpeed = HBA_PORTSPEED_1GBIT; | 1618 | ae->un.PortSpeed = HBA_PORTSPEED_1GBIT; |
1617 | break; | 1619 | break; |
1618 | case LA_2GHZ_LINK: | 1620 | case LPFC_LINK_SPEED_2GHZ: |
1619 | ae->un.PortSpeed = HBA_PORTSPEED_2GBIT; | 1621 | ae->un.PortSpeed = HBA_PORTSPEED_2GBIT; |
1620 | break; | 1622 | break; |
1621 | case LA_4GHZ_LINK: | 1623 | case LPFC_LINK_SPEED_4GHZ: |
1622 | ae->un.PortSpeed = HBA_PORTSPEED_4GBIT; | 1624 | ae->un.PortSpeed = HBA_PORTSPEED_4GBIT; |
1623 | break; | 1625 | break; |
1624 | case LA_8GHZ_LINK: | 1626 | case LPFC_LINK_SPEED_8GHZ: |
1625 | ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; | 1627 | ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; |
1626 | break; | 1628 | break; |
1627 | case LA_10GHZ_LINK: | 1629 | case LPFC_LINK_SPEED_10GHZ: |
1628 | ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; | 1630 | ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; |
1629 | break; | 1631 | break; |
1630 | default: | 1632 | case LPFC_LINK_SPEED_16GHZ: |
1631 | ae->un.PortSpeed = | 1633 | ae->un.PortSpeed = HBA_PORTSPEED_16GBIT; |
1632 | HBA_PORTSPEED_UNKNOWN; | 1634 | break; |
1635 | default: | ||
1636 | ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN; | ||
1633 | break; | 1637 | break; |
1634 | } | 1638 | } |
1635 | pab->ab.EntryCnt++; | 1639 | pab->ab.EntryCnt++; |
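Note: the FDMI speed codes are now proper bit flags, so the supported speeds derived from the adapter's link-mode technology word are OR-ed together rather than assigned. A small worked sketch; the helper name is illustrative, the HBA_PORTSPEED_* and LMT_* values are those used above.

static uint32_t example_fdmi_support_speed(uint32_t lmt)
{
        uint32_t speed = 0;

        if (lmt & LMT_4Gb)
                speed |= HBA_PORTSPEED_4GBIT;   /* 0x0008 */
        if (lmt & LMT_8Gb)
                speed |= HBA_PORTSPEED_8GBIT;   /* 0x0010 */
        if (lmt & LMT_16Gb)
                speed |= HBA_PORTSPEED_16GBIT;  /* 0x0020 */
        return speed;   /* e.g. a 4G/8G-capable port reports 0x0018 */
}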
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 7cae69de36f7..1d84b63fccad 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -68,6 +68,12 @@ struct lpfc_fast_path_event { | |||
68 | } un; | 68 | } un; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | #define LPFC_SLI4_MAX_XRI 1024 /* Used to make the ndlp's xri_bitmap */ | ||
72 | #define XRI_BITMAP_ULONGS (LPFC_SLI4_MAX_XRI / BITS_PER_LONG) | ||
73 | struct lpfc_node_rrqs { | ||
74 | unsigned long xri_bitmap[XRI_BITMAP_ULONGS]; | ||
75 | }; | ||
76 | |||
71 | struct lpfc_nodelist { | 77 | struct lpfc_nodelist { |
72 | struct list_head nlp_listp; | 78 | struct list_head nlp_listp; |
73 | struct lpfc_name nlp_portname; | 79 | struct lpfc_name nlp_portname; |
@@ -110,8 +116,19 @@ struct lpfc_nodelist { | |||
110 | atomic_t cmd_pending; | 116 | atomic_t cmd_pending; |
111 | uint32_t cmd_qdepth; | 117 | uint32_t cmd_qdepth; |
112 | unsigned long last_change_time; | 118 | unsigned long last_change_time; |
119 | struct lpfc_node_rrqs active_rrqs; | ||
113 | struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ | 120 | struct lpfc_scsicmd_bkt *lat_data; /* Latency data */ |
114 | }; | 121 | }; |
122 | struct lpfc_node_rrq { | ||
123 | struct list_head list; | ||
124 | uint16_t xritag; | ||
125 | uint16_t send_rrq; | ||
126 | uint16_t rxid; | ||
127 | uint32_t nlp_DID; /* FC D_ID of entry */ | ||
128 | struct lpfc_vport *vport; | ||
129 | struct lpfc_nodelist *ndlp; | ||
130 | unsigned long rrq_stop_time; | ||
131 | }; | ||
115 | 132 | ||
116 | /* Defines for nlp_flag (uint32) */ | 133 | /* Defines for nlp_flag (uint32) */ |
117 | #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ | 134 | #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ |
@@ -136,7 +153,7 @@ struct lpfc_nodelist { | |||
136 | #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ | 153 | #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ |
137 | #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ | 154 | #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ |
138 | #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ | 155 | #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ |
139 | #define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */ | 156 | #define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ |
140 | 157 | ||
141 | /* ndlp usage management macros */ | 158 | /* ndlp usage management macros */ |
142 | #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ | 159 | #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ |
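Note: active_rrqs gives each node a bitmap of XRIs with an outstanding RRQ, sized for LPFC_SLI4_MAX_XRI (1024) exchanges, i.e. 16 unsigned longs on a 64-bit build. A hedged sketch of querying it; the helper is illustrative.

static bool example_xri_has_active_rrq(struct lpfc_nodelist *ndlp, uint16_t xri)
{
        if (xri >= LPFC_SLI4_MAX_XRI)
                return false;
        return test_bit(xri, ndlp->active_rrqs.xri_bitmap);
}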
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 884f4d321799..c62d567cc845 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -375,7 +375,8 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) | |||
375 | err = 4; | 375 | err = 4; |
376 | goto fail; | 376 | goto fail; |
377 | } | 377 | } |
378 | rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0); | 378 | rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, |
379 | ndlp->nlp_rpi); | ||
379 | if (rc) { | 380 | if (rc) { |
380 | err = 5; | 381 | err = 5; |
381 | goto fail_free_mbox; | 382 | goto fail_free_mbox; |
@@ -523,7 +524,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
523 | phba->fc_edtovResol = sp->cmn.edtovResolution; | 524 | phba->fc_edtovResol = sp->cmn.edtovResolution; |
524 | phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; | 525 | phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; |
525 | 526 | ||
526 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 527 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
527 | spin_lock_irq(shost->host_lock); | 528 | spin_lock_irq(shost->host_lock); |
528 | vport->fc_flag |= FC_PUBLIC_LOOP; | 529 | vport->fc_flag |= FC_PUBLIC_LOOP; |
529 | spin_unlock_irq(shost->host_lock); | 530 | spin_unlock_irq(shost->host_lock); |
@@ -832,6 +833,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
832 | if (lpfc_els_retry(phba, cmdiocb, rspiocb)) | 833 | if (lpfc_els_retry(phba, cmdiocb, rspiocb)) |
833 | goto out; | 834 | goto out; |
834 | 835 | ||
836 | /* FLOGI failure */ | ||
837 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
838 | "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", | ||
839 | irsp->ulpStatus, irsp->un.ulpWord[4], | ||
840 | irsp->ulpTimeout); | ||
841 | |||
835 | /* FLOGI failed, so there is no fabric */ | 842 | /* FLOGI failed, so there is no fabric */ |
836 | spin_lock_irq(shost->host_lock); | 843 | spin_lock_irq(shost->host_lock); |
837 | vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); | 844 | vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); |
@@ -843,13 +850,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
843 | */ | 850 | */ |
844 | if (phba->alpa_map[0] == 0) { | 851 | if (phba->alpa_map[0] == 0) { |
845 | vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; | 852 | vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; |
853 | if ((phba->sli_rev == LPFC_SLI_REV4) && | ||
854 | (!(vport->fc_flag & FC_VFI_REGISTERED) || | ||
855 | (vport->fc_prevDID != vport->fc_myDID))) { | ||
856 | if (vport->fc_flag & FC_VFI_REGISTERED) | ||
857 | lpfc_sli4_unreg_all_rpis(vport); | ||
858 | lpfc_issue_reg_vfi(vport); | ||
859 | lpfc_nlp_put(ndlp); | ||
860 | goto out; | ||
861 | } | ||
846 | } | 862 | } |
847 | |||
848 | /* FLOGI failure */ | ||
849 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
850 | "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", | ||
851 | irsp->ulpStatus, irsp->un.ulpWord[4], | ||
852 | irsp->ulpTimeout); | ||
853 | goto flogifail; | 863 | goto flogifail; |
854 | } | 864 | } |
855 | spin_lock_irq(shost->host_lock); | 865 | spin_lock_irq(shost->host_lock); |
@@ -879,7 +889,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
879 | */ | 889 | */ |
880 | if (sp->cmn.fPort) | 890 | if (sp->cmn.fPort) |
881 | rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); | 891 | rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); |
882 | else if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) | 892 | else if (!(phba->hba_flag & HBA_FCOE_MODE)) |
883 | rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); | 893 | rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); |
884 | else { | 894 | else { |
885 | lpfc_printf_vlog(vport, KERN_ERR, | 895 | lpfc_printf_vlog(vport, KERN_ERR, |
@@ -1014,7 +1024,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1014 | if (sp->cmn.fcphHigh < FC_PH3) | 1024 | if (sp->cmn.fcphHigh < FC_PH3) |
1015 | sp->cmn.fcphHigh = FC_PH3; | 1025 | sp->cmn.fcphHigh = FC_PH3; |
1016 | 1026 | ||
1017 | if (phba->sli_rev == LPFC_SLI_REV4) { | 1027 | if ((phba->sli_rev == LPFC_SLI_REV4) && |
1028 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == | ||
1029 | LPFC_SLI_INTF_IF_TYPE_0)) { | ||
1018 | elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); | 1030 | elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); |
1019 | elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); | 1031 | elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); |
1020 | /* FLOGI needs to be 3 for WQE FCFI */ | 1032 | /* FLOGI needs to be 3 for WQE FCFI */ |
@@ -1027,7 +1039,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1027 | icmd->ulpCt_l = 0; | 1039 | icmd->ulpCt_l = 0; |
1028 | } | 1040 | } |
1029 | 1041 | ||
1030 | if (phba->fc_topology != TOPOLOGY_LOOP) { | 1042 | if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { |
1031 | icmd->un.elsreq64.myID = 0; | 1043 | icmd->un.elsreq64.myID = 0; |
1032 | icmd->un.elsreq64.fl = 1; | 1044 | icmd->un.elsreq64.fl = 1; |
1033 | } | 1045 | } |
@@ -1281,6 +1293,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
1281 | uint32_t rc, keepDID = 0; | 1293 | uint32_t rc, keepDID = 0; |
1282 | int put_node; | 1294 | int put_node; |
1283 | int put_rport; | 1295 | int put_rport; |
1296 | struct lpfc_node_rrqs rrq; | ||
1284 | 1297 | ||
1285 | /* Fabric nodes can have the same WWPN so we don't bother searching | 1298 | /* Fabric nodes can have the same WWPN so we don't bother searching |
1286 | * by WWPN. Just return the ndlp that was given to us. | 1299 | * by WWPN. Just return the ndlp that was given to us. |
@@ -1298,6 +1311,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
1298 | 1311 | ||
1299 | if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) | 1312 | if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp)) |
1300 | return ndlp; | 1313 | return ndlp; |
1314 | memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); | ||
1301 | 1315 | ||
1302 | if (!new_ndlp) { | 1316 | if (!new_ndlp) { |
1303 | rc = memcmp(&ndlp->nlp_portname, name, | 1317 | rc = memcmp(&ndlp->nlp_portname, name, |
@@ -1318,12 +1332,25 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
1318 | if (!new_ndlp) | 1332 | if (!new_ndlp) |
1319 | return ndlp; | 1333 | return ndlp; |
1320 | keepDID = new_ndlp->nlp_DID; | 1334 | keepDID = new_ndlp->nlp_DID; |
1321 | } else | 1335 | if (phba->sli_rev == LPFC_SLI_REV4) |
1336 | memcpy(&rrq.xri_bitmap, | ||
1337 | &new_ndlp->active_rrqs.xri_bitmap, | ||
1338 | sizeof(new_ndlp->active_rrqs.xri_bitmap)); | ||
1339 | } else { | ||
1322 | keepDID = new_ndlp->nlp_DID; | 1340 | keepDID = new_ndlp->nlp_DID; |
1341 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1342 | memcpy(&rrq.xri_bitmap, | ||
1343 | &new_ndlp->active_rrqs.xri_bitmap, | ||
1344 | sizeof(new_ndlp->active_rrqs.xri_bitmap)); | ||
1345 | } | ||
1323 | 1346 | ||
1324 | lpfc_unreg_rpi(vport, new_ndlp); | 1347 | lpfc_unreg_rpi(vport, new_ndlp); |
1325 | new_ndlp->nlp_DID = ndlp->nlp_DID; | 1348 | new_ndlp->nlp_DID = ndlp->nlp_DID; |
1326 | new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; | 1349 | new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; |
1350 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1351 | memcpy(new_ndlp->active_rrqs.xri_bitmap, | ||
1352 | &ndlp->active_rrqs.xri_bitmap, | ||
1353 | sizeof(ndlp->active_rrqs.xri_bitmap)); | ||
1327 | 1354 | ||
1328 | if (ndlp->nlp_flag & NLP_NPR_2B_DISC) | 1355 | if (ndlp->nlp_flag & NLP_NPR_2B_DISC) |
1329 | new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; | 1356 | new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; |
@@ -1362,12 +1389,20 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, | |||
1362 | 1389 | ||
1363 | /* Two ndlps cannot have the same did on the nodelist */ | 1390 | /* Two ndlps cannot have the same did on the nodelist */ |
1364 | ndlp->nlp_DID = keepDID; | 1391 | ndlp->nlp_DID = keepDID; |
1392 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1393 | memcpy(&ndlp->active_rrqs.xri_bitmap, | ||
1394 | &rrq.xri_bitmap, | ||
1395 | sizeof(ndlp->active_rrqs.xri_bitmap)); | ||
1365 | lpfc_drop_node(vport, ndlp); | 1396 | lpfc_drop_node(vport, ndlp); |
1366 | } | 1397 | } |
1367 | else { | 1398 | else { |
1368 | lpfc_unreg_rpi(vport, ndlp); | 1399 | lpfc_unreg_rpi(vport, ndlp); |
1369 | /* Two ndlps cannot have the same did */ | 1400 | /* Two ndlps cannot have the same did */ |
1370 | ndlp->nlp_DID = keepDID; | 1401 | ndlp->nlp_DID = keepDID; |
1402 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
1403 | memcpy(&ndlp->active_rrqs.xri_bitmap, | ||
1404 | &rrq.xri_bitmap, | ||
1405 | sizeof(ndlp->active_rrqs.xri_bitmap)); | ||
1371 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); | 1406 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); |
1372 | /* Since we are swapping the ndlp passed in with the new one | 1407 | /* Since we are swapping the ndlp passed in with the new one |
1373 | * and the did has already been swapped, copy over the | 1408 | * and the did has already been swapped, copy over the |
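Note on the hunks above: new_ndlp's active_rrqs.xri_bitmap is staged in the local rrq copy before the DID swap, and each node is then handed back the RRQ state that belongs to the DID it ends up holding. A minimal userspace sketch of that save/swap/restore shape (the struct, sizes and names below are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define XRI_BITMAP_WORDS 16                 /* illustrative size only */

    struct node {
        uint32_t did;                           /* destination ID */
        uint32_t xri_bitmap[XRI_BITMAP_WORDS];  /* active RRQ XRIs */
    };

    /* Swap DIDs between ndlp and new_ndlp while keeping each RRQ bitmap
     * attached to the DID it was originally tracking. */
    static void swap_did_preserving_rrqs(struct node *ndlp, struct node *new_ndlp)
    {
        uint32_t keep_did = new_ndlp->did;
        uint32_t saved[XRI_BITMAP_WORDS];

        /* stage new_ndlp's bitmap, mirroring the local 'rrq' copy in the hunk */
        memcpy(saved, new_ndlp->xri_bitmap, sizeof(saved));

        /* new_ndlp takes over ndlp's DID together with ndlp's RRQ state */
        new_ndlp->did = ndlp->did;
        memcpy(new_ndlp->xri_bitmap, ndlp->xri_bitmap, sizeof(saved));

        /* ndlp falls back to the kept DID and the staged bitmap */
        ndlp->did = keep_did;
        memcpy(ndlp->xri_bitmap, saved, sizeof(saved));
    }

    int main(void)
    {
        struct node a = { .did = 0x010203 }, b = { .did = 0x040506 };

        a.xri_bitmap[0] = 0x1;
        b.xri_bitmap[0] = 0x2;
        swap_did_preserving_rrqs(&a, &b);
        printf("a: did=0x%x map=0x%x  b: did=0x%x map=0x%x\n",
               a.did, a.xri_bitmap[0], b.did, b.xri_bitmap[0]);
        return 0;
    }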
@@ -1428,6 +1463,73 @@ lpfc_end_rscn(struct lpfc_vport *vport) | |||
1428 | } | 1463 | } |
1429 | 1464 | ||
1430 | /** | 1465 | /** |
1466 | * lpfc_cmpl_els_rrq - Completion handler for els RRQs. | ||
1467 | * @phba: pointer to lpfc hba data structure. | ||
1468 | * @cmdiocb: pointer to lpfc command iocb data structure. | ||
1469 | * @rspiocb: pointer to lpfc response iocb data structure. | ||
1470 | * | ||
1471 | * This routine will call the clear rrq function to free the rrq and | ||
1472 | * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not | ||
1473 | * exist then the clear_rrq is still called because the rrq needs to | ||
1474 | * be freed. | ||
1475 | **/ | ||
1476 | |||
1477 | static void | ||
1478 | lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | ||
1479 | struct lpfc_iocbq *rspiocb) | ||
1480 | { | ||
1481 | struct lpfc_vport *vport = cmdiocb->vport; | ||
1482 | IOCB_t *irsp; | ||
1483 | struct lpfc_nodelist *ndlp; | ||
1484 | struct lpfc_node_rrq *rrq; | ||
1485 | |||
1486 | /* we pass cmdiocb to state machine which needs rspiocb as well */ | ||
1487 | rrq = cmdiocb->context_un.rrq; | ||
1488 | cmdiocb->context_un.rsp_iocb = rspiocb; | ||
1489 | |||
1490 | irsp = &rspiocb->iocb; | ||
1491 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, | ||
1492 | "RRQ cmpl: status:x%x/x%x did:x%x", | ||
1493 | irsp->ulpStatus, irsp->un.ulpWord[4], | ||
1494 | irsp->un.elsreq64.remoteID); | ||
1495 | |||
1496 | ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); | ||
1497 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { | ||
1498 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
1499 | "2882 RRQ completes to NPort x%x " | ||
1500 | "with no ndlp. Data: x%x x%x x%x\n", | ||
1501 | irsp->un.elsreq64.remoteID, | ||
1502 | irsp->ulpStatus, irsp->un.ulpWord[4], | ||
1503 | irsp->ulpIoTag); | ||
1504 | goto out; | ||
1505 | } | ||
1506 | |||
1507 | /* rrq completes to NPort <nlp_DID> */ | ||
1508 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, | ||
1509 | "2880 RRQ completes to NPort x%x " | ||
1510 | "Data: x%x x%x x%x x%x x%x\n", | ||
1511 | ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], | ||
1512 | irsp->ulpTimeout, rrq->xritag, rrq->rxid); | ||
1513 | |||
1514 | if (irsp->ulpStatus) { | ||
1515 | /* Check for retry */ | ||
1516 | /* RRQ failed Don't print the vport to vport rjts */ | ||
1517 | if (irsp->ulpStatus != IOSTAT_LS_RJT || | ||
1518 | (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && | ||
1519 | ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || | ||
1520 | (phba)->pport->cfg_log_verbose & LOG_ELS) | ||
1521 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | ||
1522 | "2881 RRQ failure DID:%06X Status:x%x/x%x\n", | ||
1523 | ndlp->nlp_DID, irsp->ulpStatus, | ||
1524 | irsp->un.ulpWord[4]); | ||
1525 | } | ||
1526 | out: | ||
1527 | if (rrq) | ||
1528 | lpfc_clr_rrq_active(phba, rrq->xritag, rrq); | ||
1529 | lpfc_els_free_iocb(phba, cmdiocb); | ||
1530 | return; | ||
1531 | } | ||
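The completion handler above recovers the rrq pointer stashed in the command iocb's context union at issue time and clears the RRQ even when the remote node can no longer be found, so the exchange qualifier is always released. A small sketch of that stash-and-complete pattern, using hypothetical types rather than the lpfc iocb API:

    #include <stdio.h>

    struct rrq_ctx { unsigned int xritag; };

    struct cmd {
        void (*on_complete)(struct cmd *cmd, int status);
        void *context;                      /* opaque per-command state */
    };

    static void clear_rrq(struct rrq_ctx *rrq)
    {
        printf("clearing RRQ for xri 0x%x\n", rrq->xritag);
    }

    /* Completion path: recover the stashed context and release it
     * regardless of the completion status. */
    static void rrq_cmpl(struct cmd *cmd, int status)
    {
        struct rrq_ctx *rrq = cmd->context;

        if (status)
            printf("RRQ failed, status %d\n", status);
        if (rrq)                            /* always release the RRQ */
            clear_rrq(rrq);
    }

    int main(void)
    {
        struct rrq_ctx rrq = { .xritag = 0x42 };
        struct cmd cmd = { .on_complete = rrq_cmpl, .context = &rrq };

        cmd.on_complete(&cmd, 0);           /* simulate hardware completion */
        return 0;
    }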
1532 | /** | ||
1431 | * lpfc_cmpl_els_plogi - Completion callback function for plogi | 1533 | * lpfc_cmpl_els_plogi - Completion callback function for plogi |
1432 | * @phba: pointer to lpfc hba data structure. | 1534 | * @phba: pointer to lpfc hba data structure. |
1433 | * @cmdiocb: pointer to lpfc command iocb data structure. | 1535 | * @cmdiocb: pointer to lpfc command iocb data structure. |
@@ -2722,7 +2824,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2722 | if (cmd == ELS_CMD_FLOGI) { | 2824 | if (cmd == ELS_CMD_FLOGI) { |
2723 | if (PCI_DEVICE_ID_HORNET == | 2825 | if (PCI_DEVICE_ID_HORNET == |
2724 | phba->pcidev->device) { | 2826 | phba->pcidev->device) { |
2725 | phba->fc_topology = TOPOLOGY_LOOP; | 2827 | phba->fc_topology = LPFC_TOPOLOGY_LOOP; |
2726 | phba->pport->fc_myDID = 0; | 2828 | phba->pport->fc_myDID = 0; |
2727 | phba->alpa_map[0] = 0; | 2829 | phba->alpa_map[0] = 0; |
2728 | phba->alpa_map[1] = 0; | 2830 | phba->alpa_map[1] = 0; |
@@ -2877,7 +2979,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
2877 | retry = 1; | 2979 | retry = 1; |
2878 | 2980 | ||
2879 | if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) && | 2981 | if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) && |
2880 | (phba->fc_topology != TOPOLOGY_LOOP) && | 2982 | (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && |
2881 | !lpfc_error_lost_link(irsp)) { | 2983 | !lpfc_error_lost_link(irsp)) { |
2882 | /* FLOGI retry policy */ | 2984 | /* FLOGI retry policy */ |
2883 | retry = 1; | 2985 | retry = 1; |
@@ -3219,14 +3321,6 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3219 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); | 3321 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
3220 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; | 3322 | struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; |
3221 | 3323 | ||
3222 | /* | ||
3223 | * This routine is used to register and unregister in previous SLI | ||
3224 | * modes. | ||
3225 | */ | ||
3226 | if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && | ||
3227 | (phba->sli_rev == LPFC_SLI_REV4)) | ||
3228 | lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); | ||
3229 | |||
3230 | pmb->context1 = NULL; | 3324 | pmb->context1 = NULL; |
3231 | pmb->context2 = NULL; | 3325 | pmb->context2 = NULL; |
3232 | 3326 | ||
@@ -3904,6 +3998,47 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, | |||
3904 | } | 3998 | } |
3905 | 3999 | ||
3906 | /** | 4000 | /** |
4001 | * lpfc_els_clear_rrq - Clear the active rrq described by this RRQ ELS. | ||
4002 | * @vport: pointer to a virtual N_Port data structure. | ||
4003 | * @iocb: pointer to the lpfc command iocb data structure. | ||
4004 | * @ndlp: pointer to a node-list data structure. | ||
4005 | * | ||
4006 | * Return: none | ||
4007 | **/ | ||
4008 | static void | ||
4009 | lpfc_els_clear_rrq(struct lpfc_vport *vport, | ||
4010 | struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) | ||
4011 | { | ||
4012 | struct lpfc_hba *phba = vport->phba; | ||
4013 | uint8_t *pcmd; | ||
4014 | struct RRQ *rrq; | ||
4015 | uint16_t rxid; | ||
4016 | struct lpfc_node_rrq *prrq; | ||
4017 | |||
4018 | |||
4019 | pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); | ||
4020 | pcmd += sizeof(uint32_t); | ||
4021 | rrq = (struct RRQ *)pcmd; | ||
4022 | rxid = bf_get(rrq_oxid, rrq); | ||
4023 | |||
4024 | lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, | ||
4025 | "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" | ||
4026 | " x%x x%x\n", | ||
4027 | bf_get(rrq_did, rrq), | ||
4028 | bf_get(rrq_oxid, rrq), | ||
4029 | rxid, | ||
4030 | iocb->iotag, iocb->iocb.ulpContext); | ||
4031 | |||
4032 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, | ||
4033 | "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", | ||
4034 | ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); | ||
4035 | prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID); | ||
4036 | if (prrq) | ||
4037 | lpfc_clr_rrq_active(phba, rxid, prrq); | ||
4038 | return; | ||
4039 | } | ||
4040 | |||
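lpfc_els_clear_rrq() above pulls the S_ID and OX_ID out of the big-endian RRQ payload with bf_get(), the driver's generic shift/mask field accessor, driven by the rrq_*_SHIFT/_MASK/_WORD defines added to struct RRQ in lpfc_hw.h later in this patch. A standalone sketch of how such accessors resolve (the macro body here is an approximation of the driver's helper, not a verbatim copy):

    #include <stdint.h>
    #include <stdio.h>

    /* RRQ payload word layout, mirroring the struct RRQ defines:
     * rrq_exchg carries OX_ID in bits 31:16 and RX_ID in bits 15:0. */
    struct rrq_words { uint32_t rrq; uint32_t rrq_exchg; };

    #define rrq_oxid_SHIFT 16
    #define rrq_oxid_MASK  0xffff
    #define rrq_oxid_WORD  rrq_exchg
    #define rrq_rxid_SHIFT 0
    #define rrq_rxid_MASK  0xffff
    #define rrq_rxid_WORD  rrq_exchg

    /* Approximation of the accessor: pick the word, shift, then mask. */
    #define bf_get(name, ptr) \
        (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
        struct rrq_words w = { .rrq = 0, .rrq_exchg = 0x12345678 };

        printf("oxid=0x%x rxid=0x%x\n",
               bf_get(rrq_oxid, &w), bf_get(rrq_rxid, &w));
        return 0;
    }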
4041 | /** | ||
3907 | * lpfc_els_rsp_echo_acc - Issue echo acc response | 4042 | * lpfc_els_rsp_echo_acc - Issue echo acc response |
3908 | * @vport: pointer to a virtual N_Port data structure. | 4043 | * @vport: pointer to a virtual N_Port data structure. |
3909 | * @data: pointer to echo data to return in the accept. | 4044 | * @data: pointer to echo data to return in the accept. |
@@ -4597,7 +4732,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
4597 | 4732 | ||
4598 | lpfc_set_disctmo(vport); | 4733 | lpfc_set_disctmo(vport); |
4599 | 4734 | ||
4600 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 4735 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
4601 | /* We should never receive a FLOGI in loop mode, ignore it */ | 4736 | /* We should never receive a FLOGI in loop mode, ignore it */ |
4602 | did = icmd->un.elsreq64.remoteID; | 4737 | did = icmd->un.elsreq64.remoteID; |
4603 | 4738 | ||
@@ -4792,6 +4927,8 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
4792 | struct lpfc_nodelist *ndlp) | 4927 | struct lpfc_nodelist *ndlp) |
4793 | { | 4928 | { |
4794 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); | 4929 | lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); |
4930 | if (vport->phba->sli_rev == LPFC_SLI_REV4) | ||
4931 | lpfc_els_clear_rrq(vport, cmdiocb, ndlp); | ||
4795 | } | 4932 | } |
4796 | 4933 | ||
4797 | /** | 4934 | /** |
@@ -4940,7 +5077,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4940 | pcmd += sizeof(uint32_t); /* Skip past command */ | 5077 | pcmd += sizeof(uint32_t); /* Skip past command */ |
4941 | rps_rsp = (RPS_RSP *)pcmd; | 5078 | rps_rsp = (RPS_RSP *)pcmd; |
4942 | 5079 | ||
4943 | if (phba->fc_topology != TOPOLOGY_LOOP) | 5080 | if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) |
4944 | status = 0x10; | 5081 | status = 0x10; |
4945 | else | 5082 | else |
4946 | status = 0x8; | 5083 | status = 0x8; |
@@ -5194,6 +5331,97 @@ reject_out: | |||
5194 | return 0; | 5331 | return 0; |
5195 | } | 5332 | } |
5196 | 5333 | ||
5334 | /* lpfc_issue_els_rrq - Issue an ELS RRQ (reinstate recovery qualifier) iocb | ||
5335 | * @vport: pointer to a host virtual N_Port data structure. | ||
5336 | * @ndlp: pointer to a node-list data structure. | ||
5337 | * @did: DID of the target. | ||
5338 | * @rrq: Pointer to the rrq struct. | ||
5339 | * | ||
5340 | * Build an ELS RRQ command and send it to the target. If the issue_iocb is | ||
5341 | * successful, the completion handler will clear the RRQ. | ||
5342 | * | ||
5343 | * Return codes | ||
5344 | * 0 - Successfully sent rrq els iocb. | ||
5345 | * 1 - Failed to send rrq els iocb. | ||
5346 | **/ | ||
5347 | static int | ||
5348 | lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | ||
5349 | uint32_t did, struct lpfc_node_rrq *rrq) | ||
5350 | { | ||
5351 | struct lpfc_hba *phba = vport->phba; | ||
5352 | struct RRQ *els_rrq; | ||
5353 | IOCB_t *icmd; | ||
5354 | struct lpfc_iocbq *elsiocb; | ||
5355 | uint8_t *pcmd; | ||
5356 | uint16_t cmdsize; | ||
5357 | int ret; | ||
5358 | |||
5359 | |||
5360 | if (ndlp != rrq->ndlp) | ||
5361 | ndlp = rrq->ndlp; | ||
5362 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) | ||
5363 | return 1; | ||
5364 | |||
5365 | /* If ndlp is not NULL, we will bump the reference count on it */ | ||
5366 | cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); | ||
5367 | elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, | ||
5368 | ELS_CMD_RRQ); | ||
5369 | if (!elsiocb) | ||
5370 | return 1; | ||
5371 | |||
5372 | icmd = &elsiocb->iocb; | ||
5373 | pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); | ||
5374 | |||
5375 | /* For RRQ request, remainder of payload is Exchange IDs */ | ||
5376 | *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; | ||
5377 | pcmd += sizeof(uint32_t); | ||
5378 | els_rrq = (struct RRQ *) pcmd; | ||
5379 | |||
5380 | bf_set(rrq_oxid, els_rrq, rrq->xritag); | ||
5381 | bf_set(rrq_rxid, els_rrq, rrq->rxid); | ||
5382 | bf_set(rrq_did, els_rrq, vport->fc_myDID); | ||
5383 | els_rrq->rrq = cpu_to_be32(els_rrq->rrq); | ||
5384 | els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); | ||
5385 | |||
5386 | |||
5387 | lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, | ||
5388 | "Issue RRQ: did:x%x", | ||
5389 | did, rrq->xritag, rrq->rxid); | ||
5390 | elsiocb->context_un.rrq = rrq; | ||
5391 | elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; | ||
5392 | ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); | ||
5393 | |||
5394 | if (ret == IOCB_ERROR) { | ||
5395 | lpfc_els_free_iocb(phba, elsiocb); | ||
5396 | return 1; | ||
5397 | } | ||
5398 | return 0; | ||
5399 | } | ||
5400 | |||
5401 | /** | ||
5402 | * lpfc_send_rrq - Sends ELS RRQ if needed. | ||
5403 | * @phba: pointer to lpfc hba data structure. | ||
5404 | * @rrq: pointer to the active rrq. | ||
5405 | * | ||
5406 | * This routine will call lpfc_issue_els_rrq if the rrq is | ||
5407 | * still active for the xri. If this function returns a failure then | ||
5408 | * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. | ||
5409 | * | ||
5410 | * Returns 0 Success. | ||
5411 | * 1 Failure. | ||
5412 | **/ | ||
5413 | int | ||
5414 | lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) | ||
5415 | { | ||
5416 | struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, | ||
5417 | rrq->nlp_DID); | ||
5418 | if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) | ||
5419 | return lpfc_issue_els_rrq(rrq->vport, ndlp, | ||
5420 | rrq->nlp_DID, rrq); | ||
5421 | else | ||
5422 | return 1; | ||
5423 | } | ||
5424 | |||
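As the comment above notes, a caller of lpfc_send_rrq() must release the RRQ itself when the send fails, otherwise the quarantined XRI is never returned to service. A hedged sketch of that caller-side contract (the helper names are stand-ins, not lpfc symbols):

    #include <stdio.h>

    struct rrq { unsigned int xritag; };

    /* Stand-ins for the driver calls; 0 on success, 1 on failure. */
    static int send_rrq(struct rrq *r)          { (void)r; return 1; }
    static void clr_active_rrq(struct rrq *r)
    {
        printf("freeing rrq for xri 0x%x\n", r->xritag);
    }

    static void handle_active_rrq(struct rrq *r)
    {
        /* If the ELS could not be sent, the RRQ must still be released,
         * otherwise the XRI stays quarantined forever. */
        if (send_rrq(r))
            clr_active_rrq(r);
    }

    int main(void)
    {
        struct rrq r = { .xritag = 0x10 };
        handle_active_rrq(&r);
        return 0;
    }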
5197 | /** | 5425 | /** |
5198 | * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command | 5426 | * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command |
5199 | * @vport: pointer to a host virtual N_Port data structure. | 5427 | * @vport: pointer to a host virtual N_Port data structure. |
@@ -5482,7 +5710,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
5482 | (memcmp(&phba->fc_fabparam.portName, &fp->FportName, | 5710 | (memcmp(&phba->fc_fabparam.portName, &fp->FportName, |
5483 | sizeof(struct lpfc_name)))) { | 5711 | sizeof(struct lpfc_name)))) { |
5484 | /* This port has switched fabrics. FLOGI is required */ | 5712 | /* This port has switched fabrics. FLOGI is required */ |
5485 | lpfc_initial_flogi(vport); | 5713 | lpfc_issue_init_vfi(vport); |
5486 | } else { | 5714 | } else { |
5487 | /* FAN verified - skip FLOGI */ | 5715 | /* FAN verified - skip FLOGI */ |
5488 | vport->fc_myDID = vport->fc_prevDID; | 5716 | vport->fc_myDID = vport->fc_prevDID; |
@@ -6201,7 +6429,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
6201 | cmd, did, vport->port_state); | 6429 | cmd, did, vport->port_state); |
6202 | 6430 | ||
6203 | /* Unsupported ELS command, reject */ | 6431 | /* Unsupported ELS command, reject */ |
6204 | rjt_err = LSRJT_INVALID_CMD; | 6432 | rjt_err = LSRJT_CMD_UNSUPPORTED; |
6205 | 6433 | ||
6206 | /* Unknown ELS command <elsCmd> received from NPORT <did> */ | 6434 | /* Unknown ELS command <elsCmd> received from NPORT <did> */ |
6207 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, | 6435 | lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, |
@@ -6373,7 +6601,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
6373 | if (!ndlp) { | 6601 | if (!ndlp) { |
6374 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); | 6602 | ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); |
6375 | if (!ndlp) { | 6603 | if (!ndlp) { |
6376 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 6604 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
6377 | lpfc_disc_start(vport); | 6605 | lpfc_disc_start(vport); |
6378 | return; | 6606 | return; |
6379 | } | 6607 | } |
@@ -6386,7 +6614,7 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
6386 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { | 6614 | } else if (!NLP_CHK_NODE_ACT(ndlp)) { |
6387 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); | 6615 | ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); |
6388 | if (!ndlp) { | 6616 | if (!ndlp) { |
6389 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 6617 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
6390 | lpfc_disc_start(vport); | 6618 | lpfc_disc_start(vport); |
6391 | return; | 6619 | return; |
6392 | } | 6620 | } |
@@ -6408,18 +6636,31 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
6408 | } | 6636 | } |
6409 | 6637 | ||
6410 | if (vport->cfg_fdmi_on) { | 6638 | if (vport->cfg_fdmi_on) { |
6411 | ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, | 6639 | /* If this is the first time, allocate an ndlp and initialize |
6412 | GFP_KERNEL); | 6640 | * it. Otherwise, make sure the node is enabled and then do the |
6641 | * login. | ||
6642 | */ | ||
6643 | ndlp_fdmi = lpfc_findnode_did(vport, FDMI_DID); | ||
6644 | if (!ndlp_fdmi) { | ||
6645 | ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool, | ||
6646 | GFP_KERNEL); | ||
6647 | if (ndlp_fdmi) { | ||
6648 | lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); | ||
6649 | ndlp_fdmi->nlp_type |= NLP_FABRIC; | ||
6650 | } else | ||
6651 | return; | ||
6652 | } | ||
6653 | if (!NLP_CHK_NODE_ACT(ndlp_fdmi)) | ||
6654 | ndlp_fdmi = lpfc_enable_node(vport, | ||
6655 | ndlp_fdmi, | ||
6656 | NLP_STE_NPR_NODE); | ||
6657 | |||
6413 | if (ndlp_fdmi) { | 6658 | if (ndlp_fdmi) { |
6414 | lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID); | ||
6415 | ndlp_fdmi->nlp_type |= NLP_FABRIC; | ||
6416 | lpfc_nlp_set_state(vport, ndlp_fdmi, | 6659 | lpfc_nlp_set_state(vport, ndlp_fdmi, |
6417 | NLP_STE_PLOGI_ISSUE); | 6660 | NLP_STE_PLOGI_ISSUE); |
6418 | lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, | 6661 | lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID, 0); |
6419 | 0); | ||
6420 | } | 6662 | } |
6421 | } | 6663 | } |
6422 | return; | ||
6423 | } | 6664 | } |
6424 | 6665 | ||
6425 | /** | 6666 | /** |
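The reworked FDMI branch above now follows a find-or-allocate-then-enable pattern: look the FDMI node up by its well-known DID, allocate and initialize it only on the first pass, re-enable it if it exists but is inactive, and only then issue the PLOGI. A compact sketch of that shape with hypothetical helpers:

    #include <stdlib.h>
    #include <stdio.h>

    struct node { unsigned int did; int active; };

    static struct node *find_node(unsigned int did) { (void)did; return NULL; }
    static struct node *enable_node(struct node *n) { n->active = 1; return n; }
    static void issue_plogi(struct node *n) { printf("PLOGI to 0x%x\n", n->did); }

    static void login_fdmi(unsigned int fdmi_did)
    {
        struct node *n = find_node(fdmi_did);

        if (!n) {                           /* first time: allocate and init */
            n = calloc(1, sizeof(*n));
            if (!n)
                return;
            n->did = fdmi_did;
            n->active = 1;
        }
        if (!n->active)                     /* recycled node: re-enable first */
            n = enable_node(n);
        if (n)
            issue_plogi(n);
    }

    int main(void) { login_fdmi(0xfffffa); return 0; }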
@@ -6497,7 +6738,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
6497 | spin_unlock_irq(shost->host_lock); | 6738 | spin_unlock_irq(shost->host_lock); |
6498 | if (vport->port_type == LPFC_PHYSICAL_PORT | 6739 | if (vport->port_type == LPFC_PHYSICAL_PORT |
6499 | && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) | 6740 | && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) |
6500 | lpfc_initial_flogi(vport); | 6741 | lpfc_issue_init_vfi(vport); |
6501 | else | 6742 | else |
6502 | lpfc_initial_fdisc(vport); | 6743 | lpfc_initial_fdisc(vport); |
6503 | break; | 6744 | break; |
@@ -6734,7 +6975,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
6734 | vport->fc_flag &= ~FC_VPORT_CVL_RCVD; | 6975 | vport->fc_flag &= ~FC_VPORT_CVL_RCVD; |
6735 | vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; | 6976 | vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; |
6736 | vport->fc_flag |= FC_FABRIC; | 6977 | vport->fc_flag |= FC_FABRIC; |
6737 | if (vport->phba->fc_topology == TOPOLOGY_LOOP) | 6978 | if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) |
6738 | vport->fc_flag |= FC_PUBLIC_LOOP; | 6979 | vport->fc_flag |= FC_PUBLIC_LOOP; |
6739 | spin_unlock_irq(shost->host_lock); | 6980 | spin_unlock_irq(shost->host_lock); |
6740 | 6981 | ||
@@ -6844,7 +7085,9 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
6844 | icmd->un.elsreq64.myID = 0; | 7085 | icmd->un.elsreq64.myID = 0; |
6845 | icmd->un.elsreq64.fl = 1; | 7086 | icmd->un.elsreq64.fl = 1; |
6846 | 7087 | ||
6847 | if (phba->sli_rev == LPFC_SLI_REV4) { | 7088 | if ((phba->sli_rev == LPFC_SLI_REV4) && |
7089 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == | ||
7090 | LPFC_SLI_INTF_IF_TYPE_0)) { | ||
6848 | /* FDISC needs to be 1 for WQE VPI */ | 7091 | /* FDISC needs to be 1 for WQE VPI */ |
6849 | elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; | 7092 | elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; |
6850 | elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; | 7093 | elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; |
@@ -7351,8 +7594,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, | |||
7351 | struct sli4_wcqe_xri_aborted *axri) | 7594 | struct sli4_wcqe_xri_aborted *axri) |
7352 | { | 7595 | { |
7353 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); | 7596 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); |
7597 | uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); | ||
7598 | |||
7354 | struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; | 7599 | struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; |
7355 | unsigned long iflag = 0; | 7600 | unsigned long iflag = 0; |
7601 | struct lpfc_nodelist *ndlp; | ||
7356 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | 7602 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; |
7357 | 7603 | ||
7358 | spin_lock_irqsave(&phba->hbalock, iflag); | 7604 | spin_lock_irqsave(&phba->hbalock, iflag); |
@@ -7361,11 +7607,14 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, | |||
7361 | &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { | 7607 | &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { |
7362 | if (sglq_entry->sli4_xritag == xri) { | 7608 | if (sglq_entry->sli4_xritag == xri) { |
7363 | list_del(&sglq_entry->list); | 7609 | list_del(&sglq_entry->list); |
7610 | ndlp = sglq_entry->ndlp; | ||
7611 | sglq_entry->ndlp = NULL; | ||
7364 | list_add_tail(&sglq_entry->list, | 7612 | list_add_tail(&sglq_entry->list, |
7365 | &phba->sli4_hba.lpfc_sgl_list); | 7613 | &phba->sli4_hba.lpfc_sgl_list); |
7366 | sglq_entry->state = SGL_FREED; | 7614 | sglq_entry->state = SGL_FREED; |
7367 | spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); | 7615 | spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); |
7368 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 7616 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
7617 | lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); | ||
7369 | 7618 | ||
7370 | /* Check if TXQ queue needs to be serviced */ | 7619 | /* Check if TXQ queue needs to be serviced */ |
7371 | if (pring->txq_cnt) | 7620 | if (pring->txq_cnt) |
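With the lpfc_sli4_els_xri_aborted() change above, an aborted ELS XRI is recorded as an active RRQ against its node before the sglq is recycled, so the exchange ID stays quarantined until an RRQ ELS or a timeout clears it. Conceptually this is per-node bitmap bookkeeping over XRIs; the sketch below shows that idea in isolation (the real lpfc_set_rrq_active/lpfc_clr_rrq_active also manage an rrq list and timers):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_XRI 512
    #define BITS_PER_WORD 32

    struct node {
        uint32_t xri_bitmap[MAX_XRI / BITS_PER_WORD];
    };

    static void set_rrq_active(struct node *n, unsigned int xri)
    {
        n->xri_bitmap[xri / BITS_PER_WORD] |= 1u << (xri % BITS_PER_WORD);
    }

    static int test_rrq_active(const struct node *n, unsigned int xri)
    {
        return (n->xri_bitmap[xri / BITS_PER_WORD] >> (xri % BITS_PER_WORD)) & 1u;
    }

    static void clr_rrq_active(struct node *n, unsigned int xri)
    {
        n->xri_bitmap[xri / BITS_PER_WORD] &= ~(1u << (xri % BITS_PER_WORD));
    }

    int main(void)
    {
        struct node n = { {0} };

        set_rrq_active(&n, 77);             /* XRI aborted: quarantine it */
        printf("xri 77 active: %d\n", test_rrq_active(&n, 77));
        clr_rrq_active(&n, 77);             /* RRQ completed: release it */
        printf("xri 77 active: %d\n", test_rrq_active(&n, 77));
        return 0;
    }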
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index a5d1695dac3d..f9f160ab2ee9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -607,6 +607,8 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
607 | 607 | ||
608 | /* Process SLI4 events */ | 608 | /* Process SLI4 events */ |
609 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { | 609 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { |
610 | if (phba->hba_flag & HBA_RRQ_ACTIVE) | ||
611 | lpfc_handle_rrq_active(phba); | ||
610 | if (phba->hba_flag & FCP_XRI_ABORT_EVENT) | 612 | if (phba->hba_flag & FCP_XRI_ABORT_EVENT) |
611 | lpfc_sli4_fcp_xri_abort_event_proc(phba); | 613 | lpfc_sli4_fcp_xri_abort_event_proc(phba); |
612 | if (phba->hba_flag & ELS_XRI_ABORT_EVENT) | 614 | if (phba->hba_flag & ELS_XRI_ABORT_EVENT) |
@@ -966,6 +968,7 @@ lpfc_linkup(struct lpfc_hba *phba) | |||
966 | struct lpfc_vport **vports; | 968 | struct lpfc_vport **vports; |
967 | int i; | 969 | int i; |
968 | 970 | ||
971 | lpfc_cleanup_wt_rrqs(phba); | ||
969 | phba->link_state = LPFC_LINK_UP; | 972 | phba->link_state = LPFC_LINK_UP; |
970 | 973 | ||
971 | /* Unblock fabric iocbs if they are blocked */ | 974 | /* Unblock fabric iocbs if they are blocked */ |
@@ -1064,7 +1067,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1064 | 1067 | ||
1065 | mempool_free(pmb, phba->mbox_mem_pool); | 1068 | mempool_free(pmb, phba->mbox_mem_pool); |
1066 | 1069 | ||
1067 | if (phba->fc_topology == TOPOLOGY_LOOP && | 1070 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && |
1068 | vport->fc_flag & FC_PUBLIC_LOOP && | 1071 | vport->fc_flag & FC_PUBLIC_LOOP && |
1069 | !(vport->fc_flag & FC_LBIT)) { | 1072 | !(vport->fc_flag & FC_LBIT)) { |
1070 | /* Need to wait for FAN - use discovery timer | 1073 | /* Need to wait for FAN - use discovery timer |
@@ -1078,9 +1081,8 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1078 | /* Start discovery by sending a FLOGI. port_state is identically | 1081 | /* Start discovery by sending a FLOGI. port_state is identically |
1079 | * LPFC_FLOGI while waiting for FLOGI cmpl | 1082 | * LPFC_FLOGI while waiting for FLOGI cmpl |
1080 | */ | 1083 | */ |
1081 | if (vport->port_state != LPFC_FLOGI) { | 1084 | if (vport->port_state != LPFC_FLOGI) |
1082 | lpfc_initial_flogi(vport); | 1085 | lpfc_initial_flogi(vport); |
1083 | } | ||
1084 | return; | 1086 | return; |
1085 | 1087 | ||
1086 | out: | 1088 | out: |
@@ -1131,7 +1133,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
1131 | if (vport->port_state != LPFC_FLOGI) { | 1133 | if (vport->port_state != LPFC_FLOGI) { |
1132 | phba->hba_flag |= FCF_RR_INPROG; | 1134 | phba->hba_flag |= FCF_RR_INPROG; |
1133 | spin_unlock_irq(&phba->hbalock); | 1135 | spin_unlock_irq(&phba->hbalock); |
1134 | lpfc_initial_flogi(vport); | 1136 | lpfc_issue_init_vfi(vport); |
1135 | goto out; | 1137 | goto out; |
1136 | } | 1138 | } |
1137 | spin_unlock_irq(&phba->hbalock); | 1139 | spin_unlock_irq(&phba->hbalock); |
@@ -1353,7 +1355,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) | |||
1353 | if (phba->pport->port_state != LPFC_FLOGI) { | 1355 | if (phba->pport->port_state != LPFC_FLOGI) { |
1354 | phba->hba_flag |= FCF_RR_INPROG; | 1356 | phba->hba_flag |= FCF_RR_INPROG; |
1355 | spin_unlock_irq(&phba->hbalock); | 1357 | spin_unlock_irq(&phba->hbalock); |
1356 | lpfc_initial_flogi(phba->pport); | 1358 | lpfc_issue_init_vfi(phba->pport); |
1357 | return; | 1359 | return; |
1358 | } | 1360 | } |
1359 | spin_unlock_irq(&phba->hbalock); | 1361 | spin_unlock_irq(&phba->hbalock); |
@@ -2331,7 +2333,7 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2331 | phba->fcf.current_rec.fcf_indx, fcf_index); | 2333 | phba->fcf.current_rec.fcf_indx, fcf_index); |
2332 | /* Wait 500 ms before retrying FLOGI to current FCF */ | 2334 | /* Wait 500 ms before retrying FLOGI to current FCF */ |
2333 | msleep(500); | 2335 | msleep(500); |
2334 | lpfc_initial_flogi(phba->pport); | 2336 | lpfc_issue_init_vfi(phba->pport); |
2335 | goto out; | 2337 | goto out; |
2336 | } | 2338 | } |
2337 | 2339 | ||
@@ -2422,6 +2424,63 @@ out: | |||
2422 | } | 2424 | } |
2423 | 2425 | ||
2424 | /** | 2426 | /** |
2427 | * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. | ||
2428 | * @phba: pointer to lpfc hba data structure. | ||
2429 | * @mboxq: pointer to mailbox data structure. | ||
2430 | * | ||
2431 | * This function handles completion of the init_vfi mailbox command. | ||
2432 | */ | ||
2433 | void | ||
2434 | lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | ||
2435 | { | ||
2436 | struct lpfc_vport *vport = mboxq->vport; | ||
2437 | |||
2438 | if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) { | ||
2439 | lpfc_printf_vlog(vport, KERN_ERR, | ||
2440 | LOG_MBOX, | ||
2441 | "2891 Init VFI mailbox failed 0x%x\n", | ||
2442 | mboxq->u.mb.mbxStatus); | ||
2443 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
2444 | lpfc_vport_set_state(vport, FC_VPORT_FAILED); | ||
2445 | return; | ||
2446 | } | ||
2447 | lpfc_initial_flogi(vport); | ||
2448 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
2449 | return; | ||
2450 | } | ||
2451 | |||
2452 | /** | ||
2453 | * lpfc_issue_init_vfi - Issue init_vfi mailbox command. | ||
2454 | * @vport: pointer to lpfc_vport data structure. | ||
2455 | * | ||
2456 | * This function issues an init_vfi mailbox command to initialize the VFI and | ||
2457 | * VPI for the physical port. | ||
2458 | */ | ||
2459 | void | ||
2460 | lpfc_issue_init_vfi(struct lpfc_vport *vport) | ||
2461 | { | ||
2462 | LPFC_MBOXQ_t *mboxq; | ||
2463 | int rc; | ||
2464 | struct lpfc_hba *phba = vport->phba; | ||
2465 | |||
2466 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2467 | if (!mboxq) { | ||
2468 | lpfc_printf_vlog(vport, KERN_ERR, | ||
2469 | LOG_MBOX, "2892 Failed to allocate " | ||
2470 | "init_vfi mailbox\n"); | ||
2471 | return; | ||
2472 | } | ||
2473 | lpfc_init_vfi(mboxq, vport); | ||
2474 | mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; | ||
2475 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | ||
2476 | if (rc == MBX_NOT_FINISHED) { | ||
2477 | lpfc_printf_vlog(vport, KERN_ERR, | ||
2478 | LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n"); | ||
2479 | mempool_free(mboxq, vport->phba->mbox_mem_pool); | ||
2480 | } | ||
2481 | } | ||
2482 | |||
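lpfc_issue_init_vfi() above follows the driver's usual asynchronous mailbox ownership rule: allocate the mailbox, attach a completion callback, post it with MBX_NOWAIT, and free it in the issue path only if posting fails; otherwise the completion handler frees it. A generic sketch of that rule (names are illustrative, not the lpfc mailbox API):

    #include <stdlib.h>
    #include <stdio.h>

    struct mbox {
        void (*cmpl)(struct mbox *m, int status);
    };

    static int post_mbox(struct mbox *m)
    {
        /* Pretend the command was accepted; the "hardware" completes it. */
        m->cmpl(m, 0);
        return 0;                           /* nonzero would mean "not posted" */
    }

    static void init_vfi_cmpl(struct mbox *m, int status)
    {
        if (status)
            printf("init_vfi failed: 0x%x\n", status);
        free(m);                            /* completion path owns the mailbox */
    }

    static void issue_init_vfi(void)
    {
        struct mbox *m = calloc(1, sizeof(*m));

        if (!m)
            return;
        m->cmpl = init_vfi_cmpl;
        if (post_mbox(m))
            free(m);                        /* only free here if posting failed */
    }

    int main(void) { issue_init_vfi(); return 0; }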
2483 | /** | ||
2425 | * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. | 2484 | * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. |
2426 | * @phba: pointer to lpfc hba data structure. | 2485 | * @phba: pointer to lpfc hba data structure. |
2427 | * @mboxq: pointer to mailbox data structure. | 2486 | * @mboxq: pointer to mailbox data structure. |
@@ -2528,7 +2587,7 @@ lpfc_start_fdiscs(struct lpfc_hba *phba) | |||
2528 | FC_VPORT_FAILED); | 2587 | FC_VPORT_FAILED); |
2529 | continue; | 2588 | continue; |
2530 | } | 2589 | } |
2531 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 2590 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
2532 | lpfc_vport_set_state(vports[i], | 2591 | lpfc_vport_set_state(vports[i], |
2533 | FC_VPORT_LINKDOWN); | 2592 | FC_VPORT_LINKDOWN); |
2534 | continue; | 2593 | continue; |
@@ -2564,7 +2623,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2564 | "2018 REG_VFI mbxStatus error x%x " | 2623 | "2018 REG_VFI mbxStatus error x%x " |
2565 | "HBA state x%x\n", | 2624 | "HBA state x%x\n", |
2566 | mboxq->u.mb.mbxStatus, vport->port_state); | 2625 | mboxq->u.mb.mbxStatus, vport->port_state); |
2567 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 2626 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
2568 | /* FLOGI failed, use loop map to make discovery list */ | 2627 | /* FLOGI failed, use loop map to make discovery list */ |
2569 | lpfc_disc_list_loopmap(vport); | 2628 | lpfc_disc_list_loopmap(vport); |
2570 | /* Start discovery */ | 2629 | /* Start discovery */ |
@@ -2582,8 +2641,18 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
2582 | spin_unlock_irq(shost->host_lock); | 2641 | spin_unlock_irq(shost->host_lock); |
2583 | 2642 | ||
2584 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { | 2643 | if (vport->port_state == LPFC_FABRIC_CFG_LINK) { |
2585 | lpfc_start_fdiscs(phba); | 2644 | /* For private loop just start discovery and we are done. */ |
2586 | lpfc_do_scr_ns_plogi(phba, vport); | 2645 | if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && |
2646 | (phba->alpa_map[0] == 0) && | ||
2647 | !(vport->fc_flag & FC_PUBLIC_LOOP)) { | ||
2648 | /* Use loop map to make discovery list */ | ||
2649 | lpfc_disc_list_loopmap(vport); | ||
2650 | /* Start discovery */ | ||
2651 | lpfc_disc_start(vport); | ||
2652 | } else { | ||
2653 | lpfc_start_fdiscs(phba); | ||
2654 | lpfc_do_scr_ns_plogi(phba, vport); | ||
2655 | } | ||
2587 | } | 2656 | } |
2588 | 2657 | ||
2589 | fail_free_mem: | 2658 | fail_free_mem: |
@@ -2644,7 +2713,7 @@ out: | |||
2644 | } | 2713 | } |
2645 | 2714 | ||
2646 | static void | 2715 | static void |
2647 | lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | 2716 | lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) |
2648 | { | 2717 | { |
2649 | struct lpfc_vport *vport = phba->pport; | 2718 | struct lpfc_vport *vport = phba->pport; |
2650 | LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; | 2719 | LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; |
@@ -2654,31 +2723,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
2654 | struct fcf_record *fcf_record; | 2723 | struct fcf_record *fcf_record; |
2655 | 2724 | ||
2656 | spin_lock_irq(&phba->hbalock); | 2725 | spin_lock_irq(&phba->hbalock); |
2657 | switch (la->UlnkSpeed) { | 2726 | switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { |
2658 | case LA_1GHZ_LINK: | 2727 | case LPFC_LINK_SPEED_1GHZ: |
2659 | phba->fc_linkspeed = LA_1GHZ_LINK; | 2728 | case LPFC_LINK_SPEED_2GHZ: |
2660 | break; | 2729 | case LPFC_LINK_SPEED_4GHZ: |
2661 | case LA_2GHZ_LINK: | 2730 | case LPFC_LINK_SPEED_8GHZ: |
2662 | phba->fc_linkspeed = LA_2GHZ_LINK; | 2731 | case LPFC_LINK_SPEED_10GHZ: |
2663 | break; | 2732 | case LPFC_LINK_SPEED_16GHZ: |
2664 | case LA_4GHZ_LINK: | 2733 | phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); |
2665 | phba->fc_linkspeed = LA_4GHZ_LINK; | ||
2666 | break; | ||
2667 | case LA_8GHZ_LINK: | ||
2668 | phba->fc_linkspeed = LA_8GHZ_LINK; | ||
2669 | break; | ||
2670 | case LA_10GHZ_LINK: | ||
2671 | phba->fc_linkspeed = LA_10GHZ_LINK; | ||
2672 | break; | 2734 | break; |
2673 | default: | 2735 | default: |
2674 | phba->fc_linkspeed = LA_UNKNW_LINK; | 2736 | phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; |
2675 | break; | 2737 | break; |
2676 | } | 2738 | } |
2677 | 2739 | ||
2678 | phba->fc_topology = la->topology; | 2740 | phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); |
2679 | phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; | 2741 | phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; |
2680 | 2742 | ||
2681 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 2743 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
2682 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; | 2744 | phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; |
2683 | 2745 | ||
2684 | /* if npiv is enabled and this adapter supports npiv log | 2746 | /* if npiv is enabled and this adapter supports npiv log |
@@ -2689,11 +2751,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
2689 | "1309 Link Up Event npiv not supported in loop " | 2751 | "1309 Link Up Event npiv not supported in loop " |
2690 | "topology\n"); | 2752 | "topology\n"); |
2691 | /* Get Loop Map information */ | 2753 | /* Get Loop Map information */ |
2692 | if (la->il) | 2754 | if (bf_get(lpfc_mbx_read_top_il, la)) |
2693 | vport->fc_flag |= FC_LBIT; | 2755 | vport->fc_flag |= FC_LBIT; |
2694 | 2756 | ||
2695 | vport->fc_myDID = la->granted_AL_PA; | 2757 | vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); |
2696 | i = la->un.lilpBde64.tus.f.bdeSize; | 2758 | i = la->lilpBde64.tus.f.bdeSize; |
2697 | 2759 | ||
2698 | if (i == 0) { | 2760 | if (i == 0) { |
2699 | phba->alpa_map[0] = 0; | 2761 | phba->alpa_map[0] = 0; |
@@ -2764,7 +2826,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) | |||
2764 | goto out; | 2826 | goto out; |
2765 | } | 2827 | } |
2766 | 2828 | ||
2767 | if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { | 2829 | if (!(phba->hba_flag & HBA_FCOE_MODE)) { |
2768 | cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2830 | cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
2769 | if (!cfglink_mbox) | 2831 | if (!cfglink_mbox) |
2770 | goto out; | 2832 | goto out; |
@@ -2874,17 +2936,17 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) | |||
2874 | 2936 | ||
2875 | 2937 | ||
2876 | /* | 2938 | /* |
2877 | * This routine handles processing a READ_LA mailbox | 2939 | * This routine handles processing a READ_TOPOLOGY mailbox |
2878 | * command upon completion. It is setup in the LPFC_MBOXQ | 2940 | * command upon completion. It is setup in the LPFC_MBOXQ |
2879 | * as the completion routine when the command is | 2941 | * as the completion routine when the command is |
2880 | * handed off to the SLI layer. | 2942 | * handed off to the SLI layer. |
2881 | */ | 2943 | */ |
2882 | void | 2944 | void |
2883 | lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | 2945 | lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) |
2884 | { | 2946 | { |
2885 | struct lpfc_vport *vport = pmb->vport; | 2947 | struct lpfc_vport *vport = pmb->vport; |
2886 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 2948 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
2887 | READ_LA_VAR *la; | 2949 | struct lpfc_mbx_read_top *la; |
2888 | MAILBOX_t *mb = &pmb->u.mb; | 2950 | MAILBOX_t *mb = &pmb->u.mb; |
2889 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); | 2951 | struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); |
2890 | 2952 | ||
@@ -2897,15 +2959,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2897 | mb->mbxStatus, vport->port_state); | 2959 | mb->mbxStatus, vport->port_state); |
2898 | lpfc_mbx_issue_link_down(phba); | 2960 | lpfc_mbx_issue_link_down(phba); |
2899 | phba->link_state = LPFC_HBA_ERROR; | 2961 | phba->link_state = LPFC_HBA_ERROR; |
2900 | goto lpfc_mbx_cmpl_read_la_free_mbuf; | 2962 | goto lpfc_mbx_cmpl_read_topology_free_mbuf; |
2901 | } | 2963 | } |
2902 | 2964 | ||
2903 | la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; | 2965 | la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; |
2904 | 2966 | ||
2905 | memcpy(&phba->alpa_map[0], mp->virt, 128); | 2967 | memcpy(&phba->alpa_map[0], mp->virt, 128); |
2906 | 2968 | ||
2907 | spin_lock_irq(shost->host_lock); | 2969 | spin_lock_irq(shost->host_lock); |
2908 | if (la->pb) | 2970 | if (bf_get(lpfc_mbx_read_top_pb, la)) |
2909 | vport->fc_flag |= FC_BYPASSED_MODE; | 2971 | vport->fc_flag |= FC_BYPASSED_MODE; |
2910 | else | 2972 | else |
2911 | vport->fc_flag &= ~FC_BYPASSED_MODE; | 2973 | vport->fc_flag &= ~FC_BYPASSED_MODE; |
@@ -2914,41 +2976,48 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2914 | if ((phba->fc_eventTag < la->eventTag) || | 2976 | if ((phba->fc_eventTag < la->eventTag) || |
2915 | (phba->fc_eventTag == la->eventTag)) { | 2977 | (phba->fc_eventTag == la->eventTag)) { |
2916 | phba->fc_stat.LinkMultiEvent++; | 2978 | phba->fc_stat.LinkMultiEvent++; |
2917 | if (la->attType == AT_LINK_UP) | 2979 | if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) |
2918 | if (phba->fc_eventTag != 0) | 2980 | if (phba->fc_eventTag != 0) |
2919 | lpfc_linkdown(phba); | 2981 | lpfc_linkdown(phba); |
2920 | } | 2982 | } |
2921 | 2983 | ||
2922 | phba->fc_eventTag = la->eventTag; | 2984 | phba->fc_eventTag = la->eventTag; |
2923 | spin_lock_irq(&phba->hbalock); | 2985 | spin_lock_irq(&phba->hbalock); |
2924 | if (la->mm) | 2986 | if (bf_get(lpfc_mbx_read_top_mm, la)) |
2925 | phba->sli.sli_flag |= LPFC_MENLO_MAINT; | 2987 | phba->sli.sli_flag |= LPFC_MENLO_MAINT; |
2926 | else | 2988 | else |
2927 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; | 2989 | phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; |
2928 | spin_unlock_irq(&phba->hbalock); | 2990 | spin_unlock_irq(&phba->hbalock); |
2929 | 2991 | ||
2930 | phba->link_events++; | 2992 | phba->link_events++; |
2931 | if (la->attType == AT_LINK_UP && (!la->mm)) { | 2993 | if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && |
2994 | (!bf_get(lpfc_mbx_read_top_mm, la))) { | ||
2932 | phba->fc_stat.LinkUp++; | 2995 | phba->fc_stat.LinkUp++; |
2933 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 2996 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
2934 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 2997 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
2935 | "1306 Link Up Event in loop back mode " | 2998 | "1306 Link Up Event in loop back mode " |
2936 | "x%x received Data: x%x x%x x%x x%x\n", | 2999 | "x%x received Data: x%x x%x x%x x%x\n", |
2937 | la->eventTag, phba->fc_eventTag, | 3000 | la->eventTag, phba->fc_eventTag, |
2938 | la->granted_AL_PA, la->UlnkSpeed, | 3001 | bf_get(lpfc_mbx_read_top_alpa_granted, |
3002 | la), | ||
3003 | bf_get(lpfc_mbx_read_top_link_spd, la), | ||
2939 | phba->alpa_map[0]); | 3004 | phba->alpa_map[0]); |
2940 | } else { | 3005 | } else { |
2941 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 3006 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
2942 | "1303 Link Up Event x%x received " | 3007 | "1303 Link Up Event x%x received " |
2943 | "Data: x%x x%x x%x x%x x%x x%x %d\n", | 3008 | "Data: x%x x%x x%x x%x x%x x%x %d\n", |
2944 | la->eventTag, phba->fc_eventTag, | 3009 | la->eventTag, phba->fc_eventTag, |
2945 | la->granted_AL_PA, la->UlnkSpeed, | 3010 | bf_get(lpfc_mbx_read_top_alpa_granted, |
3011 | la), | ||
3012 | bf_get(lpfc_mbx_read_top_link_spd, la), | ||
2946 | phba->alpa_map[0], | 3013 | phba->alpa_map[0], |
2947 | la->mm, la->fa, | 3014 | bf_get(lpfc_mbx_read_top_mm, la), |
3015 | bf_get(lpfc_mbx_read_top_fa, la), | ||
2948 | phba->wait_4_mlo_maint_flg); | 3016 | phba->wait_4_mlo_maint_flg); |
2949 | } | 3017 | } |
2950 | lpfc_mbx_process_link_up(phba, la); | 3018 | lpfc_mbx_process_link_up(phba, la); |
2951 | } else if (la->attType == AT_LINK_DOWN) { | 3019 | } else if (bf_get(lpfc_mbx_read_top_att_type, la) == |
3020 | LPFC_ATT_LINK_DOWN) { | ||
2952 | phba->fc_stat.LinkDown++; | 3021 | phba->fc_stat.LinkDown++; |
2953 | if (phba->link_flag & LS_LOOPBACK_MODE) { | 3022 | if (phba->link_flag & LS_LOOPBACK_MODE) { |
2954 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 3023 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
@@ -2964,11 +3033,13 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2964 | "Data: x%x x%x x%x x%x x%x\n", | 3033 | "Data: x%x x%x x%x x%x x%x\n", |
2965 | la->eventTag, phba->fc_eventTag, | 3034 | la->eventTag, phba->fc_eventTag, |
2966 | phba->pport->port_state, vport->fc_flag, | 3035 | phba->pport->port_state, vport->fc_flag, |
2967 | la->mm, la->fa); | 3036 | bf_get(lpfc_mbx_read_top_mm, la), |
3037 | bf_get(lpfc_mbx_read_top_fa, la)); | ||
2968 | } | 3038 | } |
2969 | lpfc_mbx_issue_link_down(phba); | 3039 | lpfc_mbx_issue_link_down(phba); |
2970 | } | 3040 | } |
2971 | if (la->mm && la->attType == AT_LINK_UP) { | 3041 | if ((bf_get(lpfc_mbx_read_top_mm, la)) && |
3042 | (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) { | ||
2972 | if (phba->link_state != LPFC_LINK_DOWN) { | 3043 | if (phba->link_state != LPFC_LINK_DOWN) { |
2973 | phba->fc_stat.LinkDown++; | 3044 | phba->fc_stat.LinkDown++; |
2974 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, | 3045 | lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, |
@@ -2996,14 +3067,15 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
2996 | } | 3067 | } |
2997 | } | 3068 | } |
2998 | 3069 | ||
2999 | if (la->fa) { | 3070 | if (bf_get(lpfc_mbx_read_top_fa, la)) { |
3000 | if (la->mm) | 3071 | if (bf_get(lpfc_mbx_read_top_mm, la)) |
3001 | lpfc_issue_clear_la(phba, vport); | 3072 | lpfc_issue_clear_la(phba, vport); |
3002 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, | 3073 | lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, |
3003 | "1311 fa %d\n", la->fa); | 3074 | "1311 fa %d\n", |
3075 | bf_get(lpfc_mbx_read_top_fa, la)); | ||
3004 | } | 3076 | } |
3005 | 3077 | ||
3006 | lpfc_mbx_cmpl_read_la_free_mbuf: | 3078 | lpfc_mbx_cmpl_read_topology_free_mbuf: |
3007 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | 3079 | lpfc_mbuf_free(phba, mp->virt, mp->phys); |
3008 | kfree(mp); | 3080 | kfree(mp); |
3009 | mempool_free(pmb, phba->mbox_mem_pool); | 3081 | mempool_free(pmb, phba->mbox_mem_pool); |
@@ -3030,8 +3102,8 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3030 | if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) | 3102 | if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) |
3031 | ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; | 3103 | ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; |
3032 | 3104 | ||
3033 | if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || | 3105 | if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || |
3034 | ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { | 3106 | ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { |
3035 | /* We rcvd a rscn after issuing this | 3107 | /* We rcvd a rscn after issuing this |
3036 | * mbox reg login, we may have cycled | 3108 | * mbox reg login, we may have cycled |
3037 | * back through the state and be | 3109 | * back through the state and be |
@@ -3043,10 +3115,6 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3043 | spin_lock_irq(shost->host_lock); | 3115 | spin_lock_irq(shost->host_lock); |
3044 | ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; | 3116 | ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; |
3045 | spin_unlock_irq(shost->host_lock); | 3117 | spin_unlock_irq(shost->host_lock); |
3046 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
3047 | lpfc_sli4_free_rpi(phba, | ||
3048 | pmb->u.mb.un.varRegLogin.rpi); | ||
3049 | |||
3050 | } else | 3118 | } else |
3051 | /* Good status, call state machine */ | 3119 | /* Good status, call state machine */ |
3052 | lpfc_disc_state_machine(vport, ndlp, pmb, | 3120 | lpfc_disc_state_machine(vport, ndlp, pmb, |
@@ -3092,6 +3160,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3092 | spin_unlock_irq(shost->host_lock); | 3160 | spin_unlock_irq(shost->host_lock); |
3093 | vport->unreg_vpi_cmpl = VPORT_OK; | 3161 | vport->unreg_vpi_cmpl = VPORT_OK; |
3094 | mempool_free(pmb, phba->mbox_mem_pool); | 3162 | mempool_free(pmb, phba->mbox_mem_pool); |
3163 | lpfc_cleanup_vports_rrqs(vport); | ||
3095 | /* | 3164 | /* |
3096 | * This shost reference might have been taken at the beginning of | 3165 | * This shost reference might have been taken at the beginning of |
3097 | * lpfc_vport_delete() | 3166 | * lpfc_vport_delete() |
@@ -3333,7 +3402,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3333 | kfree(mp); | 3402 | kfree(mp); |
3334 | mempool_free(pmb, phba->mbox_mem_pool); | 3403 | mempool_free(pmb, phba->mbox_mem_pool); |
3335 | 3404 | ||
3336 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 3405 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
3337 | /* FLOGI failed, use loop map to make discovery list */ | 3406 | /* FLOGI failed, use loop map to make discovery list */ |
3338 | lpfc_disc_list_loopmap(vport); | 3407 | lpfc_disc_list_loopmap(vport); |
3339 | 3408 | ||
@@ -3355,7 +3424,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
3355 | } | 3424 | } |
3356 | 3425 | ||
3357 | ndlp->nlp_rpi = mb->un.varWords[0]; | 3426 | ndlp->nlp_rpi = mb->un.varWords[0]; |
3358 | ndlp->nlp_flag |= NLP_RPI_VALID; | 3427 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
3359 | ndlp->nlp_type |= NLP_FABRIC; | 3428 | ndlp->nlp_type |= NLP_FABRIC; |
3360 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 3429 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
3361 | 3430 | ||
@@ -3413,7 +3482,7 @@ out: | |||
3413 | /* If no other thread is using the ndlp, free it */ | 3482 | /* If no other thread is using the ndlp, free it */ |
3414 | lpfc_nlp_not_used(ndlp); | 3483 | lpfc_nlp_not_used(ndlp); |
3415 | 3484 | ||
3416 | if (phba->fc_topology == TOPOLOGY_LOOP) { | 3485 | if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { |
3417 | /* | 3486 | /* |
3418 | * RegLogin failed, use loop map to make discovery | 3487 | * RegLogin failed, use loop map to make discovery |
3419 | * list | 3488 | * list |
@@ -3429,7 +3498,7 @@ out: | |||
3429 | } | 3498 | } |
3430 | 3499 | ||
3431 | ndlp->nlp_rpi = mb->un.varWords[0]; | 3500 | ndlp->nlp_rpi = mb->un.varWords[0]; |
3432 | ndlp->nlp_flag |= NLP_RPI_VALID; | 3501 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
3433 | ndlp->nlp_type |= NLP_FABRIC; | 3502 | ndlp->nlp_type |= NLP_FABRIC; |
3434 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 3503 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
3435 | 3504 | ||
@@ -3762,6 +3831,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
3762 | NLP_INT_NODE_ACT(ndlp); | 3831 | NLP_INT_NODE_ACT(ndlp); |
3763 | atomic_set(&ndlp->cmd_pending, 0); | 3832 | atomic_set(&ndlp->cmd_pending, 0); |
3764 | ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; | 3833 | ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; |
3834 | if (vport->phba->sli_rev == LPFC_SLI_REV4) | ||
3835 | ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); | ||
3765 | } | 3836 | } |
3766 | 3837 | ||
3767 | struct lpfc_nodelist * | 3838 | struct lpfc_nodelist * |
@@ -3975,7 +4046,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
3975 | * by firmware with a no rpi error. | 4046 | * by firmware with a no rpi error. |
3976 | */ | 4047 | */ |
3977 | psli = &phba->sli; | 4048 | psli = &phba->sli; |
3978 | if (ndlp->nlp_flag & NLP_RPI_VALID) { | 4049 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
3979 | /* Now process each ring */ | 4050 | /* Now process each ring */ |
3980 | for (i = 0; i < psli->num_rings; i++) { | 4051 | for (i = 0; i < psli->num_rings; i++) { |
3981 | pring = &psli->ring[i]; | 4052 | pring = &psli->ring[i]; |
@@ -4023,7 +4094,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4023 | LPFC_MBOXQ_t *mbox; | 4094 | LPFC_MBOXQ_t *mbox; |
4024 | int rc; | 4095 | int rc; |
4025 | 4096 | ||
4026 | if (ndlp->nlp_flag & NLP_RPI_VALID) { | 4097 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
4027 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 4098 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
4028 | if (mbox) { | 4099 | if (mbox) { |
4029 | lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); | 4100 | lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); |
@@ -4035,8 +4106,9 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4035 | } | 4106 | } |
4036 | lpfc_no_rpi(phba, ndlp); | 4107 | lpfc_no_rpi(phba, ndlp); |
4037 | 4108 | ||
4038 | ndlp->nlp_rpi = 0; | 4109 | if (phba->sli_rev != LPFC_SLI_REV4) |
4039 | ndlp->nlp_flag &= ~NLP_RPI_VALID; | 4110 | ndlp->nlp_rpi = 0; |
4111 | ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; | ||
4040 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; | 4112 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
4041 | return 1; | 4113 | return 1; |
4042 | } | 4114 | } |
@@ -4059,11 +4131,16 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) | |||
4059 | int i; | 4131 | int i; |
4060 | 4132 | ||
4061 | vports = lpfc_create_vport_work_array(phba); | 4133 | vports = lpfc_create_vport_work_array(phba); |
4134 | if (!vports) { | ||
4135 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
4136 | "2884 Vport array allocation failed \n"); | ||
4137 | return; | ||
4138 | } | ||
4062 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 4139 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
4063 | shost = lpfc_shost_from_vport(vports[i]); | 4140 | shost = lpfc_shost_from_vport(vports[i]); |
4064 | spin_lock_irq(shost->host_lock); | 4141 | spin_lock_irq(shost->host_lock); |
4065 | list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { | 4142 | list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { |
4066 | if (ndlp->nlp_flag & NLP_RPI_VALID) { | 4143 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { |
4067 | /* The mempool_alloc might sleep */ | 4144 | /* The mempool_alloc might sleep */ |
4068 | spin_unlock_irq(shost->host_lock); | 4145 | spin_unlock_irq(shost->host_lock); |
4069 | lpfc_unreg_rpi(vports[i], ndlp); | 4146 | lpfc_unreg_rpi(vports[i], ndlp); |
@@ -4192,9 +4269,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4192 | kfree(mp); | 4269 | kfree(mp); |
4193 | } | 4270 | } |
4194 | list_del(&mb->list); | 4271 | list_del(&mb->list); |
4195 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4196 | lpfc_sli4_free_rpi(phba, | ||
4197 | mb->u.mb.un.varRegLogin.rpi); | ||
4198 | mempool_free(mb, phba->mbox_mem_pool); | 4272 | mempool_free(mb, phba->mbox_mem_pool); |
4199 | /* We shall not invoke the lpfc_nlp_put to decrement | 4273 | /* We shall not invoke the lpfc_nlp_put to decrement |
4200 | * the ndlp reference count as we are in the process | 4274 | * the ndlp reference count as we are in the process |
@@ -4236,15 +4310,15 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4236 | 4310 | ||
4237 | lpfc_cancel_retry_delay_tmo(vport, ndlp); | 4311 | lpfc_cancel_retry_delay_tmo(vport, ndlp); |
4238 | if ((ndlp->nlp_flag & NLP_DEFER_RM) && | 4312 | if ((ndlp->nlp_flag & NLP_DEFER_RM) && |
4239 | !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && | 4313 | !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) && |
4240 | !(ndlp->nlp_flag & NLP_RPI_VALID)) { | 4314 | !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { |
4241 | /* For this case we need to cleanup the default rpi | 4315 | /* For this case we need to cleanup the default rpi |
4242 | * allocated by the firmware. | 4316 | * allocated by the firmware. |
4243 | */ | 4317 | */ |
4244 | if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) | 4318 | if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) |
4245 | != NULL) { | 4319 | != NULL) { |
4246 | rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, | 4320 | rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, |
4247 | (uint8_t *) &vport->fc_sparam, mbox, 0); | 4321 | (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi); |
4248 | if (rc) { | 4322 | if (rc) { |
4249 | mempool_free(mbox, phba->mbox_mem_pool); | 4323 | mempool_free(mbox, phba->mbox_mem_pool); |
4250 | } | 4324 | } |
@@ -4436,7 +4510,7 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport) | |||
4436 | if (!lpfc_is_link_up(phba)) | 4510 | if (!lpfc_is_link_up(phba)) |
4437 | return; | 4511 | return; |
4438 | 4512 | ||
4439 | if (phba->fc_topology != TOPOLOGY_LOOP) | 4513 | if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) |
4440 | return; | 4514 | return; |
4441 | 4515 | ||
4442 | /* Check for loop map present or not */ | 4516 | /* Check for loop map present or not */ |
@@ -4788,7 +4862,10 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) | |||
4788 | } | 4862 | } |
4789 | } | 4863 | } |
4790 | if (vport->port_state != LPFC_FLOGI) { | 4864 | if (vport->port_state != LPFC_FLOGI) { |
4791 | lpfc_initial_flogi(vport); | 4865 | if (phba->sli_rev <= LPFC_SLI_REV3) |
4866 | lpfc_initial_flogi(vport); | ||
4867 | else | ||
4868 | lpfc_issue_init_vfi(vport); | ||
4792 | return; | 4869 | return; |
4793 | } | 4870 | } |
4794 | break; | 4871 | break; |
@@ -4979,7 +5056,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4979 | pmb->context2 = NULL; | 5056 | pmb->context2 = NULL; |
4980 | 5057 | ||
4981 | ndlp->nlp_rpi = mb->un.varWords[0]; | 5058 | ndlp->nlp_rpi = mb->un.varWords[0]; |
4982 | ndlp->nlp_flag |= NLP_RPI_VALID; | 5059 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
4983 | ndlp->nlp_type |= NLP_FABRIC; | 5060 | ndlp->nlp_type |= NLP_FABRIC; |
4984 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); | 5061 | lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); |
4985 | 5062 | ||
@@ -5103,6 +5180,8 @@ lpfc_nlp_release(struct kref *kref) | |||
5103 | spin_lock_irqsave(&phba->ndlp_lock, flags); | 5180 | spin_lock_irqsave(&phba->ndlp_lock, flags); |
5104 | NLP_CLR_NODE_ACT(ndlp); | 5181 | NLP_CLR_NODE_ACT(ndlp); |
5105 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); | 5182 | spin_unlock_irqrestore(&phba->ndlp_lock, flags); |
5183 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
5184 | lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); | ||
5106 | 5185 | ||
5107 | /* free ndlp memory for final ndlp release */ | 5186 | /* free ndlp memory for final ndlp release */ |
5108 | if (NLP_CHK_FREE_REQ(ndlp)) { | 5187 | if (NLP_CHK_FREE_REQ(ndlp)) { |
@@ -5254,6 +5333,10 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) | |||
5254 | 5333 | ||
5255 | vports = lpfc_create_vport_work_array(phba); | 5334 | vports = lpfc_create_vport_work_array(phba); |
5256 | 5335 | ||
5336 | /* If driver cannot allocate memory, indicate fcf is in use */ | ||
5337 | if (!vports) | ||
5338 | return 1; | ||
5339 | |||
5257 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { | 5340 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
5258 | shost = lpfc_shost_from_vport(vports[i]); | 5341 | shost = lpfc_shost_from_vport(vports[i]); |
5259 | spin_lock_irq(shost->host_lock); | 5342 | spin_lock_irq(shost->host_lock); |
@@ -5269,7 +5352,7 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) | |||
5269 | "logged in\n", | 5352 | "logged in\n", |
5270 | ndlp->nlp_rpi, ndlp->nlp_DID, | 5353 | ndlp->nlp_rpi, ndlp->nlp_DID, |
5271 | ndlp->nlp_flag); | 5354 | ndlp->nlp_flag); |
5272 | if (ndlp->nlp_flag & NLP_RPI_VALID) | 5355 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) |
5273 | ret = 1; | 5356 | ret = 1; |
5274 | } | 5357 | } |
5275 | } | 5358 | } |
@@ -5550,7 +5633,7 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba) | |||
5550 | * registered, do nothing. | 5633 | * registered, do nothing. |
5551 | */ | 5634 | */ |
5552 | spin_lock_irq(&phba->hbalock); | 5635 | spin_lock_irq(&phba->hbalock); |
5553 | if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || | 5636 | if (!(phba->hba_flag & HBA_FCOE_MODE) || |
5554 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || | 5637 | !(phba->fcf.fcf_flag & FCF_REGISTERED) || |
5555 | !(phba->hba_flag & HBA_FIP_SUPPORT) || | 5638 | !(phba->hba_flag & HBA_FIP_SUPPORT) || |
5556 | (phba->fcf.fcf_flag & FCF_DISCOVERY) || | 5639 | (phba->fcf.fcf_flag & FCF_DISCOVERY) || |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 9b8333456465..96ed3ba6ba95 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -880,6 +880,24 @@ struct RLS_RSP { /* Structure is in Big Endian format */ | |||
880 | uint32_t crcCnt; | 880 | uint32_t crcCnt; |
881 | }; | 881 | }; |
882 | 882 | ||
883 | struct RRQ { /* Structure is in Big Endian format */ | ||
884 | uint32_t rrq; | ||
885 | #define rrq_rsvd_SHIFT 24 | ||
886 | #define rrq_rsvd_MASK 0x000000ff | ||
887 | #define rrq_rsvd_WORD rrq | ||
888 | #define rrq_did_SHIFT 0 | ||
889 | #define rrq_did_MASK 0x00ffffff | ||
890 | #define rrq_did_WORD rrq | ||
891 | uint32_t rrq_exchg; | ||
892 | #define rrq_oxid_SHIFT 16 | ||
893 | #define rrq_oxid_MASK 0xffff | ||
894 | #define rrq_oxid_WORD rrq_exchg | ||
895 | #define rrq_rxid_SHIFT 0 | ||
896 | #define rrq_rxid_MASK 0xffff | ||
897 | #define rrq_rxid_WORD rrq_exchg | ||
898 | }; | ||
899 | |||
900 | |||
883 | struct RTV_RSP { /* Structure is in Big Endian format */ | 901 | struct RTV_RSP { /* Structure is in Big Endian format */ |
884 | uint32_t ratov; | 902 | uint32_t ratov; |
885 | uint32_t edtov; | 903 | uint32_t edtov; |
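The RRQ payload added above packs a reserved byte plus the 24-bit destination ID into one word, and the OX_ID/RX_ID exchange pair into a second word, described with the driver's usual _SHIFT/_MASK/_WORD macro triplets instead of C bitfields. A minimal user-space sketch of that shift-and-mask convention, ignoring wire byte order; the helper names below are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Generic shift-and-mask helpers in the style the lpfc macros expand to. */
static uint32_t field_get(uint32_t word, int shift, uint32_t mask)
{
	return (word >> shift) & mask;
}

static uint32_t field_set(uint32_t word, int shift, uint32_t mask, uint32_t val)
{
	return (word & ~(mask << shift)) | ((val & mask) << shift);
}

int main(void)
{
	uint32_t rrq = 0, rrq_exchg = 0;

	/* Pack a 24-bit destination ID and a 16-bit OX_ID/RX_ID pair. */
	rrq       = field_set(rrq, 0, 0x00ffffff, 0x0a1b2c);   /* rrq_did  */
	rrq_exchg = field_set(rrq_exchg, 16, 0xffff, 0x1234);  /* rrq_oxid */
	rrq_exchg = field_set(rrq_exchg, 0, 0xffff, 0x5678);   /* rrq_rxid */

	printf("did=0x%06x oxid=0x%04x rxid=0x%04x\n",
	       (unsigned)field_get(rrq, 0, 0x00ffffff),
	       (unsigned)field_get(rrq_exchg, 16, 0xffff),
	       (unsigned)field_get(rrq_exchg, 0, 0xffff));
	return 0;
}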
@@ -1172,7 +1190,10 @@ typedef struct { | |||
1172 | #define PCI_VENDOR_ID_EMULEX 0x10df | 1190 | #define PCI_VENDOR_ID_EMULEX 0x10df |
1173 | #define PCI_DEVICE_ID_FIREFLY 0x1ae5 | 1191 | #define PCI_DEVICE_ID_FIREFLY 0x1ae5 |
1174 | #define PCI_DEVICE_ID_PROTEUS_VF 0xe100 | 1192 | #define PCI_DEVICE_ID_PROTEUS_VF 0xe100 |
1193 | #define PCI_DEVICE_ID_BALIUS 0xe131 | ||
1175 | #define PCI_DEVICE_ID_PROTEUS_PF 0xe180 | 1194 | #define PCI_DEVICE_ID_PROTEUS_PF 0xe180 |
1195 | #define PCI_DEVICE_ID_LANCER_FC 0xe200 | ||
1196 | #define PCI_DEVICE_ID_LANCER_FCOE 0xe260 | ||
1176 | #define PCI_DEVICE_ID_SAT_SMB 0xf011 | 1197 | #define PCI_DEVICE_ID_SAT_SMB 0xf011 |
1177 | #define PCI_DEVICE_ID_SAT_MID 0xf015 | 1198 | #define PCI_DEVICE_ID_SAT_MID 0xf015 |
1178 | #define PCI_DEVICE_ID_RFLY 0xf095 | 1199 | #define PCI_DEVICE_ID_RFLY 0xf095 |
@@ -1189,6 +1210,7 @@ typedef struct { | |||
1189 | #define PCI_DEVICE_ID_SAT 0xf100 | 1210 | #define PCI_DEVICE_ID_SAT 0xf100 |
1190 | #define PCI_DEVICE_ID_SAT_SCSP 0xf111 | 1211 | #define PCI_DEVICE_ID_SAT_SCSP 0xf111 |
1191 | #define PCI_DEVICE_ID_SAT_DCSP 0xf112 | 1212 | #define PCI_DEVICE_ID_SAT_DCSP 0xf112 |
1213 | #define PCI_DEVICE_ID_FALCON 0xf180 | ||
1192 | #define PCI_DEVICE_ID_SUPERFLY 0xf700 | 1214 | #define PCI_DEVICE_ID_SUPERFLY 0xf700 |
1193 | #define PCI_DEVICE_ID_DRAGONFLY 0xf800 | 1215 | #define PCI_DEVICE_ID_DRAGONFLY 0xf800 |
1194 | #define PCI_DEVICE_ID_CENTAUR 0xf900 | 1216 | #define PCI_DEVICE_ID_CENTAUR 0xf900 |
@@ -1210,8 +1232,6 @@ typedef struct { | |||
1210 | #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 | 1232 | #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 |
1211 | #define PCI_DEVICE_ID_TIGERSHARK 0x0704 | 1233 | #define PCI_DEVICE_ID_TIGERSHARK 0x0704 |
1212 | #define PCI_DEVICE_ID_TOMCAT 0x0714 | 1234 | #define PCI_DEVICE_ID_TOMCAT 0x0714 |
1213 | #define PCI_DEVICE_ID_FALCON 0xf180 | ||
1214 | #define PCI_DEVICE_ID_BALIUS 0xe131 | ||
1215 | 1235 | ||
1216 | #define JEDEC_ID_ADDRESS 0x0080001c | 1236 | #define JEDEC_ID_ADDRESS 0x0080001c |
1217 | #define FIREFLY_JEDEC_ID 0x1ACC | 1237 | #define FIREFLY_JEDEC_ID 0x1ACC |
@@ -1368,7 +1388,6 @@ typedef struct { /* FireFly BIU registers */ | |||
1368 | #define MBX_READ_LNK_STAT 0x12 | 1388 | #define MBX_READ_LNK_STAT 0x12 |
1369 | #define MBX_REG_LOGIN 0x13 | 1389 | #define MBX_REG_LOGIN 0x13 |
1370 | #define MBX_UNREG_LOGIN 0x14 | 1390 | #define MBX_UNREG_LOGIN 0x14 |
1371 | #define MBX_READ_LA 0x15 | ||
1372 | #define MBX_CLEAR_LA 0x16 | 1391 | #define MBX_CLEAR_LA 0x16 |
1373 | #define MBX_DUMP_MEMORY 0x17 | 1392 | #define MBX_DUMP_MEMORY 0x17 |
1374 | #define MBX_DUMP_CONTEXT 0x18 | 1393 | #define MBX_DUMP_CONTEXT 0x18 |
@@ -1402,7 +1421,7 @@ typedef struct { /* FireFly BIU registers */ | |||
1402 | #define MBX_READ_SPARM64 0x8D | 1421 | #define MBX_READ_SPARM64 0x8D |
1403 | #define MBX_READ_RPI64 0x8F | 1422 | #define MBX_READ_RPI64 0x8F |
1404 | #define MBX_REG_LOGIN64 0x93 | 1423 | #define MBX_REG_LOGIN64 0x93 |
1405 | #define MBX_READ_LA64 0x95 | 1424 | #define MBX_READ_TOPOLOGY 0x95 |
1406 | #define MBX_REG_VPI 0x96 | 1425 | #define MBX_REG_VPI 0x96 |
1407 | #define MBX_UNREG_VPI 0x97 | 1426 | #define MBX_UNREG_VPI 0x97 |
1408 | 1427 | ||
@@ -1823,12 +1842,13 @@ typedef struct { | |||
1823 | #define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ | 1842 | #define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ |
1824 | 1843 | ||
1825 | uint32_t link_speed; | 1844 | uint32_t link_speed; |
1826 | #define LINK_SPEED_AUTO 0 /* Auto selection */ | 1845 | #define LINK_SPEED_AUTO 0x0 /* Auto selection */ |
1827 | #define LINK_SPEED_1G 1 /* 1 Gigabaud */ | 1846 | #define LINK_SPEED_1G 0x1 /* 1 Gigabaud */ |
1828 | #define LINK_SPEED_2G 2 /* 2 Gigabaud */ | 1847 | #define LINK_SPEED_2G 0x2 /* 2 Gigabaud */ |
1829 | #define LINK_SPEED_4G 4 /* 4 Gigabaud */ | 1848 | #define LINK_SPEED_4G 0x4 /* 4 Gigabaud */ |
1830 | #define LINK_SPEED_8G 8 /* 8 Gigabaud */ | 1849 | #define LINK_SPEED_8G 0x8 /* 8 Gigabaud */ |
1831 | #define LINK_SPEED_10G 16 /* 10 Gigabaud */ | 1850 | #define LINK_SPEED_10G 0x10 /* 10 Gigabaud */ |
1851 | #define LINK_SPEED_16G 0x11 /* 16 Gigabaud */ | ||
1832 | 1852 | ||
1833 | } INIT_LINK_VAR; | 1853 | } INIT_LINK_VAR; |
1834 | 1854 | ||
@@ -1999,6 +2019,7 @@ typedef struct { | |||
1999 | #define LMT_4Gb 0x040 | 2019 | #define LMT_4Gb 0x040 |
2000 | #define LMT_8Gb 0x080 | 2020 | #define LMT_8Gb 0x080 |
2001 | #define LMT_10Gb 0x100 | 2021 | #define LMT_10Gb 0x100 |
2022 | #define LMT_16Gb 0x200 | ||
2002 | uint32_t rsvd2; | 2023 | uint32_t rsvd2; |
2003 | uint32_t rsvd3; | 2024 | uint32_t rsvd3; |
2004 | uint32_t max_xri; | 2025 | uint32_t max_xri; |
@@ -2394,100 +2415,93 @@ typedef struct { | |||
2394 | #endif | 2415 | #endif |
2395 | } UNREG_D_ID_VAR; | 2416 | } UNREG_D_ID_VAR; |
2396 | 2417 | ||
2397 | /* Structure for MB Command READ_LA (21) */ | 2418 | /* Structure for MB Command READ_TOPOLOGY (0x95) */ |
2398 | /* Structure for MB Command READ_LA64 (0x95) */ | 2419 | struct lpfc_mbx_read_top { |
2399 | |||
2400 | typedef struct { | ||
2401 | uint32_t eventTag; /* Event tag */ | 2420 | uint32_t eventTag; /* Event tag */ |
2402 | #ifdef __BIG_ENDIAN_BITFIELD | 2421 | uint32_t word2; |
2403 | uint32_t rsvd1:19; | 2422 | #define lpfc_mbx_read_top_fa_SHIFT 12 |
2404 | uint32_t fa:1; | 2423 | #define lpfc_mbx_read_top_fa_MASK 0x00000001 |
2405 | uint32_t mm:1; /* Menlo Maintenance mode enabled */ | 2424 | #define lpfc_mbx_read_top_fa_WORD word2 |
2406 | uint32_t rx:1; | 2425 | #define lpfc_mbx_read_top_mm_SHIFT 11 |
2407 | uint32_t pb:1; | 2426 | #define lpfc_mbx_read_top_mm_MASK 0x00000001 |
2408 | uint32_t il:1; | 2427 | #define lpfc_mbx_read_top_mm_WORD word2 |
2409 | uint32_t attType:8; | 2428 | #define lpfc_mbx_read_top_pb_SHIFT 9 |
2410 | #else /* __LITTLE_ENDIAN_BITFIELD */ | 2429 | #define lpfc_mbx_read_top_pb_MASK 0X00000001 |
2411 | uint32_t attType:8; | 2430 | #define lpfc_mbx_read_top_pb_WORD word2 |
2412 | uint32_t il:1; | 2431 | #define lpfc_mbx_read_top_il_SHIFT 8 |
2413 | uint32_t pb:1; | 2432 | #define lpfc_mbx_read_top_il_MASK 0x00000001 |
2414 | uint32_t rx:1; | 2433 | #define lpfc_mbx_read_top_il_WORD word2 |
2415 | uint32_t mm:1; | 2434 | #define lpfc_mbx_read_top_att_type_SHIFT 0 |
2416 | uint32_t fa:1; | 2435 | #define lpfc_mbx_read_top_att_type_MASK 0x000000FF |
2417 | uint32_t rsvd1:19; | 2436 | #define lpfc_mbx_read_top_att_type_WORD word2 |
2418 | #endif | 2437 | #define LPFC_ATT_RESERVED 0x00 /* Reserved - attType */ |
2419 | 2438 | #define LPFC_ATT_LINK_UP 0x01 /* Link is up */ | |
2420 | #define AT_RESERVED 0x00 /* Reserved - attType */ | 2439 | #define LPFC_ATT_LINK_DOWN 0x02 /* Link is down */ |
2421 | #define AT_LINK_UP 0x01 /* Link is up */ | 2440 | uint32_t word3; |
2422 | #define AT_LINK_DOWN 0x02 /* Link is down */ | 2441 | #define lpfc_mbx_read_top_alpa_granted_SHIFT 24 |
2423 | 2442 | #define lpfc_mbx_read_top_alpa_granted_MASK 0x000000FF | |
2424 | #ifdef __BIG_ENDIAN_BITFIELD | 2443 | #define lpfc_mbx_read_top_alpa_granted_WORD word3 |
2425 | uint8_t granted_AL_PA; | 2444 | #define lpfc_mbx_read_top_lip_alps_SHIFT 16 |
2426 | uint8_t lipAlPs; | 2445 | #define lpfc_mbx_read_top_lip_alps_MASK 0x000000FF |
2427 | uint8_t lipType; | 2446 | #define lpfc_mbx_read_top_lip_alps_WORD word3 |
2428 | uint8_t topology; | 2447 | #define lpfc_mbx_read_top_lip_type_SHIFT 8 |
2429 | #else /* __LITTLE_ENDIAN_BITFIELD */ | 2448 | #define lpfc_mbx_read_top_lip_type_MASK 0x000000FF |
2430 | uint8_t topology; | 2449 | #define lpfc_mbx_read_top_lip_type_WORD word3 |
2431 | uint8_t lipType; | 2450 | #define lpfc_mbx_read_top_topology_SHIFT 0 |
2432 | uint8_t lipAlPs; | 2451 | #define lpfc_mbx_read_top_topology_MASK 0x000000FF |
2433 | uint8_t granted_AL_PA; | 2452 | #define lpfc_mbx_read_top_topology_WORD word3 |
2434 | #endif | 2453 | #define LPFC_TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ |
2435 | 2454 | #define LPFC_TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ | |
2436 | #define TOPOLOGY_PT_PT 0x01 /* Topology is pt-pt / pt-fabric */ | 2455 | #define LPFC_TOPOLOGY_MM 0x05 /* maint mode zephtr to menlo */ |
2437 | #define TOPOLOGY_LOOP 0x02 /* Topology is FC-AL */ | 2456 | /* store the LILP AL_PA position map into */ |
2438 | #define TOPOLOGY_LNK_MENLO_MAINTENANCE 0x05 /* maint mode zephtr to menlo */ | 2457 | struct ulp_bde64 lilpBde64; |
2439 | 2458 | #define LPFC_ALPA_MAP_SIZE 128 | |
2440 | union { | 2459 | uint32_t word7; |
2441 | struct ulp_bde lilpBde; /* This BDE points to a 128 byte buffer | 2460 | #define lpfc_mbx_read_top_ld_lu_SHIFT 31 |
2442 | to */ | 2461 | #define lpfc_mbx_read_top_ld_lu_MASK 0x00000001 |
2443 | /* store the LILP AL_PA position map into */ | 2462 | #define lpfc_mbx_read_top_ld_lu_WORD word7 |
2444 | struct ulp_bde64 lilpBde64; | 2463 | #define lpfc_mbx_read_top_ld_tf_SHIFT 30 |
2445 | } un; | 2464 | #define lpfc_mbx_read_top_ld_tf_MASK 0x00000001 |
2446 | 2465 | #define lpfc_mbx_read_top_ld_tf_WORD word7 | |
2447 | #ifdef __BIG_ENDIAN_BITFIELD | 2466 | #define lpfc_mbx_read_top_ld_link_spd_SHIFT 8 |
2448 | uint32_t Dlu:1; | 2467 | #define lpfc_mbx_read_top_ld_link_spd_MASK 0x000000FF |
2449 | uint32_t Dtf:1; | 2468 | #define lpfc_mbx_read_top_ld_link_spd_WORD word7 |
2450 | uint32_t Drsvd2:14; | 2469 | #define lpfc_mbx_read_top_ld_nl_port_SHIFT 4 |
2451 | uint32_t DlnkSpeed:8; | 2470 | #define lpfc_mbx_read_top_ld_nl_port_MASK 0x0000000F |
2452 | uint32_t DnlPort:4; | 2471 | #define lpfc_mbx_read_top_ld_nl_port_WORD word7 |
2453 | uint32_t Dtx:2; | 2472 | #define lpfc_mbx_read_top_ld_tx_SHIFT 2 |
2454 | uint32_t Drx:2; | 2473 | #define lpfc_mbx_read_top_ld_tx_MASK 0x00000003 |
2455 | #else /* __LITTLE_ENDIAN_BITFIELD */ | 2474 | #define lpfc_mbx_read_top_ld_tx_WORD word7 |
2456 | uint32_t Drx:2; | 2475 | #define lpfc_mbx_read_top_ld_rx_SHIFT 0 |
2457 | uint32_t Dtx:2; | 2476 | #define lpfc_mbx_read_top_ld_rx_MASK 0x00000003 |
2458 | uint32_t DnlPort:4; | 2477 | #define lpfc_mbx_read_top_ld_rx_WORD word7 |
2459 | uint32_t DlnkSpeed:8; | 2478 | uint32_t word8; |
2460 | uint32_t Drsvd2:14; | 2479 | #define lpfc_mbx_read_top_lu_SHIFT 31 |
2461 | uint32_t Dtf:1; | 2480 | #define lpfc_mbx_read_top_lu_MASK 0x00000001 |
2462 | uint32_t Dlu:1; | 2481 | #define lpfc_mbx_read_top_lu_WORD word8 |
2463 | #endif | 2482 | #define lpfc_mbx_read_top_tf_SHIFT 30 |
2464 | 2483 | #define lpfc_mbx_read_top_tf_MASK 0x00000001 | |
2465 | #ifdef __BIG_ENDIAN_BITFIELD | 2484 | #define lpfc_mbx_read_top_tf_WORD word8 |
2466 | uint32_t Ulu:1; | 2485 | #define lpfc_mbx_read_top_link_spd_SHIFT 8 |
2467 | uint32_t Utf:1; | 2486 | #define lpfc_mbx_read_top_link_spd_MASK 0x000000FF |
2468 | uint32_t Ursvd2:14; | 2487 | #define lpfc_mbx_read_top_link_spd_WORD word8 |
2469 | uint32_t UlnkSpeed:8; | 2488 | #define lpfc_mbx_read_top_nl_port_SHIFT 4 |
2470 | uint32_t UnlPort:4; | 2489 | #define lpfc_mbx_read_top_nl_port_MASK 0x0000000F |
2471 | uint32_t Utx:2; | 2490 | #define lpfc_mbx_read_top_nl_port_WORD word8 |
2472 | uint32_t Urx:2; | 2491 | #define lpfc_mbx_read_top_tx_SHIFT 2 |
2473 | #else /* __LITTLE_ENDIAN_BITFIELD */ | 2492 | #define lpfc_mbx_read_top_tx_MASK 0x00000003 |
2474 | uint32_t Urx:2; | 2493 | #define lpfc_mbx_read_top_tx_WORD word8 |
2475 | uint32_t Utx:2; | 2494 | #define lpfc_mbx_read_top_rx_SHIFT 0 |
2476 | uint32_t UnlPort:4; | 2495 | #define lpfc_mbx_read_top_rx_MASK 0x00000003 |
2477 | uint32_t UlnkSpeed:8; | 2496 | #define lpfc_mbx_read_top_rx_WORD word8 |
2478 | uint32_t Ursvd2:14; | 2497 | #define LPFC_LINK_SPEED_UNKNOWN 0x0 |
2479 | uint32_t Utf:1; | 2498 | #define LPFC_LINK_SPEED_1GHZ 0x04 |
2480 | uint32_t Ulu:1; | 2499 | #define LPFC_LINK_SPEED_2GHZ 0x08 |
2481 | #endif | 2500 | #define LPFC_LINK_SPEED_4GHZ 0x10 |
2482 | 2501 | #define LPFC_LINK_SPEED_8GHZ 0x20 | |
2483 | #define LA_UNKNW_LINK 0x0 /* lnkSpeed */ | 2502 | #define LPFC_LINK_SPEED_10GHZ 0x40 |
2484 | #define LA_1GHZ_LINK 0x04 /* lnkSpeed */ | 2503 | #define LPFC_LINK_SPEED_16GHZ 0x80 |
2485 | #define LA_2GHZ_LINK 0x08 /* lnkSpeed */ | 2504 | }; |
2486 | #define LA_4GHZ_LINK 0x10 /* lnkSpeed */ | ||
2487 | #define LA_8GHZ_LINK 0x20 /* lnkSpeed */ | ||
2488 | #define LA_10GHZ_LINK 0x40 /* lnkSpeed */ | ||
2489 | |||
2490 | } READ_LA_VAR; | ||
2491 | 2505 | ||
2492 | /* Structure for MB Command CLEAR_LA (22) */ | 2506 | /* Structure for MB Command CLEAR_LA (22) */ |
2493 | 2507 | ||
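The old READ_LA_VAR relied on #ifdef __BIG_ENDIAN_BITFIELD / __LITTLE_ENDIAN_BITFIELD to lay out its bitfields; the new struct lpfc_mbx_read_top instead exposes plain uint32_t words with _SHIFT/_MASK/_WORD accessors, so a single definition serves both byte orders. A hedged, self-contained sketch of how a completion path could pull the attention type and topology out of word2/word3 — the accessor is written out locally here rather than using the driver's bf_get macro:

#include <stdint.h>
#include <stdio.h>

struct read_top_words {         /* only the words used below */
	uint32_t word2;
	uint32_t word3;
};

#define FIELD_GET(w, shift, mask)	(((w) >> (shift)) & (mask))

static void handle_read_topology(const struct read_top_words *top)
{
	uint32_t att  = FIELD_GET(top->word2, 0, 0xFF);  /* att_type */
	uint32_t topo = FIELD_GET(top->word3, 0, 0xFF);  /* topology */

	if (att == 0x01 /* LPFC_ATT_LINK_UP */)
		printf("link up, %s\n",
		       topo == 0x02 /* LPFC_TOPOLOGY_LOOP */ ?
		       "FC-AL loop" : "point-to-point/fabric");
	else if (att == 0x02 /* LPFC_ATT_LINK_DOWN */)
		printf("link down\n");
}

int main(void)
{
	struct read_top_words t = { .word2 = 0x01, .word3 = 0x02 };

	handle_read_topology(&t);	/* prints: link up, FC-AL loop */
	return 0;
}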
@@ -3016,7 +3030,6 @@ typedef union { | |||
3016 | READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ | 3030 | READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ |
3017 | REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ | 3031 | REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ |
3018 | UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ | 3032 | UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ |
3019 | READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */ | ||
3020 | CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ | 3033 | CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ |
3021 | DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ | 3034 | DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ |
3022 | UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ | 3035 | UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ |
@@ -3026,6 +3039,7 @@ typedef union { | |||
3026 | struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ | 3039 | struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */ |
3027 | struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/ | 3040 | struct update_cfg_var varUpdateCfg; /* cmd = 0x1B (UPDATE_CFG)*/ |
3028 | CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ | 3041 | CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ |
3042 | struct lpfc_mbx_read_top varReadTop; /* cmd = 0x95 (READ_TOPOLOGY) */ | ||
3029 | REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ | 3043 | REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ |
3030 | UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ | 3044 | UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ |
3031 | ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ | 3045 | ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ |
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 6e4bc34e1d0d..94c1aa1136de 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -64,29 +64,39 @@ struct lpfc_sli_intf { | |||
64 | #define lpfc_sli_intf_valid_MASK 0x00000007 | 64 | #define lpfc_sli_intf_valid_MASK 0x00000007 |
65 | #define lpfc_sli_intf_valid_WORD word0 | 65 | #define lpfc_sli_intf_valid_WORD word0 |
66 | #define LPFC_SLI_INTF_VALID 6 | 66 | #define LPFC_SLI_INTF_VALID 6 |
67 | #define lpfc_sli_intf_featurelevel2_SHIFT 24 | 67 | #define lpfc_sli_intf_sli_hint2_SHIFT 24 |
68 | #define lpfc_sli_intf_featurelevel2_MASK 0x0000001F | 68 | #define lpfc_sli_intf_sli_hint2_MASK 0x0000001F |
69 | #define lpfc_sli_intf_featurelevel2_WORD word0 | 69 | #define lpfc_sli_intf_sli_hint2_WORD word0 |
70 | #define lpfc_sli_intf_featurelevel1_SHIFT 16 | 70 | #define LPFC_SLI_INTF_SLI_HINT2_NONE 0 |
71 | #define lpfc_sli_intf_featurelevel1_MASK 0x000000FF | 71 | #define lpfc_sli_intf_sli_hint1_SHIFT 16 |
72 | #define lpfc_sli_intf_featurelevel1_WORD word0 | 72 | #define lpfc_sli_intf_sli_hint1_MASK 0x000000FF |
73 | #define LPFC_SLI_INTF_FEATURELEVEL1_1 1 | 73 | #define lpfc_sli_intf_sli_hint1_WORD word0 |
74 | #define LPFC_SLI_INTF_FEATURELEVEL1_2 2 | 74 | #define LPFC_SLI_INTF_SLI_HINT1_NONE 0 |
75 | #define LPFC_SLI_INTF_SLI_HINT1_1 1 | ||
76 | #define LPFC_SLI_INTF_SLI_HINT1_2 2 | ||
77 | #define lpfc_sli_intf_if_type_SHIFT 12 | ||
78 | #define lpfc_sli_intf_if_type_MASK 0x0000000F | ||
79 | #define lpfc_sli_intf_if_type_WORD word0 | ||
80 | #define LPFC_SLI_INTF_IF_TYPE_0 0 | ||
81 | #define LPFC_SLI_INTF_IF_TYPE_1 1 | ||
82 | #define LPFC_SLI_INTF_IF_TYPE_2 2 | ||
75 | #define lpfc_sli_intf_sli_family_SHIFT 8 | 83 | #define lpfc_sli_intf_sli_family_SHIFT 8 |
76 | #define lpfc_sli_intf_sli_family_MASK 0x000000FF | 84 | #define lpfc_sli_intf_sli_family_MASK 0x0000000F |
77 | #define lpfc_sli_intf_sli_family_WORD word0 | 85 | #define lpfc_sli_intf_sli_family_WORD word0 |
78 | #define LPFC_SLI_INTF_FAMILY_BE2 0 | 86 | #define LPFC_SLI_INTF_FAMILY_BE2 0x0 |
79 | #define LPFC_SLI_INTF_FAMILY_BE3 1 | 87 | #define LPFC_SLI_INTF_FAMILY_BE3 0x1 |
88 | #define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa | ||
89 | #define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb | ||
80 | #define lpfc_sli_intf_slirev_SHIFT 4 | 90 | #define lpfc_sli_intf_slirev_SHIFT 4 |
81 | #define lpfc_sli_intf_slirev_MASK 0x0000000F | 91 | #define lpfc_sli_intf_slirev_MASK 0x0000000F |
82 | #define lpfc_sli_intf_slirev_WORD word0 | 92 | #define lpfc_sli_intf_slirev_WORD word0 |
83 | #define LPFC_SLI_INTF_REV_SLI3 3 | 93 | #define LPFC_SLI_INTF_REV_SLI3 3 |
84 | #define LPFC_SLI_INTF_REV_SLI4 4 | 94 | #define LPFC_SLI_INTF_REV_SLI4 4 |
85 | #define lpfc_sli_intf_if_type_SHIFT 0 | 95 | #define lpfc_sli_intf_func_type_SHIFT 0 |
86 | #define lpfc_sli_intf_if_type_MASK 0x00000007 | 96 | #define lpfc_sli_intf_func_type_MASK 0x00000001 |
87 | #define lpfc_sli_intf_if_type_WORD word0 | 97 | #define lpfc_sli_intf_func_type_WORD word0 |
88 | #define LPFC_SLI_INTF_IF_TYPE_0 0 | 98 | #define LPFC_SLI_INTF_IF_TYPE_PHYS 0 |
89 | #define LPFC_SLI_INTF_IF_TYPE_1 1 | 99 | #define LPFC_SLI_INTF_IF_TYPE_VIRT 1 |
90 | }; | 100 | }; |
91 | 101 | ||
92 | #define LPFC_SLI4_MBX_EMBED true | 102 | #define LPFC_SLI4_MBX_EMBED true |
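With this change the SLI_INTF word carries an if_type nibble at bits 15:12, a 4-bit SLI family at bits 11:8 (narrowed from 8 bits), the SLI revision at bits 7:4, and a single function-type bit at bit 0, while the old feature-level fields become sli_hint1/sli_hint2. A small stand-alone sketch of decoding such a word with the shifts and masks listed above; the sample register value is made up:

#include <stdint.h>
#include <stdio.h>

#define GET(word, shift, mask)	(((word) >> (shift)) & (mask))

int main(void)
{
	/* Made-up SLI_INTF value: if_type 2, family 0xa (Lancer A0),
	 * SLI rev 4, virtual function.
	 */
	uint32_t sli_intf = (2u << 12) | (0xau << 8) | (4u << 4) | 1u;

	printf("if_type=%u family=0x%x slirev=%u func_type=%u\n",
	       (unsigned)GET(sli_intf, 12, 0xF),	/* lpfc_sli_intf_if_type    */
	       (unsigned)GET(sli_intf, 8, 0xF),		/* lpfc_sli_intf_sli_family */
	       (unsigned)GET(sli_intf, 4, 0xF),		/* lpfc_sli_intf_slirev     */
	       (unsigned)GET(sli_intf, 0, 0x1));	/* lpfc_sli_intf_func_type  */
	return 0;
}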
@@ -450,35 +460,40 @@ struct lpfc_register { | |||
450 | uint32_t word0; | 460 | uint32_t word0; |
451 | }; | 461 | }; |
452 | 462 | ||
463 | /* The following BAR0 Registers apply to SLI4 if_type 0 UCNAs. */ | ||
453 | #define LPFC_UERR_STATUS_HI 0x00A4 | 464 | #define LPFC_UERR_STATUS_HI 0x00A4 |
454 | #define LPFC_UERR_STATUS_LO 0x00A0 | 465 | #define LPFC_UERR_STATUS_LO 0x00A0 |
455 | #define LPFC_UE_MASK_HI 0x00AC | 466 | #define LPFC_UE_MASK_HI 0x00AC |
456 | #define LPFC_UE_MASK_LO 0x00A8 | 467 | #define LPFC_UE_MASK_LO 0x00A8 |
468 | |||
469 | /* The following BAR0 register sets are defined for if_type 0 and 2 UCNAs. */ | ||
457 | #define LPFC_SLI_INTF 0x0058 | 470 | #define LPFC_SLI_INTF 0x0058 |
458 | 471 | ||
459 | /* BAR0 Registers */ | 472 | #define LPFC_SLIPORT_IF2_SMPHR 0x0400 |
460 | #define LPFC_HST_STATE 0x00AC | 473 | #define lpfc_port_smphr_perr_SHIFT 31 |
461 | #define lpfc_hst_state_perr_SHIFT 31 | 474 | #define lpfc_port_smphr_perr_MASK 0x1 |
462 | #define lpfc_hst_state_perr_MASK 0x1 | 475 | #define lpfc_port_smphr_perr_WORD word0 |
463 | #define lpfc_hst_state_perr_WORD word0 | 476 | #define lpfc_port_smphr_sfi_SHIFT 30 |
464 | #define lpfc_hst_state_sfi_SHIFT 30 | 477 | #define lpfc_port_smphr_sfi_MASK 0x1 |
465 | #define lpfc_hst_state_sfi_MASK 0x1 | 478 | #define lpfc_port_smphr_sfi_WORD word0 |
466 | #define lpfc_hst_state_sfi_WORD word0 | 479 | #define lpfc_port_smphr_nip_SHIFT 29 |
467 | #define lpfc_hst_state_nip_SHIFT 29 | 480 | #define lpfc_port_smphr_nip_MASK 0x1 |
468 | #define lpfc_hst_state_nip_MASK 0x1 | 481 | #define lpfc_port_smphr_nip_WORD word0 |
469 | #define lpfc_hst_state_nip_WORD word0 | 482 | #define lpfc_port_smphr_ipc_SHIFT 28 |
470 | #define lpfc_hst_state_ipc_SHIFT 28 | 483 | #define lpfc_port_smphr_ipc_MASK 0x1 |
471 | #define lpfc_hst_state_ipc_MASK 0x1 | 484 | #define lpfc_port_smphr_ipc_WORD word0 |
472 | #define lpfc_hst_state_ipc_WORD word0 | 485 | #define lpfc_port_smphr_scr1_SHIFT 27 |
473 | #define lpfc_hst_state_xrom_SHIFT 27 | 486 | #define lpfc_port_smphr_scr1_MASK 0x1 |
474 | #define lpfc_hst_state_xrom_MASK 0x1 | 487 | #define lpfc_port_smphr_scr1_WORD word0 |
475 | #define lpfc_hst_state_xrom_WORD word0 | 488 | #define lpfc_port_smphr_scr2_SHIFT 26 |
476 | #define lpfc_hst_state_dl_SHIFT 26 | 489 | #define lpfc_port_smphr_scr2_MASK 0x1 |
477 | #define lpfc_hst_state_dl_MASK 0x1 | 490 | #define lpfc_port_smphr_scr2_WORD word0 |
478 | #define lpfc_hst_state_dl_WORD word0 | 491 | #define lpfc_port_smphr_host_scratch_SHIFT 16 |
479 | #define lpfc_hst_state_port_status_SHIFT 0 | 492 | #define lpfc_port_smphr_host_scratch_MASK 0xFF |
480 | #define lpfc_hst_state_port_status_MASK 0xFFFF | 493 | #define lpfc_port_smphr_host_scratch_WORD word0 |
481 | #define lpfc_hst_state_port_status_WORD word0 | 494 | #define lpfc_port_smphr_port_status_SHIFT 0 |
495 | #define lpfc_port_smphr_port_status_MASK 0xFFFF | ||
496 | #define lpfc_port_smphr_port_status_WORD word0 | ||
482 | 497 | ||
483 | #define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 | 498 | #define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 |
484 | #define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 | 499 | #define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 |
@@ -511,10 +526,46 @@ struct lpfc_register { | |||
511 | #define LPFC_POST_STAGE_RC_DONE 0x0B07 | 526 | #define LPFC_POST_STAGE_RC_DONE 0x0B07 |
512 | #define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 | 527 | #define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 |
513 | #define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 | 528 | #define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 |
514 | #define LPFC_POST_STAGE_ARMFW_READY 0xC000 | 529 | #define LPFC_POST_STAGE_PORT_READY 0xC000 |
515 | #define LPFC_POST_STAGE_ARMFW_UE 0xF000 | 530 | #define LPFC_POST_STAGE_PORT_UE 0xF000 |
531 | |||
532 | #define LPFC_SLIPORT_STATUS 0x0404 | ||
533 | #define lpfc_sliport_status_err_SHIFT 31 | ||
534 | #define lpfc_sliport_status_err_MASK 0x1 | ||
535 | #define lpfc_sliport_status_err_WORD word0 | ||
536 | #define lpfc_sliport_status_end_SHIFT 30 | ||
537 | #define lpfc_sliport_status_end_MASK 0x1 | ||
538 | #define lpfc_sliport_status_end_WORD word0 | ||
539 | #define lpfc_sliport_status_oti_SHIFT 29 | ||
540 | #define lpfc_sliport_status_oti_MASK 0x1 | ||
541 | #define lpfc_sliport_status_oti_WORD word0 | ||
542 | #define lpfc_sliport_status_rn_SHIFT 24 | ||
543 | #define lpfc_sliport_status_rn_MASK 0x1 | ||
544 | #define lpfc_sliport_status_rn_WORD word0 | ||
545 | #define lpfc_sliport_status_rdy_SHIFT 23 | ||
546 | #define lpfc_sliport_status_rdy_MASK 0x1 | ||
547 | #define lpfc_sliport_status_rdy_WORD word0 | ||
548 | #define MAX_IF_TYPE_2_RESETS 1000 | ||
549 | |||
550 | #define LPFC_SLIPORT_CNTRL 0x0408 | ||
551 | #define lpfc_sliport_ctrl_end_SHIFT 30 | ||
552 | #define lpfc_sliport_ctrl_end_MASK 0x1 | ||
553 | #define lpfc_sliport_ctrl_end_WORD word0 | ||
554 | #define LPFC_SLIPORT_LITTLE_ENDIAN 0 | ||
555 | #define LPFC_SLIPORT_BIG_ENDIAN 1 | ||
556 | #define lpfc_sliport_ctrl_ip_SHIFT 27 | ||
557 | #define lpfc_sliport_ctrl_ip_MASK 0x1 | ||
558 | #define lpfc_sliport_ctrl_ip_WORD word0 | ||
559 | #define LPFC_SLIPORT_INIT_PORT 1 | ||
560 | |||
561 | #define LPFC_SLIPORT_ERR_1 0x040C | ||
562 | #define LPFC_SLIPORT_ERR_2 0x0410 | ||
563 | |||
564 | /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically | ||
565 | * reside in BAR 2. | ||
566 | */ | ||
567 | #define LPFC_SLIPORT_IF0_SMPHR 0x00AC | ||
516 | 568 | ||
517 | /* BAR1 Registers */ | ||
518 | #define LPFC_IMR_MASK_ALL 0xFFFFFFFF | 569 | #define LPFC_IMR_MASK_ALL 0xFFFFFFFF |
519 | #define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF | 570 | #define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF |
520 | 571 | ||
@@ -569,14 +620,21 @@ struct lpfc_register { | |||
569 | #define LPFC_SLI4_INTR30 BIT30 | 620 | #define LPFC_SLI4_INTR30 BIT30 |
570 | #define LPFC_SLI4_INTR31 BIT31 | 621 | #define LPFC_SLI4_INTR31 BIT31 |
571 | 622 | ||
572 | /* BAR2 Registers */ | 623 | /* |
624 | * The Doorbell registers defined here exist in different BAR | ||
625 | * register sets depending on the UCNA Port's reported if_type | ||
626 | * value. For UCNA ports running SLI4 and if_type 0, they reside in | ||
627 | * BAR4. For UCNA ports running SLI4 and if_type 2, they reside in | ||
628 | * BAR0. The offsets are the same so the driver must account for | ||
629 | * any base address difference. | ||
630 | */ | ||
573 | #define LPFC_RQ_DOORBELL 0x00A0 | 631 | #define LPFC_RQ_DOORBELL 0x00A0 |
574 | #define lpfc_rq_doorbell_num_posted_SHIFT 16 | 632 | #define lpfc_rq_doorbell_num_posted_SHIFT 16 |
575 | #define lpfc_rq_doorbell_num_posted_MASK 0x3FFF | 633 | #define lpfc_rq_doorbell_num_posted_MASK 0x3FFF |
576 | #define lpfc_rq_doorbell_num_posted_WORD word0 | 634 | #define lpfc_rq_doorbell_num_posted_WORD word0 |
577 | #define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ | 635 | #define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ |
578 | #define lpfc_rq_doorbell_id_SHIFT 0 | 636 | #define lpfc_rq_doorbell_id_SHIFT 0 |
579 | #define lpfc_rq_doorbell_id_MASK 0x03FF | 637 | #define lpfc_rq_doorbell_id_MASK 0xFFFF |
580 | #define lpfc_rq_doorbell_id_WORD word0 | 638 | #define lpfc_rq_doorbell_id_WORD word0 |
581 | 639 | ||
582 | #define LPFC_WQ_DOORBELL 0x0040 | 640 | #define LPFC_WQ_DOORBELL 0x0040 |
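Per the comment above, the RQ/WQ/EQCQ/MQ doorbells keep the same offsets but sit in BAR4 for if_type 0 ports and BAR0 for if_type 2 ports, so only the mapped base address differs; the RQ and MQ queue-id fields also widen from 10 to 16 bits. The sketch below composes the RQ doorbell word (num_posted at bits 29:16 per the 0x3FFF mask, queue id at bits 15:0) that the driver would then write with a single writel(); the register write itself is left out of this user-space example:

#include <stdint.h>
#include <stdio.h>

/* Build the RQ doorbell word: batch of RQEs posted plus the 16-bit queue id. */
static uint32_t rq_doorbell_word(uint32_t num_posted, uint32_t queue_id)
{
	uint32_t word = 0;

	word |= (num_posted & 0x3FFF) << 16;	/* lpfc_rq_doorbell_num_posted      */
	word |= (queue_id   & 0xFFFF) << 0;	/* lpfc_rq_doorbell_id (now 16 bits) */
	return word;	/* would be writel()'d at base + LPFC_RQ_DOORBELL */
}

int main(void)
{
	printf("doorbell=0x%08x\n",
	       (unsigned)rq_doorbell_word(8 /* LPFC_RQ_POST_BATCH */, 0x1f));
	return 0;
}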
@@ -591,6 +649,11 @@ struct lpfc_register { | |||
591 | #define lpfc_wq_doorbell_id_WORD word0 | 649 | #define lpfc_wq_doorbell_id_WORD word0 |
592 | 650 | ||
593 | #define LPFC_EQCQ_DOORBELL 0x0120 | 651 | #define LPFC_EQCQ_DOORBELL 0x0120 |
652 | #define lpfc_eqcq_doorbell_se_SHIFT 31 | ||
653 | #define lpfc_eqcq_doorbell_se_MASK 0x0001 | ||
654 | #define lpfc_eqcq_doorbell_se_WORD word0 | ||
655 | #define LPFC_EQCQ_SOLICIT_ENABLE_OFF 0 | ||
656 | #define LPFC_EQCQ_SOLICIT_ENABLE_ON 1 | ||
594 | #define lpfc_eqcq_doorbell_arm_SHIFT 29 | 657 | #define lpfc_eqcq_doorbell_arm_SHIFT 29 |
595 | #define lpfc_eqcq_doorbell_arm_MASK 0x0001 | 658 | #define lpfc_eqcq_doorbell_arm_MASK 0x0001 |
596 | #define lpfc_eqcq_doorbell_arm_WORD word0 | 659 | #define lpfc_eqcq_doorbell_arm_WORD word0 |
@@ -628,7 +691,7 @@ struct lpfc_register { | |||
628 | #define lpfc_mq_doorbell_num_posted_MASK 0x3FFF | 691 | #define lpfc_mq_doorbell_num_posted_MASK 0x3FFF |
629 | #define lpfc_mq_doorbell_num_posted_WORD word0 | 692 | #define lpfc_mq_doorbell_num_posted_WORD word0 |
630 | #define lpfc_mq_doorbell_id_SHIFT 0 | 693 | #define lpfc_mq_doorbell_id_SHIFT 0 |
631 | #define lpfc_mq_doorbell_id_MASK 0x03FF | 694 | #define lpfc_mq_doorbell_id_MASK 0xFFFF |
632 | #define lpfc_mq_doorbell_id_WORD word0 | 695 | #define lpfc_mq_doorbell_id_WORD word0 |
633 | 696 | ||
634 | struct lpfc_sli4_cfg_mhdr { | 697 | struct lpfc_sli4_cfg_mhdr { |
@@ -1048,12 +1111,18 @@ struct lpfc_mbx_mq_create_ext { | |||
1048 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK | 1111 | #define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK |
1049 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 | 1112 | #define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 |
1050 | #define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap | 1113 | #define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap |
1051 | #define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE | 1114 | #define lpfc_mbx_mq_create_ext_async_evt_fip_SHIFT LPFC_TRAILER_CODE_FCOE |
1052 | #define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001 | 1115 | #define lpfc_mbx_mq_create_ext_async_evt_fip_MASK 0x00000001 |
1053 | #define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap | 1116 | #define lpfc_mbx_mq_create_ext_async_evt_fip_WORD async_evt_bmap |
1054 | #define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5 | 1117 | #define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5 |
1055 | #define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001 | 1118 | #define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001 |
1056 | #define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap | 1119 | #define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap |
1120 | #define lpfc_mbx_mq_create_ext_async_evt_fc_SHIFT LPFC_TRAILER_CODE_FC | ||
1121 | #define lpfc_mbx_mq_create_ext_async_evt_fc_MASK 0x00000001 | ||
1122 | #define lpfc_mbx_mq_create_ext_async_evt_fc_WORD async_evt_bmap | ||
1123 | #define lpfc_mbx_mq_create_ext_async_evt_sli_SHIFT LPFC_TRAILER_CODE_SLI | ||
1124 | #define lpfc_mbx_mq_create_ext_async_evt_sli_MASK 0x00000001 | ||
1125 | #define lpfc_mbx_mq_create_ext_async_evt_sli_WORD async_evt_bmap | ||
1057 | struct mq_context context; | 1126 | struct mq_context context; |
1058 | struct dma_address page[LPFC_MAX_MQ_PAGE]; | 1127 | struct dma_address page[LPFC_MAX_MQ_PAGE]; |
1059 | } request; | 1128 | } request; |
@@ -1307,7 +1376,7 @@ struct lpfc_mbx_query_fw_cfg { | |||
1307 | #define lpfc_function_mode_dal_WORD function_mode | 1376 | #define lpfc_function_mode_dal_WORD function_mode |
1308 | #define lpfc_function_mode_lro_SHIFT 9 | 1377 | #define lpfc_function_mode_lro_SHIFT 9 |
1309 | #define lpfc_function_mode_lro_MASK 0x00000001 | 1378 | #define lpfc_function_mode_lro_MASK 0x00000001 |
1310 | #define lpfc_function_mode_lro_WORD function_mode9 | 1379 | #define lpfc_function_mode_lro_WORD function_mode |
1311 | #define lpfc_function_mode_flex10_SHIFT 10 | 1380 | #define lpfc_function_mode_flex10_SHIFT 10 |
1312 | #define lpfc_function_mode_flex10_MASK 0x00000001 | 1381 | #define lpfc_function_mode_flex10_MASK 0x00000001 |
1313 | #define lpfc_function_mode_flex10_WORD function_mode | 1382 | #define lpfc_function_mode_flex10_WORD function_mode |
@@ -1358,10 +1427,16 @@ struct lpfc_mbx_init_vfi { | |||
1358 | #define lpfc_init_vfi_vf_SHIFT 29 | 1427 | #define lpfc_init_vfi_vf_SHIFT 29 |
1359 | #define lpfc_init_vfi_vf_MASK 0x00000001 | 1428 | #define lpfc_init_vfi_vf_MASK 0x00000001 |
1360 | #define lpfc_init_vfi_vf_WORD word1 | 1429 | #define lpfc_init_vfi_vf_WORD word1 |
1430 | #define lpfc_init_vfi_vp_SHIFT 28 | ||
1431 | #define lpfc_init_vfi_vp_MASK 0x00000001 | ||
1432 | #define lpfc_init_vfi_vp_WORD word1 | ||
1361 | #define lpfc_init_vfi_vfi_SHIFT 0 | 1433 | #define lpfc_init_vfi_vfi_SHIFT 0 |
1362 | #define lpfc_init_vfi_vfi_MASK 0x0000FFFF | 1434 | #define lpfc_init_vfi_vfi_MASK 0x0000FFFF |
1363 | #define lpfc_init_vfi_vfi_WORD word1 | 1435 | #define lpfc_init_vfi_vfi_WORD word1 |
1364 | uint32_t word2; | 1436 | uint32_t word2; |
1437 | #define lpfc_init_vfi_vpi_SHIFT 16 | ||
1438 | #define lpfc_init_vfi_vpi_MASK 0x0000FFFF | ||
1439 | #define lpfc_init_vfi_vpi_WORD word2 | ||
1365 | #define lpfc_init_vfi_fcfi_SHIFT 0 | 1440 | #define lpfc_init_vfi_fcfi_SHIFT 0 |
1366 | #define lpfc_init_vfi_fcfi_MASK 0x0000FFFF | 1441 | #define lpfc_init_vfi_fcfi_MASK 0x0000FFFF |
1367 | #define lpfc_init_vfi_fcfi_WORD word2 | 1442 | #define lpfc_init_vfi_fcfi_WORD word2 |
@@ -2069,6 +2144,8 @@ struct lpfc_mcqe { | |||
2069 | #define LPFC_TRAILER_CODE_FCOE 0x2 | 2144 | #define LPFC_TRAILER_CODE_FCOE 0x2 |
2070 | #define LPFC_TRAILER_CODE_DCBX 0x3 | 2145 | #define LPFC_TRAILER_CODE_DCBX 0x3 |
2071 | #define LPFC_TRAILER_CODE_GRP5 0x5 | 2146 | #define LPFC_TRAILER_CODE_GRP5 0x5 |
2147 | #define LPFC_TRAILER_CODE_FC 0x10 | ||
2148 | #define LPFC_TRAILER_CODE_SLI 0x11 | ||
2072 | }; | 2149 | }; |
2073 | 2150 | ||
2074 | struct lpfc_acqe_link { | 2151 | struct lpfc_acqe_link { |
@@ -2094,11 +2171,12 @@ struct lpfc_acqe_link { | |||
2094 | #define LPFC_ASYNC_LINK_STATUS_UP 0x1 | 2171 | #define LPFC_ASYNC_LINK_STATUS_UP 0x1 |
2095 | #define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 | 2172 | #define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 |
2096 | #define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 | 2173 | #define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 |
2097 | #define lpfc_acqe_link_physical_SHIFT 0 | 2174 | #define lpfc_acqe_link_type_SHIFT 6 |
2098 | #define lpfc_acqe_link_physical_MASK 0x000000FF | 2175 | #define lpfc_acqe_link_type_MASK 0x00000003 |
2099 | #define lpfc_acqe_link_physical_WORD word0 | 2176 | #define lpfc_acqe_link_type_WORD word0 |
2100 | #define LPFC_ASYNC_LINK_PORT_A 0x0 | 2177 | #define lpfc_acqe_link_number_SHIFT 0 |
2101 | #define LPFC_ASYNC_LINK_PORT_B 0x1 | 2178 | #define lpfc_acqe_link_number_MASK 0x0000003F |
2179 | #define lpfc_acqe_link_number_WORD word0 | ||
2102 | uint32_t word1; | 2180 | uint32_t word1; |
2103 | #define lpfc_acqe_link_fault_SHIFT 0 | 2181 | #define lpfc_acqe_link_fault_SHIFT 0 |
2104 | #define lpfc_acqe_link_fault_MASK 0x000000FF | 2182 | #define lpfc_acqe_link_fault_MASK 0x000000FF |
@@ -2106,29 +2184,31 @@ struct lpfc_acqe_link { | |||
2106 | #define LPFC_ASYNC_LINK_FAULT_NONE 0x0 | 2184 | #define LPFC_ASYNC_LINK_FAULT_NONE 0x0 |
2107 | #define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 | 2185 | #define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 |
2108 | #define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 | 2186 | #define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 |
2109 | #define lpfc_acqe_qos_link_speed_SHIFT 16 | 2187 | #define lpfc_acqe_logical_link_speed_SHIFT 16 |
2110 | #define lpfc_acqe_qos_link_speed_MASK 0x0000FFFF | 2188 | #define lpfc_acqe_logical_link_speed_MASK 0x0000FFFF |
2111 | #define lpfc_acqe_qos_link_speed_WORD word1 | 2189 | #define lpfc_acqe_logical_link_speed_WORD word1 |
2112 | uint32_t event_tag; | 2190 | uint32_t event_tag; |
2113 | uint32_t trailer; | 2191 | uint32_t trailer; |
2192 | #define LPFC_LINK_EVENT_TYPE_PHYSICAL 0x0 | ||
2193 | #define LPFC_LINK_EVENT_TYPE_VIRTUAL 0x1 | ||
2114 | }; | 2194 | }; |
2115 | 2195 | ||
2116 | struct lpfc_acqe_fcoe { | 2196 | struct lpfc_acqe_fip { |
2117 | uint32_t index; | 2197 | uint32_t index; |
2118 | uint32_t word1; | 2198 | uint32_t word1; |
2119 | #define lpfc_acqe_fcoe_fcf_count_SHIFT 0 | 2199 | #define lpfc_acqe_fip_fcf_count_SHIFT 0 |
2120 | #define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF | 2200 | #define lpfc_acqe_fip_fcf_count_MASK 0x0000FFFF |
2121 | #define lpfc_acqe_fcoe_fcf_count_WORD word1 | 2201 | #define lpfc_acqe_fip_fcf_count_WORD word1 |
2122 | #define lpfc_acqe_fcoe_event_type_SHIFT 16 | 2202 | #define lpfc_acqe_fip_event_type_SHIFT 16 |
2123 | #define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF | 2203 | #define lpfc_acqe_fip_event_type_MASK 0x0000FFFF |
2124 | #define lpfc_acqe_fcoe_event_type_WORD word1 | 2204 | #define lpfc_acqe_fip_event_type_WORD word1 |
2125 | #define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 | ||
2126 | #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 | ||
2127 | #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 | ||
2128 | #define LPFC_FCOE_EVENT_TYPE_CVL 0x4 | ||
2129 | #define LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD 0x5 | ||
2130 | uint32_t event_tag; | 2205 | uint32_t event_tag; |
2131 | uint32_t trailer; | 2206 | uint32_t trailer; |
2207 | #define LPFC_FIP_EVENT_TYPE_NEW_FCF 0x1 | ||
2208 | #define LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL 0x2 | ||
2209 | #define LPFC_FIP_EVENT_TYPE_FCF_DEAD 0x3 | ||
2210 | #define LPFC_FIP_EVENT_TYPE_CVL 0x4 | ||
2211 | #define LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD 0x5 | ||
2132 | }; | 2212 | }; |
2133 | 2213 | ||
2134 | struct lpfc_acqe_dcbx { | 2214 | struct lpfc_acqe_dcbx { |
@@ -2140,9 +2220,12 @@ struct lpfc_acqe_dcbx { | |||
2140 | 2220 | ||
2141 | struct lpfc_acqe_grp5 { | 2221 | struct lpfc_acqe_grp5 { |
2142 | uint32_t word0; | 2222 | uint32_t word0; |
2143 | #define lpfc_acqe_grp5_pport_SHIFT 0 | 2223 | #define lpfc_acqe_grp5_type_SHIFT 6 |
2144 | #define lpfc_acqe_grp5_pport_MASK 0x000000FF | 2224 | #define lpfc_acqe_grp5_type_MASK 0x00000003 |
2145 | #define lpfc_acqe_grp5_pport_WORD word0 | 2225 | #define lpfc_acqe_grp5_type_WORD word0 |
2226 | #define lpfc_acqe_grp5_number_SHIFT 0 | ||
2227 | #define lpfc_acqe_grp5_number_MASK 0x0000003F | ||
2228 | #define lpfc_acqe_grp5_number_WORD word0 | ||
2146 | uint32_t word1; | 2229 | uint32_t word1; |
2147 | #define lpfc_acqe_grp5_llink_spd_SHIFT 16 | 2230 | #define lpfc_acqe_grp5_llink_spd_SHIFT 16 |
2148 | #define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF | 2231 | #define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF |
@@ -2151,6 +2234,68 @@ struct lpfc_acqe_grp5 { | |||
2151 | uint32_t trailer; | 2234 | uint32_t trailer; |
2152 | }; | 2235 | }; |
2153 | 2236 | ||
2237 | struct lpfc_acqe_fc_la { | ||
2238 | uint32_t word0; | ||
2239 | #define lpfc_acqe_fc_la_speed_SHIFT 24 | ||
2240 | #define lpfc_acqe_fc_la_speed_MASK 0x000000FF | ||
2241 | #define lpfc_acqe_fc_la_speed_WORD word0 | ||
2242 | #define LPFC_FC_LA_SPEED_UNKOWN 0x0 | ||
2243 | #define LPFC_FC_LA_SPEED_1G 0x1 | ||
2244 | #define LPFC_FC_LA_SPEED_2G 0x2 | ||
2245 | #define LPFC_FC_LA_SPEED_4G 0x4 | ||
2246 | #define LPFC_FC_LA_SPEED_8G 0x8 | ||
2247 | #define LPFC_FC_LA_SPEED_10G 0xA | ||
2248 | #define LPFC_FC_LA_SPEED_16G 0x10 | ||
2249 | #define lpfc_acqe_fc_la_topology_SHIFT 16 | ||
2250 | #define lpfc_acqe_fc_la_topology_MASK 0x000000FF | ||
2251 | #define lpfc_acqe_fc_la_topology_WORD word0 | ||
2252 | #define LPFC_FC_LA_TOP_UNKOWN 0x0 | ||
2253 | #define LPFC_FC_LA_TOP_P2P 0x1 | ||
2254 | #define LPFC_FC_LA_TOP_FCAL 0x2 | ||
2255 | #define LPFC_FC_LA_TOP_INTERNAL_LOOP 0x3 | ||
2256 | #define LPFC_FC_LA_TOP_SERDES_LOOP 0x4 | ||
2257 | #define lpfc_acqe_fc_la_att_type_SHIFT 8 | ||
2258 | #define lpfc_acqe_fc_la_att_type_MASK 0x000000FF | ||
2259 | #define lpfc_acqe_fc_la_att_type_WORD word0 | ||
2260 | #define LPFC_FC_LA_TYPE_LINK_UP 0x1 | ||
2261 | #define LPFC_FC_LA_TYPE_LINK_DOWN 0x2 | ||
2262 | #define LPFC_FC_LA_TYPE_NO_HARD_ALPA 0x3 | ||
2263 | #define lpfc_acqe_fc_la_port_type_SHIFT 6 | ||
2264 | #define lpfc_acqe_fc_la_port_type_MASK 0x00000003 | ||
2265 | #define lpfc_acqe_fc_la_port_type_WORD word0 | ||
2266 | #define LPFC_LINK_TYPE_ETHERNET 0x0 | ||
2267 | #define LPFC_LINK_TYPE_FC 0x1 | ||
2268 | #define lpfc_acqe_fc_la_port_number_SHIFT 0 | ||
2269 | #define lpfc_acqe_fc_la_port_number_MASK 0x0000003F | ||
2270 | #define lpfc_acqe_fc_la_port_number_WORD word0 | ||
2271 | uint32_t word1; | ||
2272 | #define lpfc_acqe_fc_la_llink_spd_SHIFT 16 | ||
2273 | #define lpfc_acqe_fc_la_llink_spd_MASK 0x0000FFFF | ||
2274 | #define lpfc_acqe_fc_la_llink_spd_WORD word1 | ||
2275 | #define lpfc_acqe_fc_la_fault_SHIFT 0 | ||
2276 | #define lpfc_acqe_fc_la_fault_MASK 0x000000FF | ||
2277 | #define lpfc_acqe_fc_la_fault_WORD word1 | ||
2278 | #define LPFC_FC_LA_FAULT_NONE 0x0 | ||
2279 | #define LPFC_FC_LA_FAULT_LOCAL 0x1 | ||
2280 | #define LPFC_FC_LA_FAULT_REMOTE 0x2 | ||
2281 | uint32_t event_tag; | ||
2282 | uint32_t trailer; | ||
2283 | #define LPFC_FC_LA_EVENT_TYPE_FC_LINK 0x1 | ||
2284 | #define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2 | ||
2285 | }; | ||
2286 | |||
2287 | struct lpfc_acqe_sli { | ||
2288 | uint32_t event_data1; | ||
2289 | uint32_t event_data2; | ||
2290 | uint32_t reserved; | ||
2291 | uint32_t trailer; | ||
2292 | #define LPFC_SLI_EVENT_TYPE_PORT_ERROR 0x1 | ||
2293 | #define LPFC_SLI_EVENT_TYPE_OVER_TEMP 0x2 | ||
2294 | #define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3 | ||
2295 | #define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4 | ||
2296 | #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 | ||
2297 | }; | ||
2298 | |||
2154 | /* | 2299 | /* |
2155 | * Define the bootstrap mailbox (bmbx) region used to communicate | 2300 | * Define the bootstrap mailbox (bmbx) region used to communicate |
2156 | * mailbox command between the host and port. The mailbox consists | 2301 | * mailbox command between the host and port. The mailbox consists |
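struct lpfc_acqe_fc_la, added above for the new FC trailer code, folds the link speed, topology, attention type, port type and port number into word0 of the async completion entry, with the logical link speed and fault code in word1. A stand-alone sketch of unpacking word0 using the shifts and masks from the structure; the named constants are inlined as comments so the example compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define GET(w, shift, mask)	(((w) >> (shift)) & (mask))

static void decode_fc_la_word0(uint32_t w0)
{
	uint32_t speed = GET(w0, 24, 0xFF);	/* 0x1=1G ... 0x10=16G          */
	uint32_t topo  = GET(w0, 16, 0xFF);	/* 0x1=P2P, 0x2=FC-AL           */
	uint32_t att   = GET(w0, 8, 0xFF);	/* 0x1=link up, 0x2=link down   */
	uint32_t port  = GET(w0, 0, 0x3F);	/* port number                  */

	printf("port %u: att=%u topo=%u speed code=0x%x\n",
	       (unsigned)port, (unsigned)att, (unsigned)topo, (unsigned)speed);
}

int main(void)
{
	/* 16G, point-to-point, link up, port 0 */
	decode_fc_la_word0((0x10u << 24) | (0x1u << 16) | (0x1u << 8) | 0x0u);
	return 0;
}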
@@ -2210,7 +2355,7 @@ struct wqe_common { | |||
2210 | #define wqe_rcvoxid_WORD word9 | 2355 | #define wqe_rcvoxid_WORD word9 |
2211 | uint32_t word10; | 2356 | uint32_t word10; |
2212 | #define wqe_ebde_cnt_SHIFT 0 | 2357 | #define wqe_ebde_cnt_SHIFT 0 |
2213 | #define wqe_ebde_cnt_MASK 0x00000007 | 2358 | #define wqe_ebde_cnt_MASK 0x0000000f |
2214 | #define wqe_ebde_cnt_WORD word10 | 2359 | #define wqe_ebde_cnt_WORD word10 |
2215 | #define wqe_lenloc_SHIFT 7 | 2360 | #define wqe_lenloc_SHIFT 7 |
2216 | #define wqe_lenloc_MASK 0x00000003 | 2361 | #define wqe_lenloc_MASK 0x00000003 |
@@ -2402,7 +2547,6 @@ struct xmit_seq64_wqe { | |||
2402 | uint32_t relative_offset; | 2547 | uint32_t relative_offset; |
2403 | struct wqe_rctl_dfctl wge_ctl; | 2548 | struct wqe_rctl_dfctl wge_ctl; |
2404 | struct wqe_common wqe_com; /* words 6-11 */ | 2549 | struct wqe_common wqe_com; /* words 6-11 */ |
2405 | /* Note: word10 different REVISIT */ | ||
2406 | uint32_t xmit_len; | 2550 | uint32_t xmit_len; |
2407 | uint32_t rsvd_12_15[3]; | 2551 | uint32_t rsvd_12_15[3]; |
2408 | }; | 2552 | }; |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b3065791f303..462242dcdd0a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -446,23 +446,25 @@ lpfc_config_port_post(struct lpfc_hba *phba) | |||
446 | /* Get the default values for Model Name and Description */ | 446 | /* Get the default values for Model Name and Description */ |
447 | lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); | 447 | lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); |
448 | 448 | ||
449 | if ((phba->cfg_link_speed > LINK_SPEED_10G) | 449 | if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G) |
450 | || ((phba->cfg_link_speed == LINK_SPEED_1G) | 450 | || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) |
451 | && !(phba->lmt & LMT_1Gb)) | 451 | && !(phba->lmt & LMT_1Gb)) |
452 | || ((phba->cfg_link_speed == LINK_SPEED_2G) | 452 | || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) |
453 | && !(phba->lmt & LMT_2Gb)) | 453 | && !(phba->lmt & LMT_2Gb)) |
454 | || ((phba->cfg_link_speed == LINK_SPEED_4G) | 454 | || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) |
455 | && !(phba->lmt & LMT_4Gb)) | 455 | && !(phba->lmt & LMT_4Gb)) |
456 | || ((phba->cfg_link_speed == LINK_SPEED_8G) | 456 | || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) |
457 | && !(phba->lmt & LMT_8Gb)) | 457 | && !(phba->lmt & LMT_8Gb)) |
458 | || ((phba->cfg_link_speed == LINK_SPEED_10G) | 458 | || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) |
459 | && !(phba->lmt & LMT_10Gb))) { | 459 | && !(phba->lmt & LMT_10Gb)) |
460 | || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) | ||
461 | && !(phba->lmt & LMT_16Gb))) { | ||
460 | /* Reset link speed to auto */ | 462 | /* Reset link speed to auto */ |
461 | lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, | 463 | lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, |
462 | "1302 Invalid speed for this board: " | 464 | "1302 Invalid speed for this board: " |
463 | "Reset link speed to auto: x%x\n", | 465 | "Reset link speed to auto: x%x\n", |
464 | phba->cfg_link_speed); | 466 | phba->cfg_link_speed); |
465 | phba->cfg_link_speed = LINK_SPEED_AUTO; | 467 | phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO; |
466 | } | 468 | } |
467 | 469 | ||
468 | phba->link_state = LPFC_LINK_DOWN; | 470 | phba->link_state = LPFC_LINK_DOWN; |
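The reworked check compares the requested LPFC_USER_LINK_SPEED_* value against the link-module capability bits (LMT_*) the adapter reports, now including LMT_16Gb, and falls back to auto-negotiation when the board cannot run at the requested rate. The table-driven sketch below condenses the same idea; it is an editorial simplification, trimmed to the rates whose LMT bit values appear in this patch, with plain Gb/s numbers standing in for the user-speed constants and 0 meaning "auto":

#include <stdint.h>
#include <stdio.h>

struct speed_cap { unsigned int gbps; uint32_t lmt_bit; };

static const struct speed_cap caps[] = {
	{ 4,  0x040 /* LMT_4Gb  */ },
	{ 8,  0x080 /* LMT_8Gb  */ },
	{ 10, 0x100 /* LMT_10Gb */ },
	{ 16, 0x200 /* LMT_16Gb */ },
};

/* Return the requested rate if the adapter advertises it, else 0 (= auto). */
static unsigned int validate_link_speed(unsigned int requested_gbps, uint32_t lmt)
{
	size_t i;

	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		if (caps[i].gbps == requested_gbps)
			return (lmt & caps[i].lmt_bit) ? requested_gbps : 0;
	return 0;	/* unknown rate: reset to auto, as the driver does */
}

int main(void)
{
	printf("%u\n", validate_link_speed(16, 0x1c0));	/* prints 0: no 16Gb bit set */
	printf("%u\n", validate_link_speed(8,  0x0c0));	/* prints 8                  */
	return 0;
}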
@@ -648,22 +650,23 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) | |||
648 | mb = &pmb->u.mb; | 650 | mb = &pmb->u.mb; |
649 | pmb->vport = vport; | 651 | pmb->vport = vport; |
650 | 652 | ||
651 | lpfc_init_link(phba, pmb, phba->cfg_topology, | 653 | lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); |
652 | phba->cfg_link_speed); | ||
653 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | 654 | pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; |
654 | lpfc_set_loopback_flag(phba); | 655 | lpfc_set_loopback_flag(phba); |
655 | rc = lpfc_sli_issue_mbox(phba, pmb, flag); | 656 | rc = lpfc_sli_issue_mbox(phba, pmb, flag); |
656 | if (rc != MBX_SUCCESS) { | 657 | if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { |
657 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 658 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
658 | "0498 Adapter failed to init, mbxCmd x%x " | 659 | "0498 Adapter failed to init, mbxCmd x%x " |
659 | "INIT_LINK, mbxStatus x%x\n", | 660 | "INIT_LINK, mbxStatus x%x\n", |
660 | mb->mbxCommand, mb->mbxStatus); | 661 | mb->mbxCommand, mb->mbxStatus); |
661 | /* Clear all interrupt enable conditions */ | 662 | if (phba->sli_rev <= LPFC_SLI_REV3) { |
662 | writel(0, phba->HCregaddr); | 663 | /* Clear all interrupt enable conditions */ |
663 | readl(phba->HCregaddr); /* flush */ | 664 | writel(0, phba->HCregaddr); |
664 | /* Clear all pending interrupts */ | 665 | readl(phba->HCregaddr); /* flush */ |
665 | writel(0xffffffff, phba->HAregaddr); | 666 | /* Clear all pending interrupts */ |
666 | readl(phba->HAregaddr); /* flush */ | 667 | writel(0xffffffff, phba->HAregaddr); |
668 | readl(phba->HAregaddr); /* flush */ | ||
669 | } | ||
667 | phba->link_state = LPFC_HBA_ERROR; | 670 | phba->link_state = LPFC_HBA_ERROR; |
668 | if (rc != MBX_BUSY || flag == MBX_POLL) | 671 | if (rc != MBX_BUSY || flag == MBX_POLL) |
669 | mempool_free(pmb, phba->mbox_mem_pool); | 672 | mempool_free(pmb, phba->mbox_mem_pool); |
@@ -927,6 +930,35 @@ lpfc_hb_timeout(unsigned long ptr) | |||
927 | } | 930 | } |
928 | 931 | ||
929 | /** | 932 | /** |
933 | * lpfc_rrq_timeout - The RRQ-timer timeout handler | ||
934 | * @ptr: unsigned long holds the pointer to lpfc hba data structure. | ||
935 | * | ||
936 | * This is the RRQ-timer timeout handler registered to the lpfc driver. When | ||
937 | * this timer fires, a RRQ timeout event shall be posted to the lpfc driver | ||
938 | * work-port-events bitmap and the worker thread is notified. This timeout | ||
939 | * event will be used by the worker thread to invoke the actual timeout | ||
940 | * handler routine, lpfc_rrq_handler. Any periodical operations will | ||
941 | * be performed in the timeout handler and the RRQ timeout event bit shall | ||
942 | * be cleared by the worker thread after it has taken the event bitmap out. | ||
943 | **/ | ||
944 | static void | ||
945 | lpfc_rrq_timeout(unsigned long ptr) | ||
946 | { | ||
947 | struct lpfc_hba *phba; | ||
948 | uint32_t tmo_posted; | ||
949 | unsigned long iflag; | ||
950 | |||
951 | phba = (struct lpfc_hba *)ptr; | ||
952 | spin_lock_irqsave(&phba->pport->work_port_lock, iflag); | ||
953 | tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE; | ||
954 | if (!tmo_posted) | ||
955 | phba->hba_flag |= HBA_RRQ_ACTIVE; | ||
956 | spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); | ||
957 | if (!tmo_posted) | ||
958 | lpfc_worker_wake_up(phba); | ||
959 | } | ||
960 | |||
961 | /** | ||
930 | * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function | 962 | * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function |
931 | * @phba: pointer to lpfc hba data structure. | 963 | * @phba: pointer to lpfc hba data structure. |
932 | * @pmboxq: pointer to the driver internal queue element for mailbox command. | 964 | * @pmboxq: pointer to the driver internal queue element for mailbox command. |
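lpfc_rrq_timeout above follows the driver's standard deferred-timeout shape: the timer callback only sets the HBA_RRQ_ACTIVE bit under the work-port lock and, if the bit was not already set, wakes the worker thread that performs the real RRQ processing later. The pthread sketch below mimics that "post a flag, wake the worker" pattern in user space; it is purely illustrative and shares nothing with the kernel implementation beyond the shape:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool rrq_active;		/* stands in for the HBA_RRQ_ACTIVE flag */

/* "Timer" side: set the flag and wake the worker only if it was not set yet. */
static void rrq_timeout(void)
{
	bool already;

	pthread_mutex_lock(&lock);
	already = rrq_active;
	rrq_active = true;
	pthread_mutex_unlock(&lock);

	if (!already)
		pthread_cond_signal(&wake);
}

/* Worker side: sleep until the flag is posted, then clear it and handle it. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!rrq_active)
		pthread_cond_wait(&wake, &lock);
	rrq_active = false;
	pthread_mutex_unlock(&lock);
	printf("rrq handler runs here\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	rrq_timeout();
	pthread_join(t, NULL);
	return 0;
}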
@@ -1374,6 +1406,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) | |||
1374 | struct lpfc_vport *vport = phba->pport; | 1406 | struct lpfc_vport *vport = phba->pport; |
1375 | uint32_t event_data; | 1407 | uint32_t event_data; |
1376 | struct Scsi_Host *shost; | 1408 | struct Scsi_Host *shost; |
1409 | uint32_t if_type; | ||
1410 | struct lpfc_register portstat_reg; | ||
1377 | 1411 | ||
1378 | /* If the pci channel is offline, ignore possible errors, since | 1412 | /* If the pci channel is offline, ignore possible errors, since |
1379 | * we cannot communicate with the pci card anyway. | 1413 | * we cannot communicate with the pci card anyway. |
@@ -1390,17 +1424,49 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) | |||
1390 | /* For now, the actual action for SLI4 device handling is not | 1424 | /* For now, the actual action for SLI4 device handling is not |
1391 | * specified yet, just treated it as adaptor hardware failure | 1425 | * specified yet, just treated it as adaptor hardware failure |
1392 | */ | 1426 | */ |
1393 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1394 | "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", | ||
1395 | phba->work_status[0], phba->work_status[1]); | ||
1396 | |||
1397 | event_data = FC_REG_DUMP_EVENT; | 1427 | event_data = FC_REG_DUMP_EVENT; |
1398 | shost = lpfc_shost_from_vport(vport); | 1428 | shost = lpfc_shost_from_vport(vport); |
1399 | fc_host_post_vendor_event(shost, fc_get_event_number(), | 1429 | fc_host_post_vendor_event(shost, fc_get_event_number(), |
1400 | sizeof(event_data), (char *) &event_data, | 1430 | sizeof(event_data), (char *) &event_data, |
1401 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | 1431 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); |
1402 | 1432 | ||
1403 | lpfc_sli4_offline_eratt(phba); | 1433 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
1434 | switch (if_type) { | ||
1435 | case LPFC_SLI_INTF_IF_TYPE_0: | ||
1436 | lpfc_sli4_offline_eratt(phba); | ||
1437 | break; | ||
1438 | case LPFC_SLI_INTF_IF_TYPE_2: | ||
1439 | portstat_reg.word0 = | ||
1440 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | ||
1441 | |||
1442 | if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { | ||
1443 | /* TODO: Register for Overtemp async events. */ | ||
1444 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1445 | "2889 Port Overtemperature event, " | ||
1446 | "taking port\n"); | ||
1447 | spin_lock_irq(&phba->hbalock); | ||
1448 | phba->over_temp_state = HBA_OVER_TEMP; | ||
1449 | spin_unlock_irq(&phba->hbalock); | ||
1450 | lpfc_sli4_offline_eratt(phba); | ||
1451 | return; | ||
1452 | } | ||
1453 | if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) { | ||
1454 | /* | ||
1455 | * TODO: Attempt port recovery via a port reset. | ||
1456 | * When fully implemented, the driver should | ||
1457 | * attempt to recover the port here and return. | ||
1458 | * For now, log an error and take the port offline. | ||
1459 | */ | ||
1460 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1461 | "2887 Port Error: Attempting " | ||
1462 | "Port Recovery\n"); | ||
1463 | } | ||
1464 | lpfc_sli4_offline_eratt(phba); | ||
1465 | break; | ||
1466 | case LPFC_SLI_INTF_IF_TYPE_1: | ||
1467 | default: | ||
1468 | break; | ||
1469 | } | ||
1404 | } | 1470 | } |
1405 | 1471 | ||
1406 | /** | 1472 | /** |
@@ -1459,8 +1525,8 @@ lpfc_handle_latt(struct lpfc_hba *phba) | |||
1459 | lpfc_els_flush_all_cmd(phba); | 1525 | lpfc_els_flush_all_cmd(phba); |
1460 | 1526 | ||
1461 | psli->slistat.link_event++; | 1527 | psli->slistat.link_event++; |
1462 | lpfc_read_la(phba, pmb, mp); | 1528 | lpfc_read_topology(phba, pmb, mp); |
1463 | pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; | 1529 | pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; |
1464 | pmb->vport = vport; | 1530 | pmb->vport = vport; |
1465 | /* Block ELS IOCBs until we have processed this mbox command */ | 1531 | /* Block ELS IOCBs until we have processed this mbox command */ |
1466 | phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; | 1532 | phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; |
@@ -1853,6 +1919,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
1853 | m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", | 1919 | m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", |
1854 | "Fibre Channel Adapter"}; | 1920 | "Fibre Channel Adapter"}; |
1855 | break; | 1921 | break; |
1922 | case PCI_DEVICE_ID_LANCER_FC: | ||
1923 | oneConnect = 1; | ||
1924 | m = (typeof(m)){"Undefined", "PCIe", "Fibre Channel Adapter"}; | ||
1925 | break; | ||
1926 | case PCI_DEVICE_ID_LANCER_FCOE: | ||
1927 | oneConnect = 1; | ||
1928 | m = (typeof(m)){"Undefined", "PCIe", "FCoE"}; | ||
1929 | break; | ||
1856 | default: | 1930 | default: |
1857 | m = (typeof(m)){"Unknown", "", ""}; | 1931 | m = (typeof(m)){"Unknown", "", ""}; |
1858 | break; | 1932 | break; |
@@ -2943,63 +3017,6 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) | |||
2943 | } | 3017 | } |
2944 | 3018 | ||
2945 | /** | 3019 | /** |
2946 | * lpfc_sli4_fw_cfg_check - Read the firmware config and verify FCoE support | ||
2947 | * @phba: pointer to lpfc hba data structure. | ||
2948 | * | ||
2949 | * This function uses the QUERY_FW_CFG mailbox command to determine if the | ||
2950 | * firmware loaded supports FCoE. A return of zero indicates that the mailbox | ||
2951 | * was successful and the firmware supports FCoE. Any other return indicates | ||
2952 | * a error. It is assumed that this function will be called before interrupts | ||
2953 | * are enabled. | ||
2954 | **/ | ||
2955 | static int | ||
2956 | lpfc_sli4_fw_cfg_check(struct lpfc_hba *phba) | ||
2957 | { | ||
2958 | int rc = 0; | ||
2959 | LPFC_MBOXQ_t *mboxq; | ||
2960 | struct lpfc_mbx_query_fw_cfg *query_fw_cfg; | ||
2961 | uint32_t length; | ||
2962 | uint32_t shdr_status, shdr_add_status; | ||
2963 | |||
2964 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2965 | if (!mboxq) { | ||
2966 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2967 | "2621 Failed to allocate mbox for " | ||
2968 | "query firmware config cmd\n"); | ||
2969 | return -ENOMEM; | ||
2970 | } | ||
2971 | query_fw_cfg = &mboxq->u.mqe.un.query_fw_cfg; | ||
2972 | length = (sizeof(struct lpfc_mbx_query_fw_cfg) - | ||
2973 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
2974 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
2975 | LPFC_MBOX_OPCODE_QUERY_FW_CFG, | ||
2976 | length, LPFC_SLI4_MBX_EMBED); | ||
2977 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
2978 | /* The IOCTL status is embedded in the mailbox subheader. */ | ||
2979 | shdr_status = bf_get(lpfc_mbox_hdr_status, | ||
2980 | &query_fw_cfg->header.cfg_shdr.response); | ||
2981 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, | ||
2982 | &query_fw_cfg->header.cfg_shdr.response); | ||
2983 | if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { | ||
2984 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2985 | "2622 Query Firmware Config failed " | ||
2986 | "mbx status x%x, status x%x add_status x%x\n", | ||
2987 | rc, shdr_status, shdr_add_status); | ||
2988 | return -EINVAL; | ||
2989 | } | ||
2990 | if (!bf_get(lpfc_function_mode_fcoe_i, query_fw_cfg)) { | ||
2991 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2992 | "2623 FCoE Function not supported by firmware. " | ||
2993 | "Function mode = %08x\n", | ||
2994 | query_fw_cfg->function_mode); | ||
2995 | return -EINVAL; | ||
2996 | } | ||
2997 | if (rc != MBX_TIMEOUT) | ||
2998 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
2999 | return 0; | ||
3000 | } | ||
3001 | |||
3002 | /** | ||
3003 | * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code | 3020 | * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code |
3004 | * @phba: pointer to lpfc hba data structure. | 3021 | * @phba: pointer to lpfc hba data structure. |
3005 | * @acqe_link: pointer to the async link completion queue entry. | 3022 | * @acqe_link: pointer to the async link completion queue entry. |
@@ -3051,20 +3068,20 @@ lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, | |||
3051 | switch (bf_get(lpfc_acqe_link_status, acqe_link)) { | 3068 | switch (bf_get(lpfc_acqe_link_status, acqe_link)) { |
3052 | case LPFC_ASYNC_LINK_STATUS_DOWN: | 3069 | case LPFC_ASYNC_LINK_STATUS_DOWN: |
3053 | case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: | 3070 | case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: |
3054 | att_type = AT_LINK_DOWN; | 3071 | att_type = LPFC_ATT_LINK_DOWN; |
3055 | break; | 3072 | break; |
3056 | case LPFC_ASYNC_LINK_STATUS_UP: | 3073 | case LPFC_ASYNC_LINK_STATUS_UP: |
3057 | /* Ignore physical link up events - wait for logical link up */ | 3074 | /* Ignore physical link up events - wait for logical link up */ |
3058 | att_type = AT_RESERVED; | 3075 | att_type = LPFC_ATT_RESERVED; |
3059 | break; | 3076 | break; |
3060 | case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: | 3077 | case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: |
3061 | att_type = AT_LINK_UP; | 3078 | att_type = LPFC_ATT_LINK_UP; |
3062 | break; | 3079 | break; |
3063 | default: | 3080 | default: |
3064 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3081 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
3065 | "0399 Invalid link attention type: x%x\n", | 3082 | "0399 Invalid link attention type: x%x\n", |
3066 | bf_get(lpfc_acqe_link_status, acqe_link)); | 3083 | bf_get(lpfc_acqe_link_status, acqe_link)); |
3067 | att_type = AT_RESERVED; | 3084 | att_type = LPFC_ATT_RESERVED; |
3068 | break; | 3085 | break; |
3069 | } | 3086 | } |
3070 | return att_type; | 3087 | return att_type; |
@@ -3088,36 +3105,32 @@ lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, | |||
3088 | 3105 | ||
3089 | switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { | 3106 | switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { |
3090 | case LPFC_ASYNC_LINK_SPEED_ZERO: | 3107 | case LPFC_ASYNC_LINK_SPEED_ZERO: |
3091 | link_speed = LA_UNKNW_LINK; | ||
3092 | break; | ||
3093 | case LPFC_ASYNC_LINK_SPEED_10MBPS: | 3108 | case LPFC_ASYNC_LINK_SPEED_10MBPS: |
3094 | link_speed = LA_UNKNW_LINK; | ||
3095 | break; | ||
3096 | case LPFC_ASYNC_LINK_SPEED_100MBPS: | 3109 | case LPFC_ASYNC_LINK_SPEED_100MBPS: |
3097 | link_speed = LA_UNKNW_LINK; | 3110 | link_speed = LPFC_LINK_SPEED_UNKNOWN; |
3098 | break; | 3111 | break; |
3099 | case LPFC_ASYNC_LINK_SPEED_1GBPS: | 3112 | case LPFC_ASYNC_LINK_SPEED_1GBPS: |
3100 | link_speed = LA_1GHZ_LINK; | 3113 | link_speed = LPFC_LINK_SPEED_1GHZ; |
3101 | break; | 3114 | break; |
3102 | case LPFC_ASYNC_LINK_SPEED_10GBPS: | 3115 | case LPFC_ASYNC_LINK_SPEED_10GBPS: |
3103 | link_speed = LA_10GHZ_LINK; | 3116 | link_speed = LPFC_LINK_SPEED_10GHZ; |
3104 | break; | 3117 | break; |
3105 | default: | 3118 | default: |
3106 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3119 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
3107 | "0483 Invalid link-attention link speed: x%x\n", | 3120 | "0483 Invalid link-attention link speed: x%x\n", |
3108 | bf_get(lpfc_acqe_link_speed, acqe_link)); | 3121 | bf_get(lpfc_acqe_link_speed, acqe_link)); |
3109 | link_speed = LA_UNKNW_LINK; | 3122 | link_speed = LPFC_LINK_SPEED_UNKNOWN; |
3110 | break; | 3123 | break; |
3111 | } | 3124 | } |
3112 | return link_speed; | 3125 | return link_speed; |
3113 | } | 3126 | } |
3114 | 3127 | ||
3115 | /** | 3128 | /** |
3116 | * lpfc_sli4_async_link_evt - Process the asynchronous link event | 3129 | * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event |
3117 | * @phba: pointer to lpfc hba data structure. | 3130 | * @phba: pointer to lpfc hba data structure. |
3118 | * @acqe_link: pointer to the async link completion queue entry. | 3131 | * @acqe_link: pointer to the async link completion queue entry. |
3119 | * | 3132 | * |
3120 | * This routine is to handle the SLI4 asynchronous link event. | 3133 | * This routine is to handle the SLI4 asynchronous FCoE link event. |
3121 | **/ | 3134 | **/ |
3122 | static void | 3135 | static void |
3123 | lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | 3136 | lpfc_sli4_async_link_evt(struct lpfc_hba *phba, |
@@ -3126,11 +3139,12 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | |||
3126 | struct lpfc_dmabuf *mp; | 3139 | struct lpfc_dmabuf *mp; |
3127 | LPFC_MBOXQ_t *pmb; | 3140 | LPFC_MBOXQ_t *pmb; |
3128 | MAILBOX_t *mb; | 3141 | MAILBOX_t *mb; |
3129 | READ_LA_VAR *la; | 3142 | struct lpfc_mbx_read_top *la; |
3130 | uint8_t att_type; | 3143 | uint8_t att_type; |
3144 | int rc; | ||
3131 | 3145 | ||
3132 | att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); | 3146 | att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); |
3133 | if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) | 3147 | if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP) |
3134 | return; | 3148 | return; |
3135 | phba->fcoe_eventtag = acqe_link->event_tag; | 3149 | phba->fcoe_eventtag = acqe_link->event_tag; |
3136 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 3150 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
@@ -3161,45 +3175,168 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | |||
3161 | /* Update link event statistics */ | 3175 | /* Update link event statistics */ |
3162 | phba->sli.slistat.link_event++; | 3176 | phba->sli.slistat.link_event++; |
3163 | 3177 | ||
3164 | /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ | 3178 | /* Create lpfc_handle_latt mailbox command from link ACQE */ |
3165 | lpfc_read_la(phba, pmb, mp); | 3179 | lpfc_read_topology(phba, pmb, mp); |
3180 | pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; | ||
3166 | pmb->vport = phba->pport; | 3181 | pmb->vport = phba->pport; |
3167 | 3182 | ||
3183 | /* Keep the link status for extra SLI4 state machine reference */ | ||
3184 | phba->sli4_hba.link_state.speed = | ||
3185 | bf_get(lpfc_acqe_link_speed, acqe_link); | ||
3186 | phba->sli4_hba.link_state.duplex = | ||
3187 | bf_get(lpfc_acqe_link_duplex, acqe_link); | ||
3188 | phba->sli4_hba.link_state.status = | ||
3189 | bf_get(lpfc_acqe_link_status, acqe_link); | ||
3190 | phba->sli4_hba.link_state.type = | ||
3191 | bf_get(lpfc_acqe_link_type, acqe_link); | ||
3192 | phba->sli4_hba.link_state.number = | ||
3193 | bf_get(lpfc_acqe_link_number, acqe_link); | ||
3194 | phba->sli4_hba.link_state.fault = | ||
3195 | bf_get(lpfc_acqe_link_fault, acqe_link); | ||
3196 | phba->sli4_hba.link_state.logical_speed = | ||
3197 | bf_get(lpfc_acqe_logical_link_speed, acqe_link); | ||
3198 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3199 | "2900 Async FCoE Link event - Speed:%dGBit duplex:x%x " | ||
3200 | "LA Type:x%x Port Type:%d Port Number:%d Logical " | ||
3201 | "speed:%dMbps Fault:%d\n", | ||
3202 | phba->sli4_hba.link_state.speed, | ||
3203 | phba->sli4_hba.link_state.topology, | ||
3204 | phba->sli4_hba.link_state.status, | ||
3205 | phba->sli4_hba.link_state.type, | ||
3206 | phba->sli4_hba.link_state.number, | ||
3207 | phba->sli4_hba.link_state.logical_speed * 10, | ||
3208 | phba->sli4_hba.link_state.fault); | ||
3209 | /* | ||
3210 | * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch | ||
3211 | * topology info. Note: Optional for non FC-AL ports. | ||
3212 | */ | ||
3213 | if (!(phba->hba_flag & HBA_FCOE_MODE)) { | ||
3214 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
3215 | if (rc == MBX_NOT_FINISHED) | ||
3216 | goto out_free_dmabuf; | ||
3217 | return; | ||
3218 | } | ||
3219 | /* | ||
3220 | * For FCoE Mode: fill in all the topology information we need and call | ||
3221 | * the READ_TOPOLOGY completion routine to continue without actually | ||
3222 | * sending the READ_TOPOLOGY mailbox command to the port. | ||
3223 | */ | ||
3168 | /* Parse and translate status field */ | 3224 | /* Parse and translate status field */ |
3169 | mb = &pmb->u.mb; | 3225 | mb = &pmb->u.mb; |
3170 | mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); | 3226 | mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); |
3171 | 3227 | ||
3172 | /* Parse and translate link attention fields */ | 3228 | /* Parse and translate link attention fields */ |
3173 | la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; | 3229 | la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; |
3174 | la->eventTag = acqe_link->event_tag; | 3230 | la->eventTag = acqe_link->event_tag; |
3175 | la->attType = att_type; | 3231 | bf_set(lpfc_mbx_read_top_att_type, la, att_type); |
3176 | la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); | 3232 | bf_set(lpfc_mbx_read_top_link_spd, la, |
3233 | lpfc_sli4_parse_latt_link_speed(phba, acqe_link)); | ||
3177 | 3234 | ||
3178 | /* Fake the following irrelevant fields */ | 3235 | /* Fake the following irrelevant fields */ |
3179 | la->topology = TOPOLOGY_PT_PT; | 3236 | bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT); |
3180 | la->granted_AL_PA = 0; | 3237 | bf_set(lpfc_mbx_read_top_alpa_granted, la, 0); |
3181 | la->il = 0; | 3238 | bf_set(lpfc_mbx_read_top_il, la, 0); |
3182 | la->pb = 0; | 3239 | bf_set(lpfc_mbx_read_top_pb, la, 0); |
3183 | la->fa = 0; | 3240 | bf_set(lpfc_mbx_read_top_fa, la, 0); |
3184 | la->mm = 0; | 3241 | bf_set(lpfc_mbx_read_top_mm, la, 0); |
3242 | |||
3243 | /* Invoke the lpfc_handle_latt mailbox command callback function */ | ||
3244 | lpfc_mbx_cmpl_read_topology(phba, pmb); | ||
3245 | |||
3246 | return; | ||
3185 | 3247 | ||
3248 | out_free_dmabuf: | ||
3249 | kfree(mp); | ||
3250 | out_free_pmb: | ||
3251 | mempool_free(pmb, phba->mbox_mem_pool); | ||
3252 | } | ||
3253 | |||
3254 | /** | ||
3255 | * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event | ||
3256 | * @phba: pointer to lpfc hba data structure. | ||
3257 | * @acqe_fc: pointer to the async fc completion queue entry. | ||
3258 | * | ||
3259 | * This routine is to handle the SLI4 asynchronous FC event. It will simply log | ||
3260 | * that the event was received and then issue a read_topology mailbox command so | ||
3261 | * that the rest of the driver will treat it the same as SLI3. | ||
3262 | **/ | ||
3263 | static void | ||
3264 | lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc) | ||
3265 | { | ||
3266 | struct lpfc_dmabuf *mp; | ||
3267 | LPFC_MBOXQ_t *pmb; | ||
3268 | int rc; | ||
3269 | |||
3270 | if (bf_get(lpfc_trailer_type, acqe_fc) != | ||
3271 | LPFC_FC_LA_EVENT_TYPE_FC_LINK) { | ||
3272 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3273 | "2895 Non FC link Event detected.(%d)\n", | ||
3274 | bf_get(lpfc_trailer_type, acqe_fc)); | ||
3275 | return; | ||
3276 | } | ||
3186 | /* Keep the link status for extra SLI4 state machine reference */ | 3277 | /* Keep the link status for extra SLI4 state machine reference */ |
3187 | phba->sli4_hba.link_state.speed = | 3278 | phba->sli4_hba.link_state.speed = |
3188 | bf_get(lpfc_acqe_link_speed, acqe_link); | 3279 | bf_get(lpfc_acqe_fc_la_speed, acqe_fc); |
3189 | phba->sli4_hba.link_state.duplex = | 3280 | phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL; |
3190 | bf_get(lpfc_acqe_link_duplex, acqe_link); | 3281 | phba->sli4_hba.link_state.topology = |
3282 | bf_get(lpfc_acqe_fc_la_topology, acqe_fc); | ||
3191 | phba->sli4_hba.link_state.status = | 3283 | phba->sli4_hba.link_state.status = |
3192 | bf_get(lpfc_acqe_link_status, acqe_link); | 3284 | bf_get(lpfc_acqe_fc_la_att_type, acqe_fc); |
3193 | phba->sli4_hba.link_state.physical = | 3285 | phba->sli4_hba.link_state.type = |
3194 | bf_get(lpfc_acqe_link_physical, acqe_link); | 3286 | bf_get(lpfc_acqe_fc_la_port_type, acqe_fc); |
3287 | phba->sli4_hba.link_state.number = | ||
3288 | bf_get(lpfc_acqe_fc_la_port_number, acqe_fc); | ||
3195 | phba->sli4_hba.link_state.fault = | 3289 | phba->sli4_hba.link_state.fault = |
3196 | bf_get(lpfc_acqe_link_fault, acqe_link); | 3290 | bf_get(lpfc_acqe_link_fault, acqe_fc); |
3197 | phba->sli4_hba.link_state.logical_speed = | 3291 | phba->sli4_hba.link_state.logical_speed = |
3198 | bf_get(lpfc_acqe_qos_link_speed, acqe_link); | 3292 | bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc); |
3293 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3294 | "2896 Async FC event - Speed:%dGBaud Topology:x%x " | ||
3295 | "LA Type:x%x Port Type:%d Port Number:%d Logical speed:" | ||
3296 | "%dMbps Fault:%d\n", | ||
3297 | phba->sli4_hba.link_state.speed, | ||
3298 | phba->sli4_hba.link_state.topology, | ||
3299 | phba->sli4_hba.link_state.status, | ||
3300 | phba->sli4_hba.link_state.type, | ||
3301 | phba->sli4_hba.link_state.number, | ||
3302 | phba->sli4_hba.link_state.logical_speed * 10, | ||
3303 | phba->sli4_hba.link_state.fault); | ||
3304 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
3305 | if (!pmb) { | ||
3306 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3307 | "2897 The mboxq allocation failed\n"); | ||
3308 | return; | ||
3309 | } | ||
3310 | mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
3311 | if (!mp) { | ||
3312 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3313 | "2898 The lpfc_dmabuf allocation failed\n"); | ||
3314 | goto out_free_pmb; | ||
3315 | } | ||
3316 | mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); | ||
3317 | if (!mp->virt) { | ||
3318 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3319 | "2899 The mbuf allocation failed\n"); | ||
3320 | goto out_free_dmabuf; | ||
3321 | } | ||
3199 | 3322 | ||
3200 | /* Invoke the lpfc_handle_latt mailbox command callback function */ | 3323 | /* Cleanup any outstanding ELS commands */ |
3201 | lpfc_mbx_cmpl_read_la(phba, pmb); | 3324 | lpfc_els_flush_all_cmd(phba); |
3325 | |||
3326 | /* Block ELS IOCBs until we are done processing this link event */ | ||
3327 | phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; | ||
3328 | |||
3329 | /* Update link event statistics */ | ||
3330 | phba->sli.slistat.link_event++; | ||
3331 | |||
3332 | /* Create lpfc_handle_latt mailbox command from link ACQE */ | ||
3333 | lpfc_read_topology(phba, pmb, mp); | ||
3334 | pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology; | ||
3335 | pmb->vport = phba->pport; | ||
3202 | 3336 | ||
3337 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); | ||
3338 | if (rc == MBX_NOT_FINISHED) | ||
3339 | goto out_free_dmabuf; | ||
3203 | return; | 3340 | return; |
3204 | 3341 | ||
3205 | out_free_dmabuf: | 3342 | out_free_dmabuf: |
@@ -3209,6 +3346,24 @@ out_free_pmb: | |||
3209 | } | 3346 | } |
3210 | 3347 | ||
3211 | /** | 3348 | /** |
3349 | * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event | ||
3350 | * @phba: pointer to lpfc hba data structure. | ||
3351 | * @acqe_sli: pointer to the async SLI completion queue entry. | ||
3352 | * | ||
3353 | * This routine is to handle the SLI4 asynchronous SLI events. | ||
3354 | **/ | ||
3355 | static void | ||
3356 | lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) | ||
3357 | { | ||
3358 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3359 | "2901 Async SLI event - Event Data1:x%08x Event Data2:" | ||
3360 | "x%08x SLI Event Type:%d", | ||
3361 | acqe_sli->event_data1, acqe_sli->event_data2, | ||
3362 | bf_get(lpfc_trailer_type, acqe_sli)); | ||
3363 | return; | ||
3364 | } | ||
3365 | |||
3366 | /** | ||
3212 | * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport | 3367 | * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport |
3213 | * @vport: pointer to vport data structure. | 3368 | * @vport: pointer to vport data structure. |
3214 | * | 3369 | * |
@@ -3247,10 +3402,12 @@ lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) | |||
3247 | if (!ndlp) | 3402 | if (!ndlp) |
3248 | return 0; | 3403 | return 0; |
3249 | } | 3404 | } |
3250 | if (phba->pport->port_state < LPFC_FLOGI) | 3405 | if ((phba->pport->port_state < LPFC_FLOGI) && |
3406 | (phba->pport->port_state != LPFC_VPORT_FAILED)) | ||
3251 | return NULL; | 3407 | return NULL; |
3252 | /* If virtual link is not yet instantiated ignore CVL */ | 3408 | /* If virtual link is not yet instantiated ignore CVL */ |
3253 | if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)) | 3409 | if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC) |
3410 | && (vport->port_state != LPFC_VPORT_FAILED)) | ||
3254 | return NULL; | 3411 | return NULL; |
3255 | shost = lpfc_shost_from_vport(vport); | 3412 | shost = lpfc_shost_from_vport(vport); |
3256 | if (!shost) | 3413 | if (!shost) |
@@ -3285,17 +3442,17 @@ lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) | |||
3285 | } | 3442 | } |
3286 | 3443 | ||
3287 | /** | 3444 | /** |
3288 | * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event | 3445 | * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event |
3289 | * @phba: pointer to lpfc hba data structure. | 3446 | * @phba: pointer to lpfc hba data structure. |
3290 | * @acqe_link: pointer to the async fcoe completion queue entry. | 3447 | * @acqe_link: pointer to the async fcoe completion queue entry. |
3291 | * | 3448 | * |
3292 | * This routine is to handle the SLI4 asynchronous fcoe event. | 3449 | * This routine is to handle the SLI4 asynchronous fcoe event. |
3293 | **/ | 3450 | **/ |
3294 | static void | 3451 | static void |
3295 | lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | 3452 | lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, |
3296 | struct lpfc_acqe_fcoe *acqe_fcoe) | 3453 | struct lpfc_acqe_fip *acqe_fip) |
3297 | { | 3454 | { |
3298 | uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); | 3455 | uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip); |
3299 | int rc; | 3456 | int rc; |
3300 | struct lpfc_vport *vport; | 3457 | struct lpfc_vport *vport; |
3301 | struct lpfc_nodelist *ndlp; | 3458 | struct lpfc_nodelist *ndlp; |
@@ -3304,25 +3461,25 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3304 | struct lpfc_vport **vports; | 3461 | struct lpfc_vport **vports; |
3305 | int i; | 3462 | int i; |
3306 | 3463 | ||
3307 | phba->fc_eventTag = acqe_fcoe->event_tag; | 3464 | phba->fc_eventTag = acqe_fip->event_tag; |
3308 | phba->fcoe_eventtag = acqe_fcoe->event_tag; | 3465 | phba->fcoe_eventtag = acqe_fip->event_tag; |
3309 | switch (event_type) { | 3466 | switch (event_type) { |
3310 | case LPFC_FCOE_EVENT_TYPE_NEW_FCF: | 3467 | case LPFC_FIP_EVENT_TYPE_NEW_FCF: |
3311 | case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: | 3468 | case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD: |
3312 | if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) | 3469 | if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF) |
3313 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3470 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
3314 | LOG_DISCOVERY, | 3471 | LOG_DISCOVERY, |
3315 | "2546 New FCF event, evt_tag:x%x, " | 3472 | "2546 New FCF event, evt_tag:x%x, " |
3316 | "index:x%x\n", | 3473 | "index:x%x\n", |
3317 | acqe_fcoe->event_tag, | 3474 | acqe_fip->event_tag, |
3318 | acqe_fcoe->index); | 3475 | acqe_fip->index); |
3319 | else | 3476 | else |
3320 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | | 3477 | lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | |
3321 | LOG_DISCOVERY, | 3478 | LOG_DISCOVERY, |
3322 | "2788 FCF param modified event, " | 3479 | "2788 FCF param modified event, " |
3323 | "evt_tag:x%x, index:x%x\n", | 3480 | "evt_tag:x%x, index:x%x\n", |
3324 | acqe_fcoe->event_tag, | 3481 | acqe_fip->event_tag, |
3325 | acqe_fcoe->index); | 3482 | acqe_fip->index); |
3326 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { | 3483 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { |
3327 | /* | 3484 | /* |
3328 | * During period of FCF discovery, read the FCF | 3485 | * During period of FCF discovery, read the FCF |
@@ -3333,8 +3490,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3333 | LOG_DISCOVERY, | 3490 | LOG_DISCOVERY, |
3334 | "2779 Read FCF (x%x) for updating " | 3491 | "2779 Read FCF (x%x) for updating " |
3335 | "roundrobin FCF failover bmask\n", | 3492 | "roundrobin FCF failover bmask\n", |
3336 | acqe_fcoe->index); | 3493 | acqe_fip->index); |
3337 | rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); | 3494 | rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index); |
3338 | } | 3495 | } |
3339 | 3496 | ||
3340 | /* If the FCF discovery is in progress, do nothing. */ | 3497 | /* If the FCF discovery is in progress, do nothing. */ |
@@ -3360,7 +3517,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3360 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 3517 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
3361 | "2770 Start FCF table scan per async FCF " | 3518 | "2770 Start FCF table scan per async FCF " |
3362 | "event, evt_tag:x%x, index:x%x\n", | 3519 | "event, evt_tag:x%x, index:x%x\n", |
3363 | acqe_fcoe->event_tag, acqe_fcoe->index); | 3520 | acqe_fip->event_tag, acqe_fip->index); |
3364 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, | 3521 | rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, |
3365 | LPFC_FCOE_FCF_GET_FIRST); | 3522 | LPFC_FCOE_FCF_GET_FIRST); |
3366 | if (rc) | 3523 | if (rc) |
@@ -3369,17 +3526,17 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3369 | "command failed (x%x)\n", rc); | 3526 | "command failed (x%x)\n", rc); |
3370 | break; | 3527 | break; |
3371 | 3528 | ||
3372 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: | 3529 | case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL: |
3373 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3530 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
3374 | "2548 FCF Table full count 0x%x tag 0x%x\n", | 3531 | "2548 FCF Table full count 0x%x tag 0x%x\n", |
3375 | bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), | 3532 | bf_get(lpfc_acqe_fip_fcf_count, acqe_fip), |
3376 | acqe_fcoe->event_tag); | 3533 | acqe_fip->event_tag); |
3377 | break; | 3534 | break; |
3378 | 3535 | ||
3379 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: | 3536 | case LPFC_FIP_EVENT_TYPE_FCF_DEAD: |
3380 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3537 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3381 | "2549 FCF (x%x) disconnected from network, " | 3538 | "2549 FCF (x%x) disconnected from network, " |
3382 | "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); | 3539 | "tag:x%x\n", acqe_fip->index, acqe_fip->event_tag); |
3383 | /* | 3540 | /* |
3384 | * If we are in the middle of FCF failover process, clear | 3541 | * If we are in the middle of FCF failover process, clear |
3385 | * the corresponding FCF bit in the roundrobin bitmap. | 3542 | * the corresponding FCF bit in the roundrobin bitmap. |
@@ -3388,13 +3545,13 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3388 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { | 3545 | if (phba->fcf.fcf_flag & FCF_DISCOVERY) { |
3389 | spin_unlock_irq(&phba->hbalock); | 3546 | spin_unlock_irq(&phba->hbalock); |
3390 | /* Update FLOGI FCF failover eligible FCF bmask */ | 3547 | /* Update FLOGI FCF failover eligible FCF bmask */ |
3391 | lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); | 3548 | lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index); |
3392 | break; | 3549 | break; |
3393 | } | 3550 | } |
3394 | spin_unlock_irq(&phba->hbalock); | 3551 | spin_unlock_irq(&phba->hbalock); |
3395 | 3552 | ||
3396 | /* If the event is not for currently used fcf do nothing */ | 3553 | /* If the event is not for currently used fcf do nothing */ |
3397 | if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) | 3554 | if (phba->fcf.current_rec.fcf_indx != acqe_fip->index) |
3398 | break; | 3555 | break; |
3399 | 3556 | ||
3400 | /* | 3557 | /* |
@@ -3411,7 +3568,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3411 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, | 3568 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, |
3412 | "2771 Start FCF fast failover process due to " | 3569 | "2771 Start FCF fast failover process due to " |
3413 | "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " | 3570 | "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " |
3414 | "\n", acqe_fcoe->event_tag, acqe_fcoe->index); | 3571 | "\n", acqe_fip->event_tag, acqe_fip->index); |
3415 | rc = lpfc_sli4_redisc_fcf_table(phba); | 3572 | rc = lpfc_sli4_redisc_fcf_table(phba); |
3416 | if (rc) { | 3573 | if (rc) { |
3417 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3574 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
@@ -3438,12 +3595,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3438 | lpfc_sli4_perform_all_vport_cvl(phba); | 3595 | lpfc_sli4_perform_all_vport_cvl(phba); |
3439 | } | 3596 | } |
3440 | break; | 3597 | break; |
3441 | case LPFC_FCOE_EVENT_TYPE_CVL: | 3598 | case LPFC_FIP_EVENT_TYPE_CVL: |
3442 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, | 3599 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, |
3443 | "2718 Clear Virtual Link Received for VPI 0x%x" | 3600 | "2718 Clear Virtual Link Received for VPI 0x%x" |
3444 | " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); | 3601 | " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag); |
3445 | vport = lpfc_find_vport_by_vpid(phba, | 3602 | vport = lpfc_find_vport_by_vpid(phba, |
3446 | acqe_fcoe->index - phba->vpi_base); | 3603 | acqe_fip->index - phba->vpi_base); |
3447 | ndlp = lpfc_sli4_perform_vport_cvl(vport); | 3604 | ndlp = lpfc_sli4_perform_vport_cvl(vport); |
3448 | if (!ndlp) | 3605 | if (!ndlp) |
3449 | break; | 3606 | break; |
@@ -3494,7 +3651,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3494 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | | 3651 | lpfc_printf_log(phba, KERN_INFO, LOG_FIP | |
3495 | LOG_DISCOVERY, | 3652 | LOG_DISCOVERY, |
3496 | "2773 Start FCF failover per CVL, " | 3653 | "2773 Start FCF failover per CVL, " |
3497 | "evt_tag:x%x\n", acqe_fcoe->event_tag); | 3654 | "evt_tag:x%x\n", acqe_fip->event_tag); |
3498 | rc = lpfc_sli4_redisc_fcf_table(phba); | 3655 | rc = lpfc_sli4_redisc_fcf_table(phba); |
3499 | if (rc) { | 3656 | if (rc) { |
3500 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | | 3657 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP | |
@@ -3522,7 +3679,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, | |||
3522 | default: | 3679 | default: |
3523 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3680 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
3524 | "0288 Unknown FCoE event type 0x%x event tag " | 3681 | "0288 Unknown FCoE event type 0x%x event tag " |
3525 | "0x%x\n", event_type, acqe_fcoe->event_tag); | 3682 | "0x%x\n", event_type, acqe_fip->event_tag); |
3526 | break; | 3683 | break; |
3527 | } | 3684 | } |
3528 | } | 3685 | } |
@@ -3599,8 +3756,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) | |||
3599 | &cq_event->cqe.acqe_link); | 3756 | &cq_event->cqe.acqe_link); |
3600 | break; | 3757 | break; |
3601 | case LPFC_TRAILER_CODE_FCOE: | 3758 | case LPFC_TRAILER_CODE_FCOE: |
3602 | lpfc_sli4_async_fcoe_evt(phba, | 3759 | lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); |
3603 | &cq_event->cqe.acqe_fcoe); | ||
3604 | break; | 3760 | break; |
3605 | case LPFC_TRAILER_CODE_DCBX: | 3761 | case LPFC_TRAILER_CODE_DCBX: |
3606 | lpfc_sli4_async_dcbx_evt(phba, | 3762 | lpfc_sli4_async_dcbx_evt(phba, |
@@ -3610,6 +3766,12 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) | |||
3610 | lpfc_sli4_async_grp5_evt(phba, | 3766 | lpfc_sli4_async_grp5_evt(phba, |
3611 | &cq_event->cqe.acqe_grp5); | 3767 | &cq_event->cqe.acqe_grp5); |
3612 | break; | 3768 | break; |
3769 | case LPFC_TRAILER_CODE_FC: | ||
3770 | lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); | ||
3771 | break; | ||
3772 | case LPFC_TRAILER_CODE_SLI: | ||
3773 | lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); | ||
3774 | break; | ||
3613 | default: | 3775 | default: |
3614 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 3776 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
3615 | "1804 Invalid asynchrous event code: " | 3777 | "1804 Invalid asynchrous event code: " |
@@ -3948,7 +4110,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
3948 | int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; | 4110 | int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; |
3949 | uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; | 4111 | uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; |
3950 | struct lpfc_mqe *mqe; | 4112 | struct lpfc_mqe *mqe; |
3951 | int longs; | 4113 | int longs, sli_family; |
3952 | 4114 | ||
3953 | /* Before proceed, wait for POST done and device ready */ | 4115 | /* Before proceed, wait for POST done and device ready */ |
3954 | rc = lpfc_sli4_post_status_check(phba); | 4116 | rc = lpfc_sli4_post_status_check(phba); |
@@ -3963,6 +4125,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
3963 | init_timer(&phba->hb_tmofunc); | 4125 | init_timer(&phba->hb_tmofunc); |
3964 | phba->hb_tmofunc.function = lpfc_hb_timeout; | 4126 | phba->hb_tmofunc.function = lpfc_hb_timeout; |
3965 | phba->hb_tmofunc.data = (unsigned long)phba; | 4127 | phba->hb_tmofunc.data = (unsigned long)phba; |
4128 | init_timer(&phba->rrq_tmr); | ||
4129 | phba->rrq_tmr.function = lpfc_rrq_timeout; | ||
4130 | phba->rrq_tmr.data = (unsigned long)phba; | ||
3966 | 4131 | ||
3967 | psli = &phba->sli; | 4132 | psli = &phba->sli; |
3968 | /* MBOX heartbeat timer */ | 4133 | /* MBOX heartbeat timer */ |
@@ -4010,12 +4175,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4010 | */ | 4175 | */ |
4011 | buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + | 4176 | buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + |
4012 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); | 4177 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); |
4013 | /* Feature Level 1 hardware is limited to 2 pages */ | 4178 | |
4014 | if ((bf_get(lpfc_sli_intf_featurelevel1, &phba->sli4_hba.sli_intf) == | 4179 | sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); |
4015 | LPFC_SLI_INTF_FEATURELEVEL1_1)) | 4180 | max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; |
4016 | max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; | 4181 | switch (sli_family) { |
4017 | else | 4182 | case LPFC_SLI_INTF_FAMILY_BE2: |
4018 | max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; | 4183 | case LPFC_SLI_INTF_FAMILY_BE3: |
4184 | /* There is a single hint for BE - 2 pages per BPL. */ | ||
4185 | if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == | ||
4186 | LPFC_SLI_INTF_SLI_HINT1_1) | ||
4187 | max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; | ||
4188 | break; | ||
4189 | case LPFC_SLI_INTF_FAMILY_LNCR_A0: | ||
4190 | case LPFC_SLI_INTF_FAMILY_LNCR_B0: | ||
4191 | default: | ||
4192 | break; | ||
4193 | } | ||
4019 | for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; | 4194 | for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; |
4020 | dma_buf_size < max_buf_size && buf_size > dma_buf_size; | 4195 | dma_buf_size < max_buf_size && buf_size > dma_buf_size; |
4021 | dma_buf_size = dma_buf_size << 1) | 4196 | dma_buf_size = dma_buf_size << 1) |
@@ -4070,6 +4245,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4070 | if (rc) | 4245 | if (rc) |
4071 | return -ENOMEM; | 4246 | return -ENOMEM; |
4072 | 4247 | ||
4248 | /* IF Type 2 ports get initialized now. */ | ||
4249 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == | ||
4250 | LPFC_SLI_INTF_IF_TYPE_2) { | ||
4251 | rc = lpfc_pci_function_reset(phba); | ||
4252 | if (unlikely(rc)) | ||
4253 | return -ENODEV; | ||
4254 | } | ||
4255 | |||
4073 | /* Create the bootstrap mailbox command */ | 4256 | /* Create the bootstrap mailbox command */ |
4074 | rc = lpfc_create_bootstrap_mbox(phba); | 4257 | rc = lpfc_create_bootstrap_mbox(phba); |
4075 | if (unlikely(rc)) | 4258 | if (unlikely(rc)) |
@@ -4080,19 +4263,18 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4080 | if (unlikely(rc)) | 4263 | if (unlikely(rc)) |
4081 | goto out_free_bsmbx; | 4264 | goto out_free_bsmbx; |
4082 | 4265 | ||
4083 | rc = lpfc_sli4_fw_cfg_check(phba); | ||
4084 | if (unlikely(rc)) | ||
4085 | goto out_free_bsmbx; | ||
4086 | |||
4087 | /* Set up the hba's configuration parameters. */ | 4266 | /* Set up the hba's configuration parameters. */ |
4088 | rc = lpfc_sli4_read_config(phba); | 4267 | rc = lpfc_sli4_read_config(phba); |
4089 | if (unlikely(rc)) | 4268 | if (unlikely(rc)) |
4090 | goto out_free_bsmbx; | 4269 | goto out_free_bsmbx; |
4091 | 4270 | ||
4092 | /* Perform a function reset */ | 4271 | /* IF Type 0 ports get initialized now. */ |
4093 | rc = lpfc_pci_function_reset(phba); | 4272 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
4094 | if (unlikely(rc)) | 4273 | LPFC_SLI_INTF_IF_TYPE_0) { |
4095 | goto out_free_bsmbx; | 4274 | rc = lpfc_pci_function_reset(phba); |
4275 | if (unlikely(rc)) | ||
4276 | goto out_free_bsmbx; | ||
4277 | } | ||
4096 | 4278 | ||
4097 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, | 4279 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
4098 | GFP_KERNEL); | 4280 | GFP_KERNEL); |
@@ -5190,97 +5372,183 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) | |||
5190 | int | 5372 | int |
5191 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) | 5373 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) |
5192 | { | 5374 | { |
5193 | struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg; | 5375 | struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg; |
5194 | int i, port_error = -ENODEV; | 5376 | struct lpfc_register reg_data; |
5377 | int i, port_error = 0; | ||
5378 | uint32_t if_type; | ||
5195 | 5379 | ||
5196 | if (!phba->sli4_hba.STAregaddr) | 5380 | if (!phba->sli4_hba.PSMPHRregaddr) |
5197 | return -ENODEV; | 5381 | return -ENODEV; |
5198 | 5382 | ||
5199 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ | 5383 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ |
5200 | for (i = 0; i < 3000; i++) { | 5384 | for (i = 0; i < 3000; i++) { |
5201 | sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); | 5385 | portsmphr_reg.word0 = readl(phba->sli4_hba.PSMPHRregaddr); |
5202 | /* Encounter fatal POST error, break out */ | 5386 | if (bf_get(lpfc_port_smphr_perr, &portsmphr_reg)) { |
5203 | if (bf_get(lpfc_hst_state_perr, &sta_reg)) { | 5387 | /* Port has a fatal POST error, break out */ |
5204 | port_error = -ENODEV; | 5388 | port_error = -ENODEV; |
5205 | break; | 5389 | break; |
5206 | } | 5390 | } |
5207 | if (LPFC_POST_STAGE_ARMFW_READY == | 5391 | if (LPFC_POST_STAGE_PORT_READY == |
5208 | bf_get(lpfc_hst_state_port_status, &sta_reg)) { | 5392 | bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)) |
5209 | port_error = 0; | ||
5210 | break; | 5393 | break; |
5211 | } | ||
5212 | msleep(10); | 5394 | msleep(10); |
5213 | } | 5395 | } |
5214 | 5396 | ||
5215 | if (port_error) | 5397 | /* |
5398 | * If there was a port error during POST, then don't proceed with | ||
5399 | * other register reads as the data may not be valid. Just exit. | ||
5400 | */ | ||
5401 | if (port_error) { | ||
5216 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5402 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5217 | "1408 Failure HBA POST Status: sta_reg=0x%x, " | 5403 | "1408 Port Failed POST - portsmphr=0x%x, " |
5218 | "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " | 5404 | "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, " |
5219 | "dl=x%x, pstatus=x%x\n", sta_reg.word0, | 5405 | "scr2=x%x, hscratch=x%x, pstatus=x%x\n", |
5220 | bf_get(lpfc_hst_state_perr, &sta_reg), | 5406 | portsmphr_reg.word0, |
5221 | bf_get(lpfc_hst_state_sfi, &sta_reg), | 5407 | bf_get(lpfc_port_smphr_perr, &portsmphr_reg), |
5222 | bf_get(lpfc_hst_state_nip, &sta_reg), | 5408 | bf_get(lpfc_port_smphr_sfi, &portsmphr_reg), |
5223 | bf_get(lpfc_hst_state_ipc, &sta_reg), | 5409 | bf_get(lpfc_port_smphr_nip, &portsmphr_reg), |
5224 | bf_get(lpfc_hst_state_xrom, &sta_reg), | 5410 | bf_get(lpfc_port_smphr_ipc, &portsmphr_reg), |
5225 | bf_get(lpfc_hst_state_dl, &sta_reg), | 5411 | bf_get(lpfc_port_smphr_scr1, &portsmphr_reg), |
5226 | bf_get(lpfc_hst_state_port_status, &sta_reg)); | 5412 | bf_get(lpfc_port_smphr_scr2, &portsmphr_reg), |
5227 | 5413 | bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg), | |
5228 | /* Log device information */ | 5414 | bf_get(lpfc_port_smphr_port_status, &portsmphr_reg)); |
5229 | phba->sli4_hba.sli_intf.word0 = readl(phba->sli4_hba.SLIINTFregaddr); | 5415 | } else { |
5230 | if (bf_get(lpfc_sli_intf_valid, | ||
5231 | &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_VALID) { | ||
5232 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 5416 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
5233 | "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " | 5417 | "2534 Device Info: SLIFamily=0x%x, " |
5234 | "FeatureL1=0x%x, FeatureL2=0x%x\n", | 5418 | "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, " |
5419 | "SLIHint_2=0x%x, FT=0x%x\n", | ||
5235 | bf_get(lpfc_sli_intf_sli_family, | 5420 | bf_get(lpfc_sli_intf_sli_family, |
5236 | &phba->sli4_hba.sli_intf), | 5421 | &phba->sli4_hba.sli_intf), |
5237 | bf_get(lpfc_sli_intf_slirev, | 5422 | bf_get(lpfc_sli_intf_slirev, |
5238 | &phba->sli4_hba.sli_intf), | 5423 | &phba->sli4_hba.sli_intf), |
5239 | bf_get(lpfc_sli_intf_featurelevel1, | 5424 | bf_get(lpfc_sli_intf_if_type, |
5425 | &phba->sli4_hba.sli_intf), | ||
5426 | bf_get(lpfc_sli_intf_sli_hint1, | ||
5240 | &phba->sli4_hba.sli_intf), | 5427 | &phba->sli4_hba.sli_intf), |
5241 | bf_get(lpfc_sli_intf_featurelevel2, | 5428 | bf_get(lpfc_sli_intf_sli_hint2, |
5429 | &phba->sli4_hba.sli_intf), | ||
5430 | bf_get(lpfc_sli_intf_func_type, | ||
5242 | &phba->sli4_hba.sli_intf)); | 5431 | &phba->sli4_hba.sli_intf)); |
5432 | /* | ||
5433 | * Check for other Port errors during the initialization | ||
5434 | * process. Fail the load if the port did not come up | ||
5435 | * correctly. | ||
5436 | */ | ||
5437 | if_type = bf_get(lpfc_sli_intf_if_type, | ||
5438 | &phba->sli4_hba.sli_intf); | ||
5439 | switch (if_type) { | ||
5440 | case LPFC_SLI_INTF_IF_TYPE_0: | ||
5441 | phba->sli4_hba.ue_mask_lo = | ||
5442 | readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr); | ||
5443 | phba->sli4_hba.ue_mask_hi = | ||
5444 | readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr); | ||
5445 | uerrlo_reg.word0 = | ||
5446 | readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); | ||
5447 | uerrhi_reg.word0 = | ||
5448 | readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); | ||
5449 | if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || | ||
5450 | (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { | ||
5451 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5452 | "1422 Unrecoverable Error " | ||
5453 | "Detected during POST " | ||
5454 | "uerr_lo_reg=0x%x, " | ||
5455 | "uerr_hi_reg=0x%x, " | ||
5456 | "ue_mask_lo_reg=0x%x, " | ||
5457 | "ue_mask_hi_reg=0x%x\n", | ||
5458 | uerrlo_reg.word0, | ||
5459 | uerrhi_reg.word0, | ||
5460 | phba->sli4_hba.ue_mask_lo, | ||
5461 | phba->sli4_hba.ue_mask_hi); | ||
5462 | port_error = -ENODEV; | ||
5463 | } | ||
5464 | break; | ||
5465 | case LPFC_SLI_INTF_IF_TYPE_2: | ||
5466 | /* Final checks. The port status should be clean. */ | ||
5467 | reg_data.word0 = | ||
5468 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | ||
5469 | if (bf_get(lpfc_sliport_status_err, ®_data)) { | ||
5470 | phba->work_status[0] = | ||
5471 | readl(phba->sli4_hba.u.if_type2. | ||
5472 | ERR1regaddr); | ||
5473 | phba->work_status[1] = | ||
5474 | readl(phba->sli4_hba.u.if_type2. | ||
5475 | ERR2regaddr); | ||
5476 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5477 | "2888 Port Error Detected " | ||
5478 | "during POST: " | ||
5479 | "port status reg 0x%x, " | ||
5480 | "port_smphr reg 0x%x, " | ||
5481 | "error 1=0x%x, error 2=0x%x\n", | ||
5482 | reg_data.word0, | ||
5483 | portsmphr_reg.word0, | ||
5484 | phba->work_status[0], | ||
5485 | phba->work_status[1]); | ||
5486 | port_error = -ENODEV; | ||
5487 | } | ||
5488 | break; | ||
5489 | case LPFC_SLI_INTF_IF_TYPE_1: | ||
5490 | default: | ||
5491 | break; | ||
5492 | } | ||
5243 | } | 5493 | } |
5244 | phba->sli4_hba.ue_mask_lo = readl(phba->sli4_hba.UEMASKLOregaddr); | ||
5245 | phba->sli4_hba.ue_mask_hi = readl(phba->sli4_hba.UEMASKHIregaddr); | ||
5246 | /* With uncoverable error, log the error message and return error */ | ||
5247 | uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); | ||
5248 | uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); | ||
5249 | if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) || | ||
5250 | (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) { | ||
5251 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5252 | "1422 HBA Unrecoverable error: " | ||
5253 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | ||
5254 | "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", | ||
5255 | uerrlo_reg.word0, uerrhi_reg.word0, | ||
5256 | phba->sli4_hba.ue_mask_lo, | ||
5257 | phba->sli4_hba.ue_mask_hi); | ||
5258 | return -ENODEV; | ||
5259 | } | ||
5260 | |||
5261 | return port_error; | 5494 | return port_error; |
5262 | } | 5495 | } |
5263 | 5496 | ||
5264 | /** | 5497 | /** |
5265 | * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. | 5498 | * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. |
5266 | * @phba: pointer to lpfc hba data structure. | 5499 | * @phba: pointer to lpfc hba data structure. |
5500 | * @if_type: The SLI4 interface type getting configured. | ||
5267 | * | 5501 | * |
5268 | * This routine is invoked to set up SLI4 BAR0 PCI config space register | 5502 | * This routine is invoked to set up SLI4 BAR0 PCI config space register |
5269 | * memory map. | 5503 | * memory map. |
5270 | **/ | 5504 | **/ |
5271 | static void | 5505 | static void |
5272 | lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) | 5506 | lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type) |
5273 | { | 5507 | { |
5274 | phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5508 | switch (if_type) { |
5275 | LPFC_UERR_STATUS_LO; | 5509 | case LPFC_SLI_INTF_IF_TYPE_0: |
5276 | phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5510 | phba->sli4_hba.u.if_type0.UERRLOregaddr = |
5277 | LPFC_UERR_STATUS_HI; | 5511 | phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO; |
5278 | phba->sli4_hba.UEMASKLOregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5512 | phba->sli4_hba.u.if_type0.UERRHIregaddr = |
5279 | LPFC_UE_MASK_LO; | 5513 | phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI; |
5280 | phba->sli4_hba.UEMASKHIregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5514 | phba->sli4_hba.u.if_type0.UEMASKLOregaddr = |
5281 | LPFC_UE_MASK_HI; | 5515 | phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO; |
5282 | phba->sli4_hba.SLIINTFregaddr = phba->sli4_hba.conf_regs_memmap_p + | 5516 | phba->sli4_hba.u.if_type0.UEMASKHIregaddr = |
5283 | LPFC_SLI_INTF; | 5517 | phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI; |
5518 | phba->sli4_hba.SLIINTFregaddr = | ||
5519 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; | ||
5520 | break; | ||
5521 | case LPFC_SLI_INTF_IF_TYPE_2: | ||
5522 | phba->sli4_hba.u.if_type2.ERR1regaddr = | ||
5523 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_1; | ||
5524 | phba->sli4_hba.u.if_type2.ERR2regaddr = | ||
5525 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_ERR_2; | ||
5526 | phba->sli4_hba.u.if_type2.CTRLregaddr = | ||
5527 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_CNTRL; | ||
5528 | phba->sli4_hba.u.if_type2.STATUSregaddr = | ||
5529 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_STATUS; | ||
5530 | phba->sli4_hba.SLIINTFregaddr = | ||
5531 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF; | ||
5532 | phba->sli4_hba.PSMPHRregaddr = | ||
5533 | phba->sli4_hba.conf_regs_memmap_p + LPFC_SLIPORT_IF2_SMPHR; | ||
5534 | phba->sli4_hba.RQDBregaddr = | ||
5535 | phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL; | ||
5536 | phba->sli4_hba.WQDBregaddr = | ||
5537 | phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL; | ||
5538 | phba->sli4_hba.EQCQDBregaddr = | ||
5539 | phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL; | ||
5540 | phba->sli4_hba.MQDBregaddr = | ||
5541 | phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL; | ||
5542 | phba->sli4_hba.BMBXregaddr = | ||
5543 | phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX; | ||
5544 | break; | ||
5545 | case LPFC_SLI_INTF_IF_TYPE_1: | ||
5546 | default: | ||
5547 | dev_printk(KERN_ERR, &phba->pcidev->dev, | ||
5548 | "FATAL - unsupported SLI4 interface type - %d\n", | ||
5549 | if_type); | ||
5550 | break; | ||
5551 | } | ||
5284 | } | 5552 | } |
5285 | 5553 | ||
5286 | /** | 5554 | /** |
@@ -5293,16 +5561,14 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) | |||
5293 | static void | 5561 | static void |
5294 | lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) | 5562 | lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) |
5295 | { | 5563 | { |
5296 | 5564 | phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | |
5297 | phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | 5565 | LPFC_SLIPORT_IF0_SMPHR; |
5298 | LPFC_HST_STATE; | ||
5299 | phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | 5566 | phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + |
5300 | LPFC_HST_ISR0; | 5567 | LPFC_HST_ISR0; |
5301 | phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | 5568 | phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + |
5302 | LPFC_HST_IMR0; | 5569 | LPFC_HST_IMR0; |
5303 | phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | 5570 | phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + |
5304 | LPFC_HST_ISCR0; | 5571 | LPFC_HST_ISCR0; |
5305 | return; | ||
5306 | } | 5572 | } |
5307 | 5573 | ||
5308 | /** | 5574 | /** |
@@ -5542,11 +5808,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) | |||
5542 | } | 5808 | } |
5543 | 5809 | ||
5544 | /** | 5810 | /** |
5545 | * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. | 5811 | * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port. |
5546 | * @phba: pointer to lpfc hba data structure. | 5812 | * @phba: pointer to lpfc hba data structure. |
5547 | * | 5813 | * |
5548 | * This routine is invoked to setup the host-side endian order to the | 5814 | * This routine is invoked to setup the port-side endian order when |
5549 | * HBA consistent with the SLI-4 interface spec. | 5815 | * the port if_type is 0. This routine has no function for other |
5816 | * if_types. | ||
5550 | * | 5817 | * |
5551 | * Return codes | 5818 | * Return codes |
5552 | * 0 - successful | 5819 | * 0 - successful |
@@ -5557,34 +5824,44 @@ static int | |||
5557 | lpfc_setup_endian_order(struct lpfc_hba *phba) | 5824 | lpfc_setup_endian_order(struct lpfc_hba *phba) |
5558 | { | 5825 | { |
5559 | LPFC_MBOXQ_t *mboxq; | 5826 | LPFC_MBOXQ_t *mboxq; |
5560 | uint32_t rc = 0; | 5827 | uint32_t if_type, rc = 0; |
5561 | uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, | 5828 | uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, |
5562 | HOST_ENDIAN_HIGH_WORD1}; | 5829 | HOST_ENDIAN_HIGH_WORD1}; |
5563 | 5830 | ||
5564 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 5831 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
5565 | if (!mboxq) { | 5832 | switch (if_type) { |
5566 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5833 | case LPFC_SLI_INTF_IF_TYPE_0: |
5567 | "0492 Unable to allocate memory for issuing " | 5834 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
5568 | "SLI_CONFIG_SPECIAL mailbox command\n"); | 5835 | GFP_KERNEL); |
5569 | return -ENOMEM; | 5836 | if (!mboxq) { |
5570 | } | 5837 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5838 | "0492 Unable to allocate memory for " | ||
5839 | "issuing SLI_CONFIG_SPECIAL mailbox " | ||
5840 | "command\n"); | ||
5841 | return -ENOMEM; | ||
5842 | } | ||
5571 | 5843 | ||
5572 | /* | 5844 | /* |
5573 | * The SLI4_CONFIG_SPECIAL mailbox command requires the first two | 5845 | * The SLI4_CONFIG_SPECIAL mailbox command requires the first |
5574 | * words to contain special data values and no other data. | 5846 | * two words to contain special data values and no other data. |
5575 | */ | 5847 | */ |
5576 | memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); | 5848 | memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); |
5577 | memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); | 5849 | memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); |
5578 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | 5850 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
5579 | if (rc != MBX_SUCCESS) { | 5851 | if (rc != MBX_SUCCESS) { |
5580 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5852 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5581 | "0493 SLI_CONFIG_SPECIAL mailbox failed with " | 5853 | "0493 SLI_CONFIG_SPECIAL mailbox " |
5582 | "status x%x\n", | 5854 | "failed with status x%x\n", |
5583 | rc); | 5855 | rc); |
5584 | rc = -EIO; | 5856 | rc = -EIO; |
5857 | } | ||
5858 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
5859 | break; | ||
5860 | case LPFC_SLI_INTF_IF_TYPE_2: | ||
5861 | case LPFC_SLI_INTF_IF_TYPE_1: | ||
5862 | default: | ||
5863 | break; | ||
5585 | } | 5864 | } |
5586 | |||
5587 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
5588 | return rc; | 5865 | return rc; |
5589 | } | 5866 | } |
5590 | 5867 | ||
@@ -6416,36 +6693,124 @@ int | |||
6416 | lpfc_pci_function_reset(struct lpfc_hba *phba) | 6693 | lpfc_pci_function_reset(struct lpfc_hba *phba) |
6417 | { | 6694 | { |
6418 | LPFC_MBOXQ_t *mboxq; | 6695 | LPFC_MBOXQ_t *mboxq; |
6419 | uint32_t rc = 0; | 6696 | uint32_t rc = 0, if_type; |
6420 | uint32_t shdr_status, shdr_add_status; | 6697 | uint32_t shdr_status, shdr_add_status; |
6698 | uint32_t rdy_chk, num_resets = 0, reset_again = 0; | ||
6421 | union lpfc_sli4_cfg_shdr *shdr; | 6699 | union lpfc_sli4_cfg_shdr *shdr; |
6700 | struct lpfc_register reg_data; | ||
6422 | 6701 | ||
6423 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 6702 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
6424 | if (!mboxq) { | 6703 | switch (if_type) { |
6425 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 6704 | case LPFC_SLI_INTF_IF_TYPE_0: |
6426 | "0494 Unable to allocate memory for issuing " | 6705 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
6427 | "SLI_FUNCTION_RESET mailbox command\n"); | 6706 | GFP_KERNEL); |
6428 | return -ENOMEM; | 6707 | if (!mboxq) { |
6429 | } | 6708 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
6709 | "0494 Unable to allocate memory for " | ||
6710 | "issuing SLI_FUNCTION_RESET mailbox " | ||
6711 | "command\n"); | ||
6712 | return -ENOMEM; | ||
6713 | } | ||
6430 | 6714 | ||
6431 | /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ | 6715 | /* Setup PCI function reset mailbox-ioctl command */ |
6432 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, | 6716 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, |
6433 | LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, | 6717 | LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, |
6434 | LPFC_SLI4_MBX_EMBED); | 6718 | LPFC_SLI4_MBX_EMBED); |
6435 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | 6719 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
6436 | shdr = (union lpfc_sli4_cfg_shdr *) | 6720 | shdr = (union lpfc_sli4_cfg_shdr *) |
6437 | &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; | 6721 | &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; |
6438 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | 6722 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); |
6439 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | 6723 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, |
6440 | if (rc != MBX_TIMEOUT) | 6724 | &shdr->response); |
6441 | mempool_free(mboxq, phba->mbox_mem_pool); | 6725 | if (rc != MBX_TIMEOUT) |
6442 | if (shdr_status || shdr_add_status || rc) { | 6726 | mempool_free(mboxq, phba->mbox_mem_pool); |
6443 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 6727 | if (shdr_status || shdr_add_status || rc) { |
6444 | "0495 SLI_FUNCTION_RESET mailbox failed with " | 6728 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
6445 | "status x%x add_status x%x, mbx status x%x\n", | 6729 | "0495 SLI_FUNCTION_RESET mailbox " |
6446 | shdr_status, shdr_add_status, rc); | 6730 | "failed with status x%x add_status x%x," |
6447 | rc = -ENXIO; | 6731 | " mbx status x%x\n", |
6732 | shdr_status, shdr_add_status, rc); | ||
6733 | rc = -ENXIO; | ||
6734 | } | ||
6735 | break; | ||
6736 | case LPFC_SLI_INTF_IF_TYPE_2: | ||
6737 | for (num_resets = 0; | ||
6738 | num_resets < MAX_IF_TYPE_2_RESETS; | ||
6739 | num_resets++) { | ||
6740 | reg_data.word0 = 0; | ||
6741 | bf_set(lpfc_sliport_ctrl_end, ®_data, | ||
6742 | LPFC_SLIPORT_LITTLE_ENDIAN); | ||
6743 | bf_set(lpfc_sliport_ctrl_ip, ®_data, | ||
6744 | LPFC_SLIPORT_INIT_PORT); | ||
6745 | writel(reg_data.word0, phba->sli4_hba.u.if_type2. | ||
6746 | CTRLregaddr); | ||
6747 | |||
6748 | /* | ||
6749 | * Poll the Port Status Register and wait for RDY for | ||
6750 | * up to 10 seconds. If the port doesn't respond, treat | ||
6751 | * it as an error. If the port responds with RN, start | ||
6752 | * the loop again. | ||
6753 | */ | ||
6754 | for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { | ||
6755 | reg_data.word0 = | ||
6756 | readl(phba->sli4_hba.u.if_type2. | ||
6757 | STATUSregaddr); | ||
6758 | if (bf_get(lpfc_sliport_status_rdy, ®_data)) | ||
6759 | break; | ||
6760 | if (bf_get(lpfc_sliport_status_rn, ®_data)) { | ||
6761 | reset_again++; | ||
6762 | break; | ||
6763 | } | ||
6764 | msleep(10); | ||
6765 | } | ||
6766 | |||
6767 | /* | ||
6768 | * If the port responds to the init request with | ||
6769 | * reset needed, delay for a bit and restart the loop. | ||
6770 | */ | ||
6771 | if (reset_again) { | ||
6772 | msleep(10); | ||
6773 | reset_again = 0; | ||
6774 | continue; | ||
6775 | } | ||
6776 | |||
6777 | /* Detect any port errors. */ | ||
6778 | reg_data.word0 = readl(phba->sli4_hba.u.if_type2. | ||
6779 | STATUSregaddr); | ||
6780 | if ((bf_get(lpfc_sliport_status_err, ®_data)) || | ||
6781 | (rdy_chk >= 1000)) { | ||
6782 | phba->work_status[0] = readl( | ||
6783 | phba->sli4_hba.u.if_type2.ERR1regaddr); | ||
6784 | phba->work_status[1] = readl( | ||
6785 | phba->sli4_hba.u.if_type2.ERR2regaddr); | ||
6786 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
6787 | "2890 Port Error Detected " | ||
6788 | "during Port Reset: " | ||
6789 | "port status reg 0x%x, " | ||
6790 | "error 1=0x%x, error 2=0x%x\n", | ||
6791 | reg_data.word0, | ||
6792 | phba->work_status[0], | ||
6793 | phba->work_status[1]); | ||
6794 | rc = -ENODEV; | ||
6795 | } | ||
6796 | |||
6797 | /* | ||
6798 | * Terminate the outer loop provided the Port indicated | ||
6799 | * ready within 10 seconds. | ||
6800 | */ | ||
6801 | if (rdy_chk < 1000) | ||
6802 | break; | ||
6803 | } | ||
6804 | break; | ||
6805 | case LPFC_SLI_INTF_IF_TYPE_1: | ||
6806 | default: | ||
6807 | break; | ||
6448 | } | 6808 | } |
6809 | |||
6810 | /* Catch the not-ready port failure after a port reset. */ | ||
6811 | if (num_resets >= MAX_IF_TYPE_2_RESETS) | ||
6812 | rc = -ENODEV; | ||
6813 | |||
6449 | return rc; | 6814 | return rc; |
6450 | } | 6815 | } |
6451 | 6816 | ||
@@ -6536,6 +6901,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
6536 | struct pci_dev *pdev; | 6901 | struct pci_dev *pdev; |
6537 | unsigned long bar0map_len, bar1map_len, bar2map_len; | 6902 | unsigned long bar0map_len, bar1map_len, bar2map_len; |
6538 | int error = -ENODEV; | 6903 | int error = -ENODEV; |
6904 | uint32_t if_type; | ||
6539 | 6905 | ||
6540 | /* Obtain PCI device reference */ | 6906 | /* Obtain PCI device reference */ |
6541 | if (!phba->pcidev) | 6907 | if (!phba->pcidev) |
@@ -6552,61 +6918,105 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
6552 | } | 6918 | } |
6553 | } | 6919 | } |
6554 | 6920 | ||
6555 | /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the | 6921 | /* |
6556 | * number of bytes required by each mapping. They are actually | 6922 | * The BARs and register set definitions and offset locations are |
6557 | * mapping to the PCI BAR regions 0 or 1, 2, and 4 by the SLI4 device. | 6923 | * dependent on the if_type. |
6924 | */ | ||
6925 | if (pci_read_config_dword(pdev, LPFC_SLI_INTF, | ||
6926 | &phba->sli4_hba.sli_intf.word0)) { | ||
6927 | return error; | ||
6928 | } | ||
6929 | |||
6930 | /* There is no SLI3 fallback for SLI4 devices. */ | ||
6931 | if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) != | ||
6932 | LPFC_SLI_INTF_VALID) { | ||
6933 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
6934 | "2894 SLI_INTF reg contents invalid " | ||
6935 | "sli_intf reg 0x%x\n", | ||
6936 | phba->sli4_hba.sli_intf.word0); | ||
6937 | return error; | ||
6938 | } | ||
6939 | |||
6940 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); | ||
6941 | /* | ||
6942 | * Get the bus address of SLI4 device Bar regions and the | ||
6943 | * number of bytes required by each mapping. The mapping of the | ||
6944 | * particular PCI BARs regions is dependent on the type of | ||
6945 | * SLI4 device. | ||
6558 | */ | 6946 | */ |
6559 | if (pci_resource_start(pdev, 0)) { | 6947 | if (pci_resource_start(pdev, 0)) { |
6560 | phba->pci_bar0_map = pci_resource_start(pdev, 0); | 6948 | phba->pci_bar0_map = pci_resource_start(pdev, 0); |
6561 | bar0map_len = pci_resource_len(pdev, 0); | 6949 | bar0map_len = pci_resource_len(pdev, 0); |
6950 | |||
6951 | /* | ||
6952 | * Map SLI4 PCI Config Space Register base to a kernel virtual | ||
6953 | * addr | ||
6954 | */ | ||
6955 | phba->sli4_hba.conf_regs_memmap_p = | ||
6956 | ioremap(phba->pci_bar0_map, bar0map_len); | ||
6957 | if (!phba->sli4_hba.conf_regs_memmap_p) { | ||
6958 | dev_printk(KERN_ERR, &pdev->dev, | ||
6959 | "ioremap failed for SLI4 PCI config " | ||
6960 | "registers.\n"); | ||
6961 | goto out; | ||
6962 | } | ||
6963 | /* Set up BAR0 PCI config space register memory map */ | ||
6964 | lpfc_sli4_bar0_register_memmap(phba, if_type); | ||
6562 | } else { | 6965 | } else { |
6563 | phba->pci_bar0_map = pci_resource_start(pdev, 1); | 6966 | phba->pci_bar0_map = pci_resource_start(pdev, 1); |
6564 | bar0map_len = pci_resource_len(pdev, 1); | 6967 | bar0map_len = pci_resource_len(pdev, 1); |
6565 | } | 6968 | if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { |
6566 | phba->pci_bar1_map = pci_resource_start(pdev, 2); | 6969 | dev_printk(KERN_ERR, &pdev->dev, |
6567 | bar1map_len = pci_resource_len(pdev, 2); | 6970 | "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); |
6568 | 6971 | goto out; | |
6569 | phba->pci_bar2_map = pci_resource_start(pdev, 4); | 6972 | } |
6570 | bar2map_len = pci_resource_len(pdev, 4); | 6973 | phba->sli4_hba.conf_regs_memmap_p = |
6571 | |||
6572 | /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ | ||
6573 | phba->sli4_hba.conf_regs_memmap_p = | ||
6574 | ioremap(phba->pci_bar0_map, bar0map_len); | 6974 | ioremap(phba->pci_bar0_map, bar0map_len); |
6575 | if (!phba->sli4_hba.conf_regs_memmap_p) { | 6975 | if (!phba->sli4_hba.conf_regs_memmap_p) { |
6576 | dev_printk(KERN_ERR, &pdev->dev, | 6976 | dev_printk(KERN_ERR, &pdev->dev, |
6577 | "ioremap failed for SLI4 PCI config registers.\n"); | 6977 | "ioremap failed for SLI4 PCI config " |
6578 | goto out; | 6978 | "registers.\n"); |
6979 | goto out; | ||
6980 | } | ||
6981 | lpfc_sli4_bar0_register_memmap(phba, if_type); | ||
6579 | } | 6982 | } |
6580 | 6983 | ||
6581 | /* Map SLI4 HBA Control Register base to a kernel virtual address. */ | 6984 | if (pci_resource_start(pdev, 2)) { |
6582 | phba->sli4_hba.ctrl_regs_memmap_p = | 6985 | /* |
6986 | * Map SLI4 if type 0 HBA Control Register base to a kernel | ||
6987 | * virtual address and setup the registers. | ||
6988 | */ | ||
6989 | phba->pci_bar1_map = pci_resource_start(pdev, 2); | ||
6990 | bar1map_len = pci_resource_len(pdev, 2); | ||
6991 | phba->sli4_hba.ctrl_regs_memmap_p = | ||
6583 | ioremap(phba->pci_bar1_map, bar1map_len); | 6992 | ioremap(phba->pci_bar1_map, bar1map_len); |
6584 | if (!phba->sli4_hba.ctrl_regs_memmap_p) { | 6993 | if (!phba->sli4_hba.ctrl_regs_memmap_p) { |
6585 | dev_printk(KERN_ERR, &pdev->dev, | 6994 | dev_printk(KERN_ERR, &pdev->dev, |
6586 | "ioremap failed for SLI4 HBA control registers.\n"); | 6995 | "ioremap failed for SLI4 HBA control registers.\n"); |
6587 | goto out_iounmap_conf; | 6996 | goto out_iounmap_conf; |
6997 | } | ||
6998 | lpfc_sli4_bar1_register_memmap(phba); | ||
6588 | } | 6999 | } |
6589 | 7000 | ||
6590 | /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ | 7001 | if (pci_resource_start(pdev, 4)) { |
6591 | phba->sli4_hba.drbl_regs_memmap_p = | 7002 | /* |
7003 | * Map SLI4 if type 0 HBA Doorbell Register base to a kernel | ||
7004 | * virtual address and setup the registers. | ||
7005 | */ | ||
7006 | phba->pci_bar2_map = pci_resource_start(pdev, 4); | ||
7007 | bar2map_len = pci_resource_len(pdev, 4); | ||
7008 | phba->sli4_hba.drbl_regs_memmap_p = | ||
6592 | ioremap(phba->pci_bar2_map, bar2map_len); | 7009 | ioremap(phba->pci_bar2_map, bar2map_len); |
6593 | if (!phba->sli4_hba.drbl_regs_memmap_p) { | 7010 | if (!phba->sli4_hba.drbl_regs_memmap_p) { |
6594 | dev_printk(KERN_ERR, &pdev->dev, | 7011 | dev_printk(KERN_ERR, &pdev->dev, |
6595 | "ioremap failed for SLI4 HBA doorbell registers.\n"); | 7012 | "ioremap failed for SLI4 HBA doorbell registers.\n"); |
6596 | goto out_iounmap_ctrl; | 7013 | goto out_iounmap_ctrl; |
7014 | } | ||
7015 | error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); | ||
7016 | if (error) | ||
7017 | goto out_iounmap_all; | ||
6597 | } | 7018 | } |
6598 | 7019 | ||
6599 | /* Set up BAR0 PCI config space register memory map */ | ||
6600 | lpfc_sli4_bar0_register_memmap(phba); | ||
6601 | |||
6602 | /* Set up BAR1 register memory map */ | ||
6603 | lpfc_sli4_bar1_register_memmap(phba); | ||
6604 | |||
6605 | /* Set up BAR2 register memory map */ | ||
6606 | error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); | ||
6607 | if (error) | ||
6608 | goto out_iounmap_all; | ||
6609 | |||
6610 | return 0; | 7020 | return 0; |
6611 | 7021 | ||
6612 | out_iounmap_all: | 7022 | out_iounmap_all: |
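The reworked lpfc_sli4_pci_mem_setup() now reads the SLI_INTF register from PCI config space first, validates it, and only then decides which BARs to map for the detected interface type, unwinding earlier mappings on failure. Stripped of the lpfc register-map helpers, the flow looks roughly like the sketch below; struct bar_maps is a hypothetical stand-in for the sli4_hba fields, and only the first two BAR mappings are shown.

    #include <linux/pci.h>
    #include <linux/io.h>

    struct bar_maps {                       /* hypothetical container */
            void __iomem *conf_regs;
            void __iomem *ctrl_regs;
    };

    static int sli4_map_bars(struct pci_dev *pdev, struct bar_maps *maps)
    {
            u32 sli_intf;

            maps->conf_regs = NULL;
            maps->ctrl_regs = NULL;

            /* The interface type lives in a config-space register, not in a BAR. */
            if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &sli_intf))
                    return -ENODEV;
            /* ...validate the register and extract if_type here... */

            if (pci_resource_start(pdev, 0)) {
                    maps->conf_regs = ioremap(pci_resource_start(pdev, 0),
                                              pci_resource_len(pdev, 0));
                    if (!maps->conf_regs)
                            return -ENOMEM;
                    /* lpfc_sli4_bar0_register_memmap(phba, if_type) goes here */
            }
            if (pci_resource_start(pdev, 2)) {      /* if_type 0 control registers */
                    maps->ctrl_regs = ioremap(pci_resource_start(pdev, 2),
                                              pci_resource_len(pdev, 2));
                    if (!maps->ctrl_regs) {
                            if (maps->conf_regs)
                                    iounmap(maps->conf_regs);
                            return -ENOMEM;
                    }
            }
            return 0;
    }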
@@ -8149,6 +8559,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
8149 | goto out_unset_driver_resource_s4; | 8559 | goto out_unset_driver_resource_s4; |
8150 | } | 8560 | } |
8151 | 8561 | ||
8562 | INIT_LIST_HEAD(&phba->active_rrq_list); | ||
8563 | |||
8152 | /* Set up common device driver resources */ | 8564 | /* Set up common device driver resources */ |
8153 | error = lpfc_setup_driver_resource_phase2(phba); | 8565 | error = lpfc_setup_driver_resource_phase2(phba); |
8154 | if (error) { | 8566 | if (error) { |
@@ -8218,7 +8630,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
8218 | "0451 Configure interrupt mode (%d) " | 8630 | "0451 Configure interrupt mode (%d) " |
8219 | "failed active interrupt test.\n", | 8631 | "failed active interrupt test.\n", |
8220 | intr_mode); | 8632 | intr_mode); |
8221 | /* Unset the preivous SLI-4 HBA setup */ | 8633 | /* Unset the previous SLI-4 HBA setup. */ |
8634 | /* | ||
8635 | * TODO: Is this operation compatible with IF TYPE 2 | ||
8636 | * devices? All port state is deleted and cleared. | ||
8637 | */ | ||
8222 | lpfc_sli4_unset_hba(phba); | 8638 | lpfc_sli4_unset_hba(phba); |
8223 | /* Try next level of interrupt mode */ | 8639 | /* Try next level of interrupt mode */ |
8224 | cfg_mode = --intr_mode; | 8640 | cfg_mode = --intr_mode; |
@@ -8990,6 +9406,10 @@ static struct pci_device_id lpfc_id_table[] = { | |||
8990 | PCI_ANY_ID, PCI_ANY_ID, }, | 9406 | PCI_ANY_ID, PCI_ANY_ID, }, |
8991 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, | 9407 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS, |
8992 | PCI_ANY_ID, PCI_ANY_ID, }, | 9408 | PCI_ANY_ID, PCI_ANY_ID, }, |
9409 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC, | ||
9410 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
9411 | {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE, | ||
9412 | PCI_ANY_ID, PCI_ANY_ID, }, | ||
8993 | { 0 } | 9413 | { 0 } |
8994 | }; | 9414 | }; |
8995 | 9415 | ||
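The two new table entries let the PCI core bind lpfc to the Lancer FC and FCoE functions; the table itself is the standard probe/modalias mechanism. A generic reminder of the shape, reusing the device-ID macros from the hunk above:

    static struct pci_device_id example_id_table[] = {
            {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
                    PCI_ANY_ID, PCI_ANY_ID, },
            {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
                    PCI_ANY_ID, PCI_ANY_ID, },
            { 0 }   /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, example_id_table);     /* exported for hotplug matching */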
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index bb59e9273126..e3b790e59156 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h | |||
@@ -33,7 +33,7 @@ | |||
33 | #define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ | 33 | #define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ |
34 | #define LOG_LIBDFC 0x00002000 /* Libdfc events */ | 34 | #define LOG_LIBDFC 0x00002000 /* Libdfc events */ |
35 | #define LOG_VPORT 0x00004000 /* NPIV events */ | 35 | #define LOG_VPORT 0x00004000 /* NPIV events */ |
36 | #define LOF_SECURITY 0x00008000 /* Security events */ | 36 | #define LOG_SECURITY 0x00008000 /* Security events */ |
37 | #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ | 37 | #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ |
38 | #define LOG_FIP 0x00020000 /* FIP events */ | 38 | #define LOG_FIP 0x00020000 /* FIP events */ |
39 | #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ | 39 | #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 62d0957e1d4c..23403c650207 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -263,18 +263,19 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
263 | } | 263 | } |
264 | 264 | ||
265 | /** | 265 | /** |
266 | * lpfc_read_la - Prepare a mailbox command for reading HBA link attention | 266 | * lpfc_read_topology - Prepare a mailbox command for reading HBA topology |
267 | * @phba: pointer to lpfc hba data structure. | 267 | * @phba: pointer to lpfc hba data structure. |
268 | * @pmb: pointer to the driver internal queue element for mailbox command. | 268 | * @pmb: pointer to the driver internal queue element for mailbox command. |
269 | * @mp: DMA buffer memory for reading the link attention information into. | 269 | * @mp: DMA buffer memory for reading the link attention information into. |
270 | * | 270 | * |
271 | * The read link attention mailbox command is issued to read the Link Event | 271 | * The read topology mailbox command is issued to read the link topology |
272 | * Attention information indicated by the HBA port when the Link Event bit | 272 | * information indicated by the HBA port when the Link Event bit of the Host |
273 | * of the Host Attention (HSTATT) register is set to 1. A Link Event | 273 | * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link |
274 | * Attention ACQE is received from the port (For SLI-4). A Link Event | ||
274 | * Attention occurs based on an exception detected at the Fibre Channel link | 275 | * Attention occurs based on an exception detected at the Fibre Channel link |
275 | * interface. | 276 | * interface. |
276 | * | 277 | * |
277 | * This routine prepares the mailbox command for reading HBA link attention | 278 | * This routine prepares the mailbox command for reading HBA link topology |
278 | * information. A DMA memory has been set aside and address passed to the | 279 | * information. A DMA memory has been set aside and address passed to the |
279 | * HBA through @mp for the HBA to DMA link attention information into the | 280 | * HBA through @mp for the HBA to DMA link attention information into the |
280 | * memory as part of the execution of the mailbox command. | 281 | * memory as part of the execution of the mailbox command. |
@@ -283,7 +284,8 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
283 | * 0 - Success (currently always return 0) | 284 | * 0 - Success (currently always return 0) |
284 | **/ | 285 | **/ |
285 | int | 286 | int |
286 | lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) | 287 | lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, |
288 | struct lpfc_dmabuf *mp) | ||
287 | { | 289 | { |
288 | MAILBOX_t *mb; | 290 | MAILBOX_t *mb; |
289 | struct lpfc_sli *psli; | 291 | struct lpfc_sli *psli; |
@@ -293,15 +295,15 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) | |||
293 | memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); | 295 | memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); |
294 | 296 | ||
295 | INIT_LIST_HEAD(&mp->list); | 297 | INIT_LIST_HEAD(&mp->list); |
296 | mb->mbxCommand = MBX_READ_LA64; | 298 | mb->mbxCommand = MBX_READ_TOPOLOGY; |
297 | mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128; | 299 | mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE; |
298 | mb->un.varReadLA.un.lilpBde64.addrHigh = putPaddrHigh(mp->phys); | 300 | mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys); |
299 | mb->un.varReadLA.un.lilpBde64.addrLow = putPaddrLow(mp->phys); | 301 | mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys); |
300 | 302 | ||
301 | /* Save address for later completion and set the owner to host so that | 303 | /* Save address for later completion and set the owner to host so that |
302 | * the FW knows this mailbox is available for processing. | 304 | * the FW knows this mailbox is available for processing. |
303 | */ | 305 | */ |
304 | pmb->context1 = (uint8_t *) mp; | 306 | pmb->context1 = (uint8_t *)mp; |
305 | mb->mbxOwner = OWN_HOST; | 307 | mb->mbxOwner = OWN_HOST; |
306 | return (0); | 308 | return (0); |
307 | } | 309 | } |
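lpfc_read_topology() hands the port a 64-bit DMA address through the lilpBde64 descriptor by splitting it into 32-bit halves with putPaddrHigh()/putPaddrLow(). Those helpers amount to the following, shown only as a sketch of the convention:

    #include <linux/types.h>

    /* Rough equivalents of the driver's putPaddrHigh()/putPaddrLow() helpers. */
    static inline u32 paddr_high(dma_addr_t addr)
    {
            return (u32)(((u64)addr) >> 32);        /* upper 32 bits of the bus address */
    }

    static inline u32 paddr_low(dma_addr_t addr)
    {
            return (u32)addr;                       /* lower 32 bits of the bus address */
    }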
@@ -516,18 +518,33 @@ lpfc_init_link(struct lpfc_hba * phba, | |||
516 | vpd = &phba->vpd; | 518 | vpd = &phba->vpd; |
517 | if (vpd->rev.feaLevelHigh >= 0x02){ | 519 | if (vpd->rev.feaLevelHigh >= 0x02){ |
518 | switch(linkspeed){ | 520 | switch(linkspeed){ |
519 | case LINK_SPEED_1G: | 521 | case LPFC_USER_LINK_SPEED_1G: |
520 | case LINK_SPEED_2G: | 522 | mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; |
521 | case LINK_SPEED_4G: | 523 | mb->un.varInitLnk.link_speed = LINK_SPEED_1G; |
522 | case LINK_SPEED_8G: | 524 | break; |
523 | mb->un.varInitLnk.link_flags |= | 525 | case LPFC_USER_LINK_SPEED_2G: |
524 | FLAGS_LINK_SPEED; | 526 | mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; |
525 | mb->un.varInitLnk.link_speed = linkspeed; | 527 | mb->un.varInitLnk.link_speed = LINK_SPEED_2G; |
528 | break; | ||
529 | case LPFC_USER_LINK_SPEED_4G: | ||
530 | mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; | ||
531 | mb->un.varInitLnk.link_speed = LINK_SPEED_4G; | ||
532 | break; | ||
533 | case LPFC_USER_LINK_SPEED_8G: | ||
534 | mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; | ||
535 | mb->un.varInitLnk.link_speed = LINK_SPEED_8G; | ||
536 | break; | ||
537 | case LPFC_USER_LINK_SPEED_10G: | ||
538 | mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; | ||
539 | mb->un.varInitLnk.link_speed = LINK_SPEED_10G; | ||
526 | break; | 540 | break; |
527 | case LINK_SPEED_AUTO: | 541 | case LPFC_USER_LINK_SPEED_16G: |
528 | default: | 542 | mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; |
529 | mb->un.varInitLnk.link_speed = | 543 | mb->un.varInitLnk.link_speed = LINK_SPEED_16G; |
530 | LINK_SPEED_AUTO; | 544 | break; |
545 | case LPFC_USER_LINK_SPEED_AUTO: | ||
546 | default: | ||
547 | mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; | ||
531 | break; | 548 | break; |
532 | } | 549 | } |
533 | 550 | ||
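The expanded switch repeats the same two assignments for each supported speed; only the LINK_SPEED_* code differs per case. A table-driven form would express the same mapping more compactly. The sketch below is an editorial alternative rather than the driver's code, reusing the LPFC_USER_LINK_SPEED_* and LINK_SPEED_* identifiers from the hunk above.

    static const struct {
            uint32_t user_speed;    /* LPFC_USER_LINK_SPEED_* requested by the user */
            uint32_t mbox_speed;    /* LINK_SPEED_* code written to the mailbox */
    } link_speed_map[] = {
            { LPFC_USER_LINK_SPEED_1G,  LINK_SPEED_1G  },
            { LPFC_USER_LINK_SPEED_2G,  LINK_SPEED_2G  },
            { LPFC_USER_LINK_SPEED_4G,  LINK_SPEED_4G  },
            { LPFC_USER_LINK_SPEED_8G,  LINK_SPEED_8G  },
            { LPFC_USER_LINK_SPEED_10G, LINK_SPEED_10G },
            { LPFC_USER_LINK_SPEED_16G, LINK_SPEED_16G },
    };

    /* Return the mailbox code, setting FLAGS_LINK_SPEED only for a fixed speed. */
    static uint32_t map_link_speed(uint32_t user_speed, uint32_t *link_flags)
    {
            size_t i;

            for (i = 0; i < ARRAY_SIZE(link_speed_map); i++)
                    if (link_speed_map[i].user_speed == user_speed) {
                            *link_flags |= FLAGS_LINK_SPEED;
                            return link_speed_map[i].mbox_speed;
                    }
            return LINK_SPEED_AUTO;         /* unknown or auto: let the port negotiate */
    }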
@@ -693,7 +710,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
693 | * @did: remote port identifier. | 710 | * @did: remote port identifier. |
694 | * @param: pointer to memory holding the server parameters. | 711 | * @param: pointer to memory holding the server parameters. |
695 | * @pmb: pointer to the driver internal queue element for mailbox command. | 712 | * @pmb: pointer to the driver internal queue element for mailbox command. |
696 | * @flag: action flag to be passed back for the complete function. | 713 | * @rpi: the rpi to use in the registration (usually only used for SLI4). |
697 | * | 714 | * |
698 | * The registration login mailbox command is used to register an N_Port or | 715 | * The registration login mailbox command is used to register an N_Port or |
699 | * F_Port login. This registration allows the HBA to cache the remote N_Port | 716 | * F_Port login. This registration allows the HBA to cache the remote N_Port |
@@ -712,7 +729,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) | |||
712 | **/ | 729 | **/ |
713 | int | 730 | int |
714 | lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, | 731 | lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, |
715 | uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) | 732 | uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi) |
716 | { | 733 | { |
717 | MAILBOX_t *mb = &pmb->u.mb; | 734 | MAILBOX_t *mb = &pmb->u.mb; |
718 | uint8_t *sparam; | 735 | uint8_t *sparam; |
@@ -722,17 +739,13 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, | |||
722 | 739 | ||
723 | mb->un.varRegLogin.rpi = 0; | 740 | mb->un.varRegLogin.rpi = 0; |
724 | if (phba->sli_rev == LPFC_SLI_REV4) { | 741 | if (phba->sli_rev == LPFC_SLI_REV4) { |
725 | mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba); | 742 | mb->un.varRegLogin.rpi = rpi; |
726 | if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) | 743 | if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) |
727 | return 1; | 744 | return 1; |
728 | } | 745 | } |
729 | |||
730 | mb->un.varRegLogin.vpi = vpi + phba->vpi_base; | 746 | mb->un.varRegLogin.vpi = vpi + phba->vpi_base; |
731 | mb->un.varRegLogin.did = did; | 747 | mb->un.varRegLogin.did = did; |
732 | mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ | ||
733 | |||
734 | mb->mbxOwner = OWN_HOST; | 748 | mb->mbxOwner = OWN_HOST; |
735 | |||
736 | /* Get a buffer to hold NPorts Service Parameters */ | 749 | /* Get a buffer to hold NPorts Service Parameters */ |
737 | mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); | 750 | mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); |
738 | if (mp) | 751 | if (mp) |
@@ -743,7 +756,7 @@ lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, | |||
743 | /* REG_LOGIN: no buffers */ | 756 | /* REG_LOGIN: no buffers */ |
744 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, | 757 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, |
745 | "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " | 758 | "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " |
746 | "flag x%x\n", vpi, did, flag); | 759 | "rpi x%x\n", vpi, did, rpi); |
747 | return (1); | 760 | return (1); |
748 | } | 761 | } |
749 | INIT_LIST_HEAD(&mp->list); | 762 | INIT_LIST_HEAD(&mp->list); |
@@ -1918,11 +1931,14 @@ lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) | |||
1918 | struct lpfc_mbx_init_vfi *init_vfi; | 1931 | struct lpfc_mbx_init_vfi *init_vfi; |
1919 | 1932 | ||
1920 | memset(mbox, 0, sizeof(*mbox)); | 1933 | memset(mbox, 0, sizeof(*mbox)); |
1934 | mbox->vport = vport; | ||
1921 | init_vfi = &mbox->u.mqe.un.init_vfi; | 1935 | init_vfi = &mbox->u.mqe.un.init_vfi; |
1922 | bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); | 1936 | bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); |
1923 | bf_set(lpfc_init_vfi_vr, init_vfi, 1); | 1937 | bf_set(lpfc_init_vfi_vr, init_vfi, 1); |
1924 | bf_set(lpfc_init_vfi_vt, init_vfi, 1); | 1938 | bf_set(lpfc_init_vfi_vt, init_vfi, 1); |
1939 | bf_set(lpfc_init_vfi_vp, init_vfi, 1); | ||
1925 | bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); | 1940 | bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); |
1941 | bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base); | ||
1926 | bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); | 1942 | bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); |
1927 | } | 1943 | } |
1928 | 1944 | ||
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 8f879e477e9d..cbb48ee8b0bb 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -113,11 +113,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) | |||
113 | goto fail_free_mbox_pool; | 113 | goto fail_free_mbox_pool; |
114 | 114 | ||
115 | if (phba->sli_rev == LPFC_SLI_REV4) { | 115 | if (phba->sli_rev == LPFC_SLI_REV4) { |
116 | phba->rrq_pool = | ||
117 | mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, | ||
118 | sizeof(struct lpfc_node_rrq)); | ||
119 | if (!phba->rrq_pool) | ||
120 | goto fail_free_nlp_mem_pool; | ||
116 | phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", | 121 | phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", |
117 | phba->pcidev, | 122 | phba->pcidev, |
118 | LPFC_HDR_BUF_SIZE, align, 0); | 123 | LPFC_HDR_BUF_SIZE, align, 0); |
119 | if (!phba->lpfc_hrb_pool) | 124 | if (!phba->lpfc_hrb_pool) |
120 | goto fail_free_nlp_mem_pool; | 125 | goto fail_free_rrq_mem_pool; |
121 | 126 | ||
122 | phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", | 127 | phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", |
123 | phba->pcidev, | 128 | phba->pcidev, |
@@ -147,6 +152,9 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) | |||
147 | fail_free_hrb_pool: | 152 | fail_free_hrb_pool: |
148 | pci_pool_destroy(phba->lpfc_hrb_pool); | 153 | pci_pool_destroy(phba->lpfc_hrb_pool); |
149 | phba->lpfc_hrb_pool = NULL; | 154 | phba->lpfc_hrb_pool = NULL; |
155 | fail_free_rrq_mem_pool: | ||
156 | mempool_destroy(phba->rrq_pool); | ||
157 | phba->rrq_pool = NULL; | ||
150 | fail_free_nlp_mem_pool: | 158 | fail_free_nlp_mem_pool: |
151 | mempool_destroy(phba->nlp_mem_pool); | 159 | mempool_destroy(phba->nlp_mem_pool); |
152 | phba->nlp_mem_pool = NULL; | 160 | phba->nlp_mem_pool = NULL; |
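Because rrq_pool is created between the nlp mempool and the hrb DMA pool, every later failure path needs a matching unwind step: the hrb allocation failure now jumps to fail_free_rrq_mem_pool, which destroys the rrq pool and falls through to the older labels. The general goto-unwind shape, with hypothetical pool names and sizes, is:

    #include <linux/mempool.h>

    struct example_pools {                  /* hypothetical container */
            mempool_t *nlp_pool;
            mempool_t *rrq_pool;
            mempool_t *hrb_pool;
    };

    static int alloc_pools(struct example_pools *p)
    {
            p->nlp_pool = mempool_create_kmalloc_pool(64, 256);
            if (!p->nlp_pool)
                    return -ENOMEM;

            p->rrq_pool = mempool_create_kmalloc_pool(64, 128);
            if (!p->rrq_pool)
                    goto free_nlp;

            p->hrb_pool = mempool_create_kmalloc_pool(64, 64);
            if (!p->hrb_pool)
                    goto free_rrq;          /* the newest label unwinds the newest pool */
            return 0;

    free_rrq:
            mempool_destroy(p->rrq_pool);
    free_nlp:
            mempool_destroy(p->nlp_pool);
            return -ENOMEM;
    }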
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bccc9c66fa37..d85a7423a694 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -386,7 +386,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
386 | goto out; | 386 | goto out; |
387 | 387 | ||
388 | rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, | 388 | rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, |
389 | (uint8_t *) sp, mbox, 0); | 389 | (uint8_t *) sp, mbox, ndlp->nlp_rpi); |
390 | if (rc) { | 390 | if (rc) { |
391 | mempool_free(mbox, phba->mbox_mem_pool); | 391 | mempool_free(mbox, phba->mbox_mem_pool); |
392 | goto out; | 392 | goto out; |
@@ -632,7 +632,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
632 | { | 632 | { |
633 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 633 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
634 | 634 | ||
635 | if (!(ndlp->nlp_flag & NLP_RPI_VALID)) { | 635 | if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { |
636 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; | 636 | ndlp->nlp_flag &= ~NLP_NPR_ADISC; |
637 | return 0; | 637 | return 0; |
638 | } | 638 | } |
@@ -968,7 +968,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, | |||
968 | lpfc_unreg_rpi(vport, ndlp); | 968 | lpfc_unreg_rpi(vport, ndlp); |
969 | 969 | ||
970 | if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, | 970 | if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, |
971 | (uint8_t *) sp, mbox, 0) == 0) { | 971 | (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) { |
972 | switch (ndlp->nlp_DID) { | 972 | switch (ndlp->nlp_DID) { |
973 | case NameServer_DID: | 973 | case NameServer_DID: |
974 | mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; | 974 | mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; |
@@ -1338,12 +1338,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, | |||
1338 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { | 1338 | list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { |
1339 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && | 1339 | if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && |
1340 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { | 1340 | (ndlp == (struct lpfc_nodelist *) mb->context2)) { |
1341 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
1342 | spin_unlock_irq(&phba->hbalock); | ||
1343 | lpfc_sli4_free_rpi(phba, | ||
1344 | mb->u.mb.un.varRegLogin.rpi); | ||
1345 | spin_lock_irq(&phba->hbalock); | ||
1346 | } | ||
1347 | mp = (struct lpfc_dmabuf *) (mb->context1); | 1341 | mp = (struct lpfc_dmabuf *) (mb->context1); |
1348 | if (mp) { | 1342 | if (mp) { |
1349 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); | 1343 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); |
@@ -1426,7 +1420,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, | |||
1426 | } | 1420 | } |
1427 | 1421 | ||
1428 | ndlp->nlp_rpi = mb->un.varWords[0]; | 1422 | ndlp->nlp_rpi = mb->un.varWords[0]; |
1429 | ndlp->nlp_flag |= NLP_RPI_VALID; | 1423 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
1430 | 1424 | ||
1431 | /* Only if we are not a fabric nport do we issue PRLI */ | 1425 | /* Only if we are not a fabric nport do we issue PRLI */ |
1432 | if (!(ndlp->nlp_type & NLP_FABRIC)) { | 1426 | if (!(ndlp->nlp_type & NLP_FABRIC)) { |
@@ -2027,7 +2021,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, | |||
2027 | 2021 | ||
2028 | if (!mb->mbxStatus) { | 2022 | if (!mb->mbxStatus) { |
2029 | ndlp->nlp_rpi = mb->un.varWords[0]; | 2023 | ndlp->nlp_rpi = mb->un.varWords[0]; |
2030 | ndlp->nlp_flag |= NLP_RPI_VALID; | 2024 | ndlp->nlp_flag |= NLP_RPI_REGISTERED; |
2031 | } else { | 2025 | } else { |
2032 | if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { | 2026 | if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { |
2033 | lpfc_drop_node(vport, ndlp); | 2027 | lpfc_drop_node(vport, ndlp); |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 581837b3c71a..c97751c95d77 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -621,10 +621,13 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, | |||
621 | struct sli4_wcqe_xri_aborted *axri) | 621 | struct sli4_wcqe_xri_aborted *axri) |
622 | { | 622 | { |
623 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); | 623 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); |
624 | uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); | ||
624 | struct lpfc_scsi_buf *psb, *next_psb; | 625 | struct lpfc_scsi_buf *psb, *next_psb; |
625 | unsigned long iflag = 0; | 626 | unsigned long iflag = 0; |
626 | struct lpfc_iocbq *iocbq; | 627 | struct lpfc_iocbq *iocbq; |
627 | int i; | 628 | int i; |
629 | struct lpfc_nodelist *ndlp; | ||
630 | int rrq_empty = 0; | ||
628 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; | 631 | struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; |
629 | 632 | ||
630 | spin_lock_irqsave(&phba->hbalock, iflag); | 633 | spin_lock_irqsave(&phba->hbalock, iflag); |
@@ -637,8 +640,14 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, | |||
637 | psb->status = IOSTAT_SUCCESS; | 640 | psb->status = IOSTAT_SUCCESS; |
638 | spin_unlock( | 641 | spin_unlock( |
639 | &phba->sli4_hba.abts_scsi_buf_list_lock); | 642 | &phba->sli4_hba.abts_scsi_buf_list_lock); |
643 | ndlp = psb->rdata->pnode; | ||
644 | rrq_empty = list_empty(&phba->active_rrq_list); | ||
640 | spin_unlock_irqrestore(&phba->hbalock, iflag); | 645 | spin_unlock_irqrestore(&phba->hbalock, iflag); |
646 | if (ndlp) | ||
647 | lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1); | ||
641 | lpfc_release_scsi_buf_s4(phba, psb); | 648 | lpfc_release_scsi_buf_s4(phba, psb); |
649 | if (rrq_empty) | ||
650 | lpfc_worker_wake_up(phba); | ||
642 | return; | 651 | return; |
643 | } | 652 | } |
644 | } | 653 | } |
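The aborted-XRI handler now samples everything it needs (the node pointer and whether the active RRQ list was empty) while still holding the host lock, drops the lock, and only then calls lpfc_set_rrq_active() and wakes the worker. Sampling under the lock and deferring the heavier calls until after release is a common pattern; a generic sketch with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct example_hba {                    /* hypothetical container */
            spinlock_t lock;
            struct list_head active_list;
    };

    void example_wake_worker(struct example_hba *hba);     /* assumed helper */

    static void handle_aborted_exchange(struct example_hba *hba)
    {
            unsigned long flags;
            int was_empty;

            spin_lock_irqsave(&hba->lock, flags);
            was_empty = list_empty(&hba->active_list);      /* sample under the lock */
            /* ...unlink the aborted buffer from the protected lists here... */
            spin_unlock_irqrestore(&hba->lock, flags);

            /* Heavier work (allocation, other locks) happens with the lock dropped. */
            if (was_empty)
                    example_wake_worker(hba);
    }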
@@ -914,7 +923,7 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) | |||
914 | } | 923 | } |
915 | 924 | ||
916 | /** | 925 | /** |
917 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA | 926 | * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA |
918 | * @phba: The HBA for which this call is being executed. | 927 | * @phba: The HBA for which this call is being executed. |
919 | * | 928 | * |
920 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list | 929 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list |
@@ -925,7 +934,7 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) | |||
925 | * Pointer to lpfc_scsi_buf - Success | 934 | * Pointer to lpfc_scsi_buf - Success |
926 | **/ | 935 | **/ |
927 | static struct lpfc_scsi_buf* | 936 | static struct lpfc_scsi_buf* |
928 | lpfc_get_scsi_buf(struct lpfc_hba * phba) | 937 | lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
929 | { | 938 | { |
930 | struct lpfc_scsi_buf * lpfc_cmd = NULL; | 939 | struct lpfc_scsi_buf * lpfc_cmd = NULL; |
931 | struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; | 940 | struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; |
@@ -941,6 +950,67 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) | |||
941 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | 950 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); |
942 | return lpfc_cmd; | 951 | return lpfc_cmd; |
943 | } | 952 | } |
953 | /** | ||
954 | * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA | ||
955 | * @phba: The HBA for which this call is being executed. | ||
956 | * | ||
957 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list | ||
958 | * and returns to caller. | ||
959 | * | ||
960 | * Return codes: | ||
961 | * NULL - Error | ||
962 | * Pointer to lpfc_scsi_buf - Success | ||
963 | **/ | ||
964 | static struct lpfc_scsi_buf* | ||
965 | lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | ||
966 | { | ||
967 | struct lpfc_scsi_buf *lpfc_cmd = NULL; | ||
968 | struct lpfc_scsi_buf *start_lpfc_cmd = NULL; | ||
969 | struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; | ||
970 | unsigned long iflag = 0; | ||
971 | int found = 0; | ||
972 | |||
973 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); | ||
974 | list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); | ||
975 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | ||
976 | while (!found && lpfc_cmd) { | ||
977 | if (lpfc_test_rrq_active(phba, ndlp, | ||
978 | lpfc_cmd->cur_iocbq.sli4_xritag)) { | ||
979 | lpfc_release_scsi_buf_s4(phba, lpfc_cmd); | ||
980 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); | ||
981 | list_remove_head(scsi_buf_list, lpfc_cmd, | ||
982 | struct lpfc_scsi_buf, list); | ||
983 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, | ||
984 | iflag); | ||
985 | if (lpfc_cmd == start_lpfc_cmd) { | ||
986 | lpfc_cmd = NULL; | ||
987 | break; | ||
988 | } else | ||
989 | continue; | ||
990 | } | ||
991 | found = 1; | ||
992 | lpfc_cmd->seg_cnt = 0; | ||
993 | lpfc_cmd->nonsg_phys = 0; | ||
994 | lpfc_cmd->prot_seg_cnt = 0; | ||
995 | } | ||
996 | return lpfc_cmd; | ||
997 | } | ||
998 | /** | ||
999 | * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA | ||
1000 | * @phba: The HBA for which this call is being executed. | ||
1001 | * | ||
1002 | * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list | ||
1003 | * and returns to caller. | ||
1004 | * | ||
1005 | * Return codes: | ||
1006 | * NULL - Error | ||
1007 | * Pointer to lpfc_scsi_buf - Success | ||
1008 | **/ | ||
1009 | static struct lpfc_scsi_buf* | ||
1010 | lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | ||
1011 | { | ||
1012 | return phba->lpfc_get_scsi_buf(phba, ndlp); | ||
1013 | } | ||
944 | 1014 | ||
945 | /** | 1015 | /** |
946 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list | 1016 | * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list |
@@ -2744,18 +2814,19 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
2744 | 2814 | ||
2745 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; | 2815 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf; |
2746 | phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd; | 2816 | phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd; |
2747 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; | ||
2748 | 2817 | ||
2749 | switch (dev_grp) { | 2818 | switch (dev_grp) { |
2750 | case LPFC_PCI_DEV_LP: | 2819 | case LPFC_PCI_DEV_LP: |
2751 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; | 2820 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; |
2752 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; | 2821 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; |
2753 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; | 2822 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; |
2823 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; | ||
2754 | break; | 2824 | break; |
2755 | case LPFC_PCI_DEV_OC: | 2825 | case LPFC_PCI_DEV_OC: |
2756 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; | 2826 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; |
2757 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; | 2827 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; |
2758 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; | 2828 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; |
2829 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4; | ||
2759 | break; | 2830 | break; |
2760 | default: | 2831 | default: |
2761 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2832 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
@@ -2764,7 +2835,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
2764 | return -ENODEV; | 2835 | return -ENODEV; |
2765 | break; | 2836 | break; |
2766 | } | 2837 | } |
2767 | phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; | ||
2768 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; | 2838 | phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; |
2769 | phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; | 2839 | phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; |
2770 | return 0; | 2840 | return 0; |
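With this change the buffer-allocation hook is selected per device group alongside the other SCSI entry points, and lpfc_get_scsi_buf() shrinks to a trampoline through phba->lpfc_get_scsi_buf. The underlying idea is ordinary function-pointer dispatch chosen once at setup time; a stripped-down sketch with hypothetical names:

    struct scsi_ops_sketch {
            void *(*get_buf)(void *hba, void *node);        /* per-hardware hook */
    };

    static void *get_buf_s3(void *hba, void *node) { return 0; }   /* SLI-3 stub */
    static void *get_buf_s4(void *hba, void *node) { return 0; }   /* SLI-4 stub */

    /* Bind the implementation once, based on the device group. */
    static int setup_ops(struct scsi_ops_sketch *ops, int dev_grp)
    {
            switch (dev_grp) {
            case 0: ops->get_buf = get_buf_s3; break;       /* hypothetical group ids */
            case 1: ops->get_buf = get_buf_s4; break;
            default: return -1;
            }
            return 0;
    }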
@@ -2940,7 +3010,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) | |||
2940 | if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) | 3010 | if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) |
2941 | goto out_host_busy; | 3011 | goto out_host_busy; |
2942 | 3012 | ||
2943 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 3013 | lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); |
2944 | if (lpfc_cmd == NULL) { | 3014 | if (lpfc_cmd == NULL) { |
2945 | lpfc_rampdown_queue_depth(phba); | 3015 | lpfc_rampdown_queue_depth(phba); |
2946 | 3016 | ||
@@ -3239,7 +3309,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | |||
3239 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) | 3309 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) |
3240 | return FAILED; | 3310 | return FAILED; |
3241 | 3311 | ||
3242 | lpfc_cmd = lpfc_get_scsi_buf(phba); | 3312 | lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); |
3243 | if (lpfc_cmd == NULL) | 3313 | if (lpfc_cmd == NULL) |
3244 | return FAILED; | 3314 | return FAILED; |
3245 | lpfc_cmd->timeout = 60; | 3315 | lpfc_cmd->timeout = 60; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 554efa6623f4..634b2fea9c4d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -513,8 +513,344 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) | |||
513 | } | 513 | } |
514 | 514 | ||
515 | /** | 515 | /** |
516 | * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap. | ||
517 | * @phba: Pointer to HBA context object. | ||
518 | * @ndlp: nodelist pointer for this target. | ||
519 | * @xritag: xri used in this exchange. | ||
520 | * @rxid: Remote Exchange ID. | ||
521 | * @send_rrq: Flag used to determine if we should send rrq els cmd. | ||
522 | * | ||
523 | * This function is called with hbalock held. | ||
524 | * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an | ||
525 | * rrq struct and adds it to the active_rrq_list. | ||
526 | * | ||
527 | * returns 0 if an rrq was marked active for this xri | ||
528 | * < 0 unable to get rrq memory or invalid parameter. | ||
529 | **/ | ||
530 | static int | ||
531 | __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | ||
532 | uint16_t xritag, uint16_t rxid, uint16_t send_rrq) | ||
533 | { | ||
534 | uint16_t adj_xri; | ||
535 | struct lpfc_node_rrq *rrq; | ||
536 | int empty; | ||
537 | |||
538 | /* | ||
539 | * set the active bit even if there is no mem available. | ||
540 | */ | ||
541 | adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; | ||
542 | if (!ndlp) | ||
543 | return -EINVAL; | ||
544 | if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) | ||
545 | return -EINVAL; | ||
546 | rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); | ||
547 | if (rrq) { | ||
548 | rrq->send_rrq = send_rrq; | ||
549 | rrq->xritag = xritag; | ||
550 | rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); | ||
551 | rrq->ndlp = ndlp; | ||
552 | rrq->nlp_DID = ndlp->nlp_DID; | ||
553 | rrq->vport = ndlp->vport; | ||
554 | rrq->rxid = rxid; | ||
555 | empty = list_empty(&phba->active_rrq_list); | ||
556 | if (phba->cfg_enable_rrq && send_rrq) | ||
557 | /* | ||
558 | * We need the xri before we can add this to the | ||
559 | * phba active rrq list. | ||
560 | */ | ||
561 | rrq->send_rrq = send_rrq; | ||
562 | else | ||
563 | rrq->send_rrq = 0; | ||
564 | list_add_tail(&rrq->list, &phba->active_rrq_list); | ||
565 | if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) { | ||
566 | phba->hba_flag |= HBA_RRQ_ACTIVE; | ||
567 | if (empty) | ||
568 | lpfc_worker_wake_up(phba); | ||
569 | } | ||
570 | return 0; | ||
571 | } | ||
572 | return -ENOMEM; | ||
573 | } | ||
574 | |||
575 | /** | ||
576 | * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. | ||
577 | * @phba: Pointer to HBA context object. | ||
578 | * @xritag: xri used in this exchange. | ||
579 | * @rrq: The RRQ to be cleared. | ||
580 | * | ||
581 | * This function is called with hbalock held. | ||
582 | **/ | ||
583 | static void | ||
584 | __lpfc_clr_rrq_active(struct lpfc_hba *phba, | ||
585 | uint16_t xritag, | ||
586 | struct lpfc_node_rrq *rrq) | ||
587 | { | ||
588 | uint16_t adj_xri; | ||
589 | struct lpfc_nodelist *ndlp; | ||
590 | |||
591 | ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); | ||
592 | |||
593 | /* The target DID could have been swapped (cable swap), | ||
594 | * so we should use the ndlp from the findnode if it is | ||
595 | * available. | ||
596 | */ | ||
597 | if (!ndlp) | ||
598 | ndlp = rrq->ndlp; | ||
599 | |||
600 | adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; | ||
601 | if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) { | ||
602 | rrq->send_rrq = 0; | ||
603 | rrq->xritag = 0; | ||
604 | rrq->rrq_stop_time = 0; | ||
605 | } | ||
606 | mempool_free(rrq, phba->rrq_pool); | ||
607 | } | ||
608 | |||
609 | /** | ||
610 | * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV. | ||
611 | * @phba: Pointer to HBA context object. | ||
612 | * | ||
613 | * This function is called with hbalock held. It checks whether | ||
614 | * stop_time (ratov from setting rrq active) has been reached; | ||
615 | * if it has and the send_rrq flag is set then | ||
616 | * it will call lpfc_send_rrq. If the send_rrq flag is not set | ||
617 | * then it will just call the routine to clear the rrq and | ||
618 | * free the rrq resource. | ||
619 | * The timer is set to the next rrq that is going to expire before | ||
620 | * leaving the routine. | ||
621 | * | ||
622 | **/ | ||
623 | void | ||
624 | lpfc_handle_rrq_active(struct lpfc_hba *phba) | ||
625 | { | ||
626 | struct lpfc_node_rrq *rrq; | ||
627 | struct lpfc_node_rrq *nextrrq; | ||
628 | unsigned long next_time; | ||
629 | unsigned long iflags; | ||
630 | |||
631 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
632 | phba->hba_flag &= ~HBA_RRQ_ACTIVE; | ||
633 | next_time = jiffies + HZ * (phba->fc_ratov + 1); | ||
634 | list_for_each_entry_safe(rrq, nextrrq, | ||
635 | &phba->active_rrq_list, list) { | ||
636 | if (time_after(jiffies, rrq->rrq_stop_time)) { | ||
637 | list_del(&rrq->list); | ||
638 | if (!rrq->send_rrq) | ||
639 | /* this call will free the rrq */ | ||
640 | __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); | ||
641 | else { | ||
642 | /* if we send the rrq then the completion handler | ||
643 | * will clear the bit in the xribitmap. | ||
644 | */ | ||
645 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
646 | if (lpfc_send_rrq(phba, rrq)) { | ||
647 | lpfc_clr_rrq_active(phba, rrq->xritag, | ||
648 | rrq); | ||
649 | } | ||
650 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
651 | } | ||
652 | } else if (time_before(rrq->rrq_stop_time, next_time)) | ||
653 | next_time = rrq->rrq_stop_time; | ||
654 | } | ||
655 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
656 | if (!list_empty(&phba->active_rrq_list)) | ||
657 | mod_timer(&phba->rrq_tmr, next_time); | ||
658 | } | ||
659 | |||
660 | /** | ||
661 | * lpfc_get_active_rrq - Get the active RRQ for this exchange. | ||
662 | * @vport: Pointer to vport context object. | ||
663 | * @xri: The xri used in the exchange. | ||
664 | * @did: The target's DID for this exchange. | ||
665 | * | ||
666 | * returns NULL = rrq not found in the phba->active_rrq_list. | ||
667 | * rrq = rrq for this xri and target. | ||
668 | **/ | ||
669 | struct lpfc_node_rrq * | ||
670 | lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) | ||
671 | { | ||
672 | struct lpfc_hba *phba = vport->phba; | ||
673 | struct lpfc_node_rrq *rrq; | ||
674 | struct lpfc_node_rrq *nextrrq; | ||
675 | unsigned long iflags; | ||
676 | |||
677 | if (phba->sli_rev != LPFC_SLI_REV4) | ||
678 | return NULL; | ||
679 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
680 | list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { | ||
681 | if (rrq->vport == vport && rrq->xritag == xri && | ||
682 | rrq->nlp_DID == did){ | ||
683 | list_del(&rrq->list); | ||
684 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
685 | return rrq; | ||
686 | } | ||
687 | } | ||
688 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
689 | return NULL; | ||
690 | } | ||
691 | |||
692 | /** | ||
693 | * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. | ||
694 | * @vport: Pointer to vport context object. | ||
695 | * | ||
696 | * Remove all active RRQs for this vport from the phba->active_rrq_list and | ||
697 | * clear the rrq. | ||
698 | **/ | ||
699 | void | ||
700 | lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport) | ||
701 | |||
702 | { | ||
703 | struct lpfc_hba *phba = vport->phba; | ||
704 | struct lpfc_node_rrq *rrq; | ||
705 | struct lpfc_node_rrq *nextrrq; | ||
706 | unsigned long iflags; | ||
707 | |||
708 | if (phba->sli_rev != LPFC_SLI_REV4) | ||
709 | return; | ||
710 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
711 | list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { | ||
712 | if (rrq->vport == vport) { | ||
713 | list_del(&rrq->list); | ||
714 | __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); | ||
715 | } | ||
716 | } | ||
717 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
718 | } | ||
719 | |||
720 | /** | ||
721 | * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list. | ||
722 | * @phba: Pointer to HBA context object. | ||
723 | * | ||
724 | * Remove all rrqs from the phba->active_rrq_list and free them by | ||
725 | * calling __lpfc_clr_rrq_active. | ||
726 | * | ||
727 | **/ | ||
728 | void | ||
729 | lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) | ||
730 | { | ||
731 | struct lpfc_node_rrq *rrq; | ||
732 | struct lpfc_node_rrq *nextrrq; | ||
733 | unsigned long next_time; | ||
734 | unsigned long iflags; | ||
735 | |||
736 | if (phba->sli_rev != LPFC_SLI_REV4) | ||
737 | return; | ||
738 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
739 | phba->hba_flag &= ~HBA_RRQ_ACTIVE; | ||
740 | next_time = jiffies + HZ * (phba->fc_ratov * 2); | ||
741 | list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { | ||
742 | list_del(&rrq->list); | ||
743 | __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); | ||
744 | } | ||
745 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
746 | if (!list_empty(&phba->active_rrq_list)) | ||
747 | mod_timer(&phba->rrq_tmr, next_time); | ||
748 | } | ||
749 | |||
750 | |||
751 | /** | ||
752 | * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. | ||
753 | * @phba: Pointer to HBA context object. | ||
754 | * @ndlp: Target's nodelist pointer for this exchange. | ||
755 | * @xritag: the xri in the bitmap to test. | ||
756 | * | ||
757 | * This function is called with hbalock held. This function | ||
758 | * returns 0 = rrq not active for this xri | ||
759 | * 1 = rrq is valid for this xri. | ||
760 | **/ | ||
761 | static int | ||
762 | __lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | ||
763 | uint16_t xritag) | ||
764 | { | ||
765 | uint16_t adj_xri; | ||
766 | |||
767 | adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; | ||
768 | if (!ndlp) | ||
769 | return 0; | ||
770 | if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) | ||
771 | return 1; | ||
772 | else | ||
773 | return 0; | ||
774 | } | ||
775 | |||
776 | /** | ||
777 | * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. | ||
778 | * @phba: Pointer to HBA context object. | ||
779 | * @ndlp: nodelist pointer for this target. | ||
780 | * @xritag: xri used in this exchange. | ||
781 | * @rxid: Remote Exchange ID. | ||
782 | * @send_rrq: Flag used to determine if we should send rrq els cmd. | ||
783 | * | ||
784 | * This function takes the hbalock. | ||
785 | * The active bit is always set in the active rrq xri_bitmap even | ||
786 | * if there is no slot available for the other rrq information. | ||
787 | * | ||
788 | * returns 0 if the rrq was activated for this xri | ||
789 | * < 0 No memory or invalid ndlp. | ||
790 | **/ | ||
791 | int | ||
792 | lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | ||
793 | uint16_t xritag, uint16_t rxid, uint16_t send_rrq) | ||
794 | { | ||
795 | int ret; | ||
796 | unsigned long iflags; | ||
797 | |||
798 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
799 | ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq); | ||
800 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
801 | return ret; | ||
802 | } | ||
803 | |||
804 | /** | ||
805 | * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. | ||
806 | * @phba: Pointer to HBA context object. | ||
807 | * @xritag: xri used in this exchange. | ||
808 | * @rrq: The RRQ to be cleared. | ||
809 | * | ||
810 | * This function takes the hbalock. | ||
811 | **/ | ||
812 | void | ||
813 | lpfc_clr_rrq_active(struct lpfc_hba *phba, | ||
814 | uint16_t xritag, | ||
815 | struct lpfc_node_rrq *rrq) | ||
816 | { | ||
817 | unsigned long iflags; | ||
818 | |||
819 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
820 | __lpfc_clr_rrq_active(phba, xritag, rrq); | ||
821 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
822 | return; | ||
823 | } | ||
824 | |||
825 | |||
826 | |||
827 | /** | ||
828 | * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. | ||
829 | * @phba: Pointer to HBA context object. | ||
830 | * @ndlp: Target's nodelist pointer for this exchange. | ||
831 | * @xritag: the xri in the bitmap to test. | ||
832 | * | ||
833 | * This function takes the hbalock. | ||
834 | * returns 0 = rrq not active for this xri | ||
835 | * 1 = rrq is valid for this xri. | ||
836 | **/ | ||
837 | int | ||
838 | lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | ||
839 | uint16_t xritag) | ||
840 | { | ||
841 | int ret; | ||
842 | unsigned long iflags; | ||
843 | |||
844 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
845 | ret = __lpfc_test_rrq_active(phba, ndlp, xritag); | ||
846 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
847 | return ret; | ||
848 | } | ||
849 | |||
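Taken together, the new helpers form a small RRQ lifecycle: lpfc_set_rrq_active() marks an XRI busy when its exchange is aborted, lpfc_test_rrq_active() (or the __lpfc_* variant under the lock) is consulted before that XRI is handed out again, and the worker-driven lpfc_handle_rrq_active() clears entries once RATOV has elapsed, optionally sending an RRQ ELS first. A hedged sketch of call sites, not actual driver code:

    /* Illustrative only; phba, ndlp, xri and rxid come from the real caller. */
    static void example_abort_path(struct lpfc_hba *phba,
                                   struct lpfc_nodelist *ndlp,
                                   uint16_t xri, uint16_t rxid)
    {
            /* Mark the XRI busy until RATOV expires; send_rrq = 1 requests an ELS. */
            if (lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1))
                    return;         /* no memory or no ndlp; reuse protection is best effort */
    }

    static int example_can_reuse_xri(struct lpfc_hba *phba,
                                     struct lpfc_nodelist *ndlp, uint16_t xri)
    {
            /* Skip XRIs that still have an outstanding RRQ window for this node. */
            return !lpfc_test_rrq_active(phba, ndlp, xri);
    }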
850 | /** | ||
516 | * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool | 851 | * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool |
517 | * @phba: Pointer to HBA context object. | 852 | * @phba: Pointer to HBA context object. |
853 | * @piocb: Pointer to the iocbq. | ||
518 | * | 854 | * |
519 | * This function is called with hbalock held. This function | 855 | * This function is called with hbalock held. This function |
520 | * Gets a new driver sglq object from the sglq list. If the | 856 | * Gets a new driver sglq object from the sglq list. If the |
@@ -522,17 +858,51 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) | |||
522 | * allocated sglq object else it returns NULL. | 858 | * allocated sglq object else it returns NULL. |
523 | **/ | 859 | **/ |
524 | static struct lpfc_sglq * | 860 | static struct lpfc_sglq * |
525 | __lpfc_sli_get_sglq(struct lpfc_hba *phba) | 861 | __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) |
526 | { | 862 | { |
527 | struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; | 863 | struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; |
528 | struct lpfc_sglq *sglq = NULL; | 864 | struct lpfc_sglq *sglq = NULL; |
865 | struct lpfc_sglq *start_sglq = NULL; | ||
529 | uint16_t adj_xri; | 866 | uint16_t adj_xri; |
867 | struct lpfc_scsi_buf *lpfc_cmd; | ||
868 | struct lpfc_nodelist *ndlp; | ||
869 | int found = 0; | ||
870 | |||
871 | if (piocbq->iocb_flag & LPFC_IO_FCP) { | ||
872 | lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; | ||
873 | ndlp = lpfc_cmd->rdata->pnode; | ||
874 | } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && | ||
875 | !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) | ||
876 | ndlp = piocbq->context_un.ndlp; | ||
877 | else | ||
878 | ndlp = piocbq->context1; | ||
879 | |||
530 | list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); | 880 | list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); |
531 | if (!sglq) | 881 | start_sglq = sglq; |
532 | return NULL; | 882 | while (!found) { |
533 | adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; | 883 | if (!sglq) |
534 | phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; | 884 | return NULL; |
535 | sglq->state = SGL_ALLOCATED; | 885 | adj_xri = sglq->sli4_xritag - |
886 | phba->sli4_hba.max_cfg_param.xri_base; | ||
887 | if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { | ||
888 | /* This xri has an rrq outstanding for this DID. | ||
889 | * put it back in the list and get another xri. | ||
890 | */ | ||
891 | list_add_tail(&sglq->list, lpfc_sgl_list); | ||
892 | sglq = NULL; | ||
893 | list_remove_head(lpfc_sgl_list, sglq, | ||
894 | struct lpfc_sglq, list); | ||
895 | if (sglq == start_sglq) { | ||
896 | sglq = NULL; | ||
897 | break; | ||
898 | } else | ||
899 | continue; | ||
900 | } | ||
901 | sglq->ndlp = ndlp; | ||
902 | found = 1; | ||
903 | phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; | ||
904 | sglq->state = SGL_ALLOCATED; | ||
905 | } | ||
536 | return sglq; | 906 | return sglq; |
537 | } | 907 | } |
538 | 908 | ||
@@ -598,6 +968,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) | |||
598 | &phba->sli4_hba.abts_sgl_list_lock, iflag); | 968 | &phba->sli4_hba.abts_sgl_list_lock, iflag); |
599 | } else { | 969 | } else { |
600 | sglq->state = SGL_FREED; | 970 | sglq->state = SGL_FREED; |
971 | sglq->ndlp = NULL; | ||
601 | list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); | 972 | list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); |
602 | 973 | ||
603 | /* Check if TXQ queue needs to be serviced */ | 974 | /* Check if TXQ queue needs to be serviced */ |
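__lpfc_sli_get_sglq() now remembers the first sglq it pops (start_sglq) so that, when every candidate is rejected because of an active RRQ for the target node, the search stops after one full pass instead of looping forever: rejected entries go back on the tail and the loop ends when the head wraps around to the starting element. The bounded-search idea, reduced to a plain-C scan over an array, is purely illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    /* Return the index of the first usable entry, or -1 after one full pass. */
    static int first_usable(const int *ring, size_t n, bool (*usable)(int))
    {
            size_t tried;

            for (tried = 0; tried < n; tried++) {
                    if (usable(ring[tried]))
                            return (int)tried;
                    /* rejected: the driver re-queues this entry at the list tail */
            }
            return -1;      /* visited every entry once without a match */
    }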
@@ -1634,7 +2005,6 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) | |||
1634 | case MBX_READ_LNK_STAT: | 2005 | case MBX_READ_LNK_STAT: |
1635 | case MBX_REG_LOGIN: | 2006 | case MBX_REG_LOGIN: |
1636 | case MBX_UNREG_LOGIN: | 2007 | case MBX_UNREG_LOGIN: |
1637 | case MBX_READ_LA: | ||
1638 | case MBX_CLEAR_LA: | 2008 | case MBX_CLEAR_LA: |
1639 | case MBX_DUMP_MEMORY: | 2009 | case MBX_DUMP_MEMORY: |
1640 | case MBX_DUMP_CONTEXT: | 2010 | case MBX_DUMP_CONTEXT: |
@@ -1656,7 +2026,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) | |||
1656 | case MBX_READ_SPARM64: | 2026 | case MBX_READ_SPARM64: |
1657 | case MBX_READ_RPI64: | 2027 | case MBX_READ_RPI64: |
1658 | case MBX_REG_LOGIN64: | 2028 | case MBX_REG_LOGIN64: |
1659 | case MBX_READ_LA64: | 2029 | case MBX_READ_TOPOLOGY: |
1660 | case MBX_WRITE_WWN: | 2030 | case MBX_WRITE_WWN: |
1661 | case MBX_SET_DEBUG: | 2031 | case MBX_SET_DEBUG: |
1662 | case MBX_LOAD_EXP_ROM: | 2032 | case MBX_LOAD_EXP_ROM: |
@@ -1746,11 +2116,6 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
1746 | kfree(mp); | 2116 | kfree(mp); |
1747 | } | 2117 | } |
1748 | 2118 | ||
1749 | if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && | ||
1750 | (phba->sli_rev == LPFC_SLI_REV4) && | ||
1751 | (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0)) | ||
1752 | lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); | ||
1753 | |||
1754 | /* | 2119 | /* |
1755 | * If a REG_LOGIN succeeded after node is destroyed or node | 2120 | * If a REG_LOGIN succeeded after node is destroyed or node |
1756 | * is in re-discovery driver need to cleanup the RPI. | 2121 | * is in re-discovery driver need to cleanup the RPI. |
@@ -3483,12 +3848,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) | |||
3483 | phba->pport->fc_myDID = 0; | 3848 | phba->pport->fc_myDID = 0; |
3484 | phba->pport->fc_prevDID = 0; | 3849 | phba->pport->fc_prevDID = 0; |
3485 | 3850 | ||
3486 | /* Turn off parity checking and serr during the physical reset */ | ||
3487 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); | ||
3488 | pci_write_config_word(phba->pcidev, PCI_COMMAND, | ||
3489 | (cfg_value & | ||
3490 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | ||
3491 | |||
3492 | spin_lock_irq(&phba->hbalock); | 3851 | spin_lock_irq(&phba->hbalock); |
3493 | psli->sli_flag &= ~(LPFC_PROCESS_LA); | 3852 | psli->sli_flag &= ~(LPFC_PROCESS_LA); |
3494 | phba->fcf.fcf_flag = 0; | 3853 | phba->fcf.fcf_flag = 0; |
@@ -3508,9 +3867,18 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) | |||
3508 | /* Now physically reset the device */ | 3867 | /* Now physically reset the device */ |
3509 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 3868 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, |
3510 | "0389 Performing PCI function reset!\n"); | 3869 | "0389 Performing PCI function reset!\n"); |
3870 | |||
3871 | /* Turn off parity checking and serr during the physical reset */ | ||
3872 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); | ||
3873 | pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & | ||
3874 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | ||
3875 | |||
3511 | /* Perform FCoE PCI function reset */ | 3876 | /* Perform FCoE PCI function reset */ |
3512 | lpfc_pci_function_reset(phba); | 3877 | lpfc_pci_function_reset(phba); |
3513 | 3878 | ||
3879 | /* Restore PCI cmd register */ | ||
3880 | pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); | ||
3881 | |||
3514 | return 0; | 3882 | return 0; |
3515 | } | 3883 | } |
3516 | 3884 | ||
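The reordered brdreset path masks parity and SERR reporting in PCI_COMMAND immediately before the function reset and, new in this hunk, restores the saved value afterwards, so the reset cannot raise spurious PCI errors while the device still returns to its previous error-reporting configuration. The save/mask/restore shape, with the reset step abstracted into a caller-supplied hook, is:

    #include <linux/pci.h>

    static void reset_with_masked_errors(struct pci_dev *pdev,
                                         void (*do_reset)(struct pci_dev *))
    {
            u16 cfg_value;

            /* Save PCI_COMMAND, then mask parity and SERR reporting for the reset. */
            pci_read_config_word(pdev, PCI_COMMAND, &cfg_value);
            pci_write_config_word(pdev, PCI_COMMAND,
                                  cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));

            do_reset(pdev);                 /* e.g. the FCoE PCI function reset */

            /* Restore the original command register once the reset has completed. */
            pci_write_config_word(pdev, PCI_COMMAND, cfg_value);
    }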
@@ -4317,6 +4685,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4317 | struct lpfc_vport *vport = phba->pport; | 4685 | struct lpfc_vport *vport = phba->pport; |
4318 | struct lpfc_dmabuf *mp; | 4686 | struct lpfc_dmabuf *mp; |
4319 | 4687 | ||
4688 | /* | ||
4689 | * TODO: Why does this routine execute these task in a different | ||
4690 | * order from probe? | ||
4691 | */ | ||
4320 | /* Perform a PCI function reset to start from clean */ | 4692 | /* Perform a PCI function reset to start from clean */ |
4321 | rc = lpfc_pci_function_reset(phba); | 4693 | rc = lpfc_pci_function_reset(phba); |
4322 | if (unlikely(rc)) | 4694 | if (unlikely(rc)) |
@@ -4357,13 +4729,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4357 | } | 4729 | } |
4358 | 4730 | ||
4359 | rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); | 4731 | rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); |
4360 | if (unlikely(rc)) | 4732 | if (unlikely(rc)) { |
4361 | goto out_free_vpd; | 4733 | kfree(vpd); |
4362 | 4734 | goto out_free_mbox; | |
4735 | } | ||
4363 | mqe = &mboxq->u.mqe; | 4736 | mqe = &mboxq->u.mqe; |
4364 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); | 4737 | phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); |
4365 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) | 4738 | if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) |
4366 | phba->hba_flag |= HBA_FCOE_SUPPORT; | 4739 | phba->hba_flag |= HBA_FCOE_MODE; |
4740 | else | ||
4741 | phba->hba_flag &= ~HBA_FCOE_MODE; | ||
4367 | 4742 | ||
4368 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == | 4743 | if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == |
4369 | LPFC_DCBX_CEE_MODE) | 4744 | LPFC_DCBX_CEE_MODE) |
@@ -4372,13 +4747,14 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4372 | phba->hba_flag &= ~HBA_FIP_SUPPORT; | 4747 | phba->hba_flag &= ~HBA_FIP_SUPPORT; |
4373 | 4748 | ||
4374 | if (phba->sli_rev != LPFC_SLI_REV4 || | 4749 | if (phba->sli_rev != LPFC_SLI_REV4 || |
4375 | !(phba->hba_flag & HBA_FCOE_SUPPORT)) { | 4750 | !(phba->hba_flag & HBA_FCOE_MODE)) { |
4376 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | 4751 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, |
4377 | "0376 READ_REV Error. SLI Level %d " | 4752 | "0376 READ_REV Error. SLI Level %d " |
4378 | "FCoE enabled %d\n", | 4753 | "FCoE enabled %d\n", |
4379 | phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT); | 4754 | phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); |
4380 | rc = -EIO; | 4755 | rc = -EIO; |
4381 | goto out_free_vpd; | 4756 | kfree(vpd); |
4757 | goto out_free_mbox; | ||
4382 | } | 4758 | } |
4383 | /* | 4759 | /* |
4384 | * Evaluate the read rev and vpd data. Populate the driver | 4760 | * Evaluate the read rev and vpd data. Populate the driver |
@@ -4392,6 +4768,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4392 | "Using defaults.\n", rc); | 4768 | "Using defaults.\n", rc); |
4393 | rc = 0; | 4769 | rc = 0; |
4394 | } | 4770 | } |
4771 | kfree(vpd); | ||
4395 | 4772 | ||
4396 | /* Save information as VPD data */ | 4773 | /* Save information as VPD data */ |
4397 | phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; | 4774 | phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; |
@@ -4428,7 +4805,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4428 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | 4805 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
4429 | if (unlikely(rc)) { | 4806 | if (unlikely(rc)) { |
4430 | rc = -EIO; | 4807 | rc = -EIO; |
4431 | goto out_free_vpd; | 4808 | goto out_free_mbox; |
4432 | } | 4809 | } |
4433 | 4810 | ||
4434 | /* | 4811 | /* |
@@ -4476,7 +4853,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4476 | if (rc) { | 4853 | if (rc) { |
4477 | phba->link_state = LPFC_HBA_ERROR; | 4854 | phba->link_state = LPFC_HBA_ERROR; |
4478 | rc = -ENOMEM; | 4855 | rc = -ENOMEM; |
4479 | goto out_free_vpd; | 4856 | goto out_free_mbox; |
4480 | } | 4857 | } |
4481 | 4858 | ||
4482 | mboxq->vport = vport; | 4859 | mboxq->vport = vport; |
@@ -4501,7 +4878,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4501 | rc, bf_get(lpfc_mqe_status, mqe)); | 4878 | rc, bf_get(lpfc_mqe_status, mqe)); |
4502 | phba->link_state = LPFC_HBA_ERROR; | 4879 | phba->link_state = LPFC_HBA_ERROR; |
4503 | rc = -EIO; | 4880 | rc = -EIO; |
4504 | goto out_free_vpd; | 4881 | goto out_free_mbox; |
4505 | } | 4882 | } |
4506 | 4883 | ||
4507 | if (phba->cfg_soft_wwnn) | 4884 | if (phba->cfg_soft_wwnn) |
@@ -4526,7 +4903,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4526 | "0582 Error %d during sgl post operation\n", | 4903 | "0582 Error %d during sgl post operation\n", |
4527 | rc); | 4904 | rc); |
4528 | rc = -ENODEV; | 4905 | rc = -ENODEV; |
4529 | goto out_free_vpd; | 4906 | goto out_free_mbox; |
4530 | } | 4907 | } |
4531 | 4908 | ||
4532 | /* Register SCSI SGL pool to the device */ | 4909 | /* Register SCSI SGL pool to the device */ |
@@ -4538,7 +4915,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4538 | /* Some Scsi buffers were moved to the abort scsi list */ | 4915 | /* Some Scsi buffers were moved to the abort scsi list */ |
4539 | /* A pci function reset will repost them */ | 4916 | /* A pci function reset will repost them */ |
4540 | rc = -ENODEV; | 4917 | rc = -ENODEV; |
4541 | goto out_free_vpd; | 4918 | goto out_free_mbox; |
4542 | } | 4919 | } |
4543 | 4920 | ||
4544 | /* Post the rpi header region to the device. */ | 4921 | /* Post the rpi header region to the device. */ |
@@ -4548,7 +4925,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4548 | "0393 Error %d during rpi post operation\n", | 4925 | "0393 Error %d during rpi post operation\n", |
4549 | rc); | 4926 | rc); |
4550 | rc = -ENODEV; | 4927 | rc = -ENODEV; |
4551 | goto out_free_vpd; | 4928 | goto out_free_mbox; |
4552 | } | 4929 | } |
4553 | 4930 | ||
4554 | /* Set up all the queues to the device */ | 4931 | /* Set up all the queues to the device */ |
@@ -4608,33 +4985,33 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) | |||
4608 | } | 4985 | } |
4609 | } | 4986 | } |
4610 | 4987 | ||
4988 | if (!(phba->hba_flag & HBA_FCOE_MODE)) { | ||
4989 | /* | ||
4990 | * The FC Port needs to register FCFI (index 0) | ||
4991 | */ | ||
4992 | lpfc_reg_fcfi(phba, mboxq); | ||
4993 | mboxq->vport = phba->pport; | ||
4994 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
4995 | if (rc == MBX_SUCCESS) | ||
4996 | rc = 0; | ||
4997 | else | ||
4998 | goto out_unset_queue; | ||
4999 | } | ||
4611 | /* | 5000 | /* |
4612 | * The port is ready, set the host's link state to LINK_DOWN | 5001 | * The port is ready, set the host's link state to LINK_DOWN |
4613 | * in preparation for link interrupts. | 5002 | * in preparation for link interrupts. |
4614 | */ | 5003 | */ |
4615 | lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); | ||
4616 | mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
4617 | lpfc_set_loopback_flag(phba); | ||
4618 | /* Change driver state to LPFC_LINK_DOWN right before init link */ | ||
4619 | spin_lock_irq(&phba->hbalock); | 5004 | spin_lock_irq(&phba->hbalock); |
4620 | phba->link_state = LPFC_LINK_DOWN; | 5005 | phba->link_state = LPFC_LINK_DOWN; |
4621 | spin_unlock_irq(&phba->hbalock); | 5006 | spin_unlock_irq(&phba->hbalock); |
4622 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | 5007 | rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); |
4623 | if (unlikely(rc != MBX_NOT_FINISHED)) { | 5008 | out_unset_queue: |
4624 | kfree(vpd); | ||
4625 | return 0; | ||
4626 | } else | ||
4627 | rc = -EIO; | ||
4628 | |||
4629 | /* Unset all the queues set up in this routine when error out */ | 5009 | /* Unset all the queues set up in this routine when error out */ |
4630 | if (rc) | 5010 | if (rc) |
4631 | lpfc_sli4_queue_unset(phba); | 5011 | lpfc_sli4_queue_unset(phba); |
4632 | |||
4633 | out_stop_timers: | 5012 | out_stop_timers: |
4634 | if (rc) | 5013 | if (rc) |
4635 | lpfc_stop_hba_timers(phba); | 5014 | lpfc_stop_hba_timers(phba); |
4636 | out_free_vpd: | ||
4637 | kfree(vpd); | ||
4638 | out_free_mbox: | 5015 | out_free_mbox: |
4639 | mempool_free(mboxq, phba->mbox_mem_pool); | 5016 | mempool_free(mboxq, phba->mbox_mem_pool); |
4640 | return rc; | 5017 | return rc; |
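The lpfc_sli4_hba_setup hunks above retire the out_free_vpd label: the vpd buffer is freed as soon as the READ_REV data has been parsed (or immediately on the two early failures), so later error paths fall back to out_free_mbox, and the new out_unset_queue label covers the FC FCFI-registration and init-link path added at the end. Below is a generic sketch of that "free early instead of at a cleanup label" restructuring; the function and buffer names are invented for illustration.

/* Generic sketch of freeing a temporary buffer as soon as it has been
 * consumed, so later error paths need fewer cleanup labels.  Names are
 * illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_vpd(const char *vpd) { return strlen(vpd) ? 0 : -1; }

static int setup(void)
{
	int rc;
	char *vpd = malloc(64);

	if (!vpd)
		return -1;
	strcpy(vpd, "dummy vpd data");

	rc = parse_vpd(vpd);
	free(vpd);          /* freed right after use: no out_free_vpd label */
	if (rc)
		return rc;

	/* ... the rest of the setup only needs its own cleanup ... */
	return 0;
}

int main(void)
{
	printf("setup rc = %d\n", setup());
	return 0;
}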
@@ -5863,6 +6240,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
5863 | IOCB_t *icmd; | 6240 | IOCB_t *icmd; |
5864 | int numBdes = 0; | 6241 | int numBdes = 0; |
5865 | int i = 0; | 6242 | int i = 0; |
6243 | uint32_t offset = 0; /* accumulated offset in the sg request list */ | ||
6244 | int inbound = 0; /* number of sg reply entries inbound from firmware */ | ||
5866 | 6245 | ||
5867 | if (!piocbq || !sglq) | 6246 | if (!piocbq || !sglq) |
5868 | return xritag; | 6247 | return xritag; |
@@ -5897,6 +6276,20 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, | |||
5897 | */ | 6276 | */ |
5898 | bde.tus.w = le32_to_cpu(bpl->tus.w); | 6277 | bde.tus.w = le32_to_cpu(bpl->tus.w); |
5899 | sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); | 6278 | sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); |
6279 | /* The offsets in the sgl need to be accumulated | ||
6280 | * separately for the request and reply lists. | ||
6281 | * The request is always first, the reply follows. | ||
6282 | */ | ||
6283 | if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { | ||
6284 | /* add up the reply sg entries */ | ||
6285 | if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) | ||
6286 | inbound++; | ||
6287 | /* first inbound? reset the offset */ | ||
6288 | if (inbound == 1) | ||
6289 | offset = 0; | ||
6290 | bf_set(lpfc_sli4_sge_offset, sgl, offset); | ||
6291 | offset += bde.tus.f.bdeSize; | ||
6292 | } | ||
5900 | bpl++; | 6293 | bpl++; |
5901 | sgl++; | 6294 | sgl++; |
5902 | } | 6295 | } |
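For CMD_GEN_REQUEST64_CR the converted SGL now carries a running offset per entry, accumulated separately for the request and reply halves of the list (the offset restarts when the first inbound/reply BDE is seen). The sketch below models only that accumulation over an array of (direction, size) entries; the entry layout is invented and is not the driver's BDE format.

/* Sketch of accumulating per-entry offsets separately for the request
 * (outbound) and reply (inbound) parts of a scatter list.  The entry
 * layout is invented for illustration. */
#include <stdio.h>

struct entry { int inbound; unsigned size; };

int main(void)
{
	struct entry list[] = {
		{ 0, 512 }, { 0, 256 },           /* request BDEs first */
		{ 1, 128 }, { 1, 128 }, { 1, 64 } /* reply BDEs follow  */
	};
	unsigned offset = 0;
	int i, inbound = 0;

	for (i = 0; i < (int)(sizeof(list) / sizeof(list[0])); i++) {
		if (list[i].inbound && ++inbound == 1)
			offset = 0;            /* first reply entry: restart */
		printf("entry %d: offset %u, len %u\n", i, offset, list[i].size);
		offset += list[i].size;
	}
	return 0;
}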
@@ -6028,11 +6421,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6028 | bf_set(els_req64_vf, &wqe->els_req, 0); | 6421 | bf_set(els_req64_vf, &wqe->els_req, 0); |
6029 | /* And a VFID for word 12 */ | 6422 | /* And a VFID for word 12 */ |
6030 | bf_set(els_req64_vfid, &wqe->els_req, 0); | 6423 | bf_set(els_req64_vfid, &wqe->els_req, 0); |
6031 | /* | ||
6032 | * Set ct field to 3, indicates that the context_tag field | ||
6033 | * contains the FCFI and remote N_Port_ID is | ||
6034 | * in word 5. | ||
6035 | */ | ||
6036 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); | 6424 | ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); |
6037 | bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, | 6425 | bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, |
6038 | iocbq->iocb.ulpContext); | 6426 | iocbq->iocb.ulpContext); |
@@ -6140,6 +6528,18 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
6140 | bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); | 6528 | bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); |
6141 | break; | 6529 | break; |
6142 | case CMD_GEN_REQUEST64_CR: | 6530 | case CMD_GEN_REQUEST64_CR: |
6531 | /* For this command calculate the xmit length of the | ||
6532 | * request bde. | ||
6533 | */ | ||
6534 | xmit_len = 0; | ||
6535 | numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / | ||
6536 | sizeof(struct ulp_bde64); | ||
6537 | for (i = 0; i < numBdes; i++) { | ||
6538 | if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) | ||
6539 | break; | ||
6540 | bde.tus.w = le32_to_cpu(bpl[i].tus.w); | ||
6541 | xmit_len += bde.tus.f.bdeSize; | ||
6542 | } | ||
6143 | /* word3 iocb=IO_TAG wqe=request_payload_len */ | 6543 | /* word3 iocb=IO_TAG wqe=request_payload_len */ |
6144 | wqe->gen_req.request_payload_len = xmit_len; | 6544 | wqe->gen_req.request_payload_len = xmit_len; |
6145 | /* word4 iocb=parameter wqe=relative_offset memcpy */ | 6545 | /* word4 iocb=parameter wqe=relative_offset memcpy */ |
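In the CMD_GEN_REQUEST64_CR case the WQE's request_payload_len is now computed by summing the sizes of the leading outbound BDEs and stopping at the first BDE that is not a plain 64-bit data descriptor, so the reply buffers that follow are excluded. A standalone sketch of that summation, with an invented descriptor layout:

/* Sketch: sum the sizes of the leading "outbound" descriptors and stop
 * at the first descriptor of a different type.  Layout is illustrative. */
#include <stdio.h>

#define TYPE_DATA_OUT 0      /* stands in for BUFF_TYPE_BDE_64  */
#define TYPE_DATA_IN  1      /* stands in for BUFF_TYPE_BDE_64I */

struct bde { int type; unsigned size; };

int main(void)
{
	struct bde bpl[] = {
		{ TYPE_DATA_OUT, 1024 }, { TYPE_DATA_OUT, 512 },
		{ TYPE_DATA_IN,  2048 }                  /* reply buffer */
	};
	unsigned xmit_len = 0;
	int i, n = sizeof(bpl) / sizeof(bpl[0]);

	for (i = 0; i < n; i++) {
		if (bpl[i].type != TYPE_DATA_OUT)
			break;                 /* reply BDEs are not counted */
		xmit_len += bpl[i].size;
	}
	printf("request payload length = %u\n", xmit_len);    /* 1536 */
	return 0;
}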
@@ -6320,7 +6720,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, | |||
6320 | return IOCB_BUSY; | 6720 | return IOCB_BUSY; |
6321 | } | 6721 | } |
6322 | } else { | 6722 | } else { |
6323 | sglq = __lpfc_sli_get_sglq(phba); | 6723 | sglq = __lpfc_sli_get_sglq(phba, piocb); |
6324 | if (!sglq) { | 6724 | if (!sglq) { |
6325 | if (!(flag & SLI_IOCB_RET_IOCB)) { | 6725 | if (!(flag & SLI_IOCB_RET_IOCB)) { |
6326 | __lpfc_sli_ringtx_put(phba, | 6726 | __lpfc_sli_ringtx_put(phba, |
@@ -8033,29 +8433,66 @@ static int | |||
8033 | lpfc_sli4_eratt_read(struct lpfc_hba *phba) | 8433 | lpfc_sli4_eratt_read(struct lpfc_hba *phba) |
8034 | { | 8434 | { |
8035 | uint32_t uerr_sta_hi, uerr_sta_lo; | 8435 | uint32_t uerr_sta_hi, uerr_sta_lo; |
8436 | uint32_t if_type, portsmphr; | ||
8437 | struct lpfc_register portstat_reg; | ||
8036 | 8438 | ||
8037 | /* For now, use the SLI4 device internal unrecoverable error | 8439 | /* |
8440 | * For now, use the SLI4 device internal unrecoverable error | ||
8038 | * registers for error attention. This can be changed later. | 8441 | * registers for error attention. This can be changed later. |
8039 | */ | 8442 | */ |
8040 | uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); | 8443 | if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); |
8041 | uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); | 8444 | switch (if_type) { |
8042 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || | 8445 | case LPFC_SLI_INTF_IF_TYPE_0: |
8043 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { | 8446 | uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); |
8447 | uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); | ||
8448 | if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || | ||
8449 | (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { | ||
8450 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
8451 | "1423 HBA Unrecoverable error: " | ||
8452 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | ||
8453 | "ue_mask_lo_reg=0x%x, " | ||
8454 | "ue_mask_hi_reg=0x%x\n", | ||
8455 | uerr_sta_lo, uerr_sta_hi, | ||
8456 | phba->sli4_hba.ue_mask_lo, | ||
8457 | phba->sli4_hba.ue_mask_hi); | ||
8458 | phba->work_status[0] = uerr_sta_lo; | ||
8459 | phba->work_status[1] = uerr_sta_hi; | ||
8460 | phba->work_ha |= HA_ERATT; | ||
8461 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8462 | return 1; | ||
8463 | } | ||
8464 | break; | ||
8465 | case LPFC_SLI_INTF_IF_TYPE_2: | ||
8466 | portstat_reg.word0 = | ||
8467 | readl(phba->sli4_hba.u.if_type2.STATUSregaddr); | ||
8468 | portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); | ||
8469 | if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { | ||
8470 | phba->work_status[0] = | ||
8471 | readl(phba->sli4_hba.u.if_type2.ERR1regaddr); | ||
8472 | phba->work_status[1] = | ||
8473 | readl(phba->sli4_hba.u.if_type2.ERR2regaddr); | ||
8474 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
8475 | "2885 Port Error Detected: " | ||
8476 | "port status reg 0x%x, " | ||
8477 | "port smphr reg 0x%x, " | ||
8478 | "error 1=0x%x, error 2=0x%x\n", | ||
8479 | portstat_reg.word0, | ||
8480 | portsmphr, | ||
8481 | phba->work_status[0], | ||
8482 | phba->work_status[1]); | ||
8483 | phba->work_ha |= HA_ERATT; | ||
8484 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8485 | return 1; | ||
8486 | } | ||
8487 | break; | ||
8488 | case LPFC_SLI_INTF_IF_TYPE_1: | ||
8489 | default: | ||
8044 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8490 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8045 | "1423 HBA Unrecoverable error: " | 8491 | "2886 HBA Error Attention on unsupported " |
8046 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | 8492 | "if type %d.", if_type); |
8047 | "ue_mask_lo_reg=0x%x, ue_mask_hi_reg=0x%x\n", | ||
8048 | uerr_sta_lo, uerr_sta_hi, | ||
8049 | phba->sli4_hba.ue_mask_lo, | ||
8050 | phba->sli4_hba.ue_mask_hi); | ||
8051 | phba->work_status[0] = uerr_sta_lo; | ||
8052 | phba->work_status[1] = uerr_sta_hi; | ||
8053 | /* Set the driver HA work bitmap */ | ||
8054 | phba->work_ha |= HA_ERATT; | ||
8055 | /* Indicate polling handles this ERATT */ | ||
8056 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
8057 | return 1; | 8493 | return 1; |
8058 | } | 8494 | } |
8495 | |||
8059 | return 0; | 8496 | return 0; |
8060 | } | 8497 | } |
8061 | 8498 | ||
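Error-attention detection in lpfc_sli4_eratt_read is now dispatched on the SLI interface type: if-type 0 ports keep reading the UERR_STATUS_LO/HI pair and comparing against the UE masks, if-type 2 ports read the port status register and, on error, the ERR1/ERR2 registers, and anything else is logged as unsupported. A minimal sketch of that dispatch shape, with the register reads replaced by canned values:

/* Sketch of dispatching error-attention detection on an interface type.
 * The register reads are replaced by constants; only the control flow
 * mirrors the change. */
#include <stdio.h>

enum { IF_TYPE_0, IF_TYPE_1, IF_TYPE_2 };

static int eratt_read(int if_type)
{
	switch (if_type) {
	case IF_TYPE_0: {
		unsigned uerr_lo = 0x0, uerr_hi = 0x0;  /* pretend UERR regs */
		unsigned mask_lo = ~0u, mask_hi = ~0u;  /* pretend UE masks  */
		if ((~mask_lo & uerr_lo) || (~mask_hi & uerr_hi)) {
			printf("unrecoverable error\n");
			return 1;
		}
		return 0;
	}
	case IF_TYPE_2: {
		int port_error = 0;                    /* pretend status bit */
		if (port_error) {
			printf("port error, read ERR1/ERR2\n");
			return 1;
		}
		return 0;
	}
	default:
		printf("error attention on unsupported if type %d\n", if_type);
		return 1;
	}
}

int main(void)
{
	printf("if_type 0 -> %d\n", eratt_read(IF_TYPE_0));
	printf("if_type 1 -> %d\n", eratt_read(IF_TYPE_1));
	printf("if_type 2 -> %d\n", eratt_read(IF_TYPE_2));
	return 0;
}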
@@ -8110,7 +8547,7 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) | |||
8110 | ha_copy = lpfc_sli_eratt_read(phba); | 8547 | ha_copy = lpfc_sli_eratt_read(phba); |
8111 | break; | 8548 | break; |
8112 | case LPFC_SLI_REV4: | 8549 | case LPFC_SLI_REV4: |
8113 | /* Read devcie Uncoverable Error (UERR) registers */ | 8550 | /* Read device Unrecoverable Error (UERR) registers */
8114 | ha_copy = lpfc_sli4_eratt_read(phba); | 8551 | ha_copy = lpfc_sli4_eratt_read(phba); |
8115 | break; | 8552 | break; |
8116 | default: | 8553 | default: |
@@ -10155,16 +10592,20 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, | |||
10155 | length, LPFC_SLI4_MBX_EMBED); | 10592 | length, LPFC_SLI4_MBX_EMBED); |
10156 | 10593 | ||
10157 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; | 10594 | mq_create_ext = &mbox->u.mqe.un.mq_create_ext; |
10158 | bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, | 10595 | bf_set(lpfc_mbx_mq_create_ext_num_pages, |
10159 | mq->page_count); | 10596 | &mq_create_ext->u.request, mq->page_count); |
10160 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request, | 10597 | bf_set(lpfc_mbx_mq_create_ext_async_evt_link, |
10161 | 1); | 10598 | &mq_create_ext->u.request, 1); |
10162 | bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste, | 10599 | bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, |
10163 | &mq_create_ext->u.request, 1); | 10600 | &mq_create_ext->u.request, 1); |
10164 | bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, | 10601 | bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, |
10165 | &mq_create_ext->u.request, 1); | 10602 | &mq_create_ext->u.request, 1); |
10166 | bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, | 10603 | bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, |
10167 | cq->queue_id); | 10604 | &mq_create_ext->u.request, 1); |
10605 | bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, | ||
10606 | &mq_create_ext->u.request, 1); | ||
10607 | bf_set(lpfc_mq_context_cq_id, | ||
10608 | &mq_create_ext->u.request.context, cq->queue_id); | ||
10168 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); | 10609 | bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); |
10169 | switch (mq->entry_count) { | 10610 | switch (mq->entry_count) { |
10170 | default: | 10611 | default: |
@@ -11137,7 +11578,8 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, | |||
11137 | static int | 11578 | static int |
11138 | lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) | 11579 | lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) |
11139 | { | 11580 | { |
11140 | char *rctl_names[] = FC_RCTL_NAMES_INIT; | 11581 | /* make rctl_names static to save stack space */ |
11582 | static char *rctl_names[] = FC_RCTL_NAMES_INIT; | ||
11141 | char *type_names[] = FC_TYPE_NAMES_INIT; | 11583 | char *type_names[] = FC_TYPE_NAMES_INIT; |
11142 | struct fc_vft_header *fc_vft_hdr; | 11584 | struct fc_vft_header *fc_vft_hdr; |
11143 | 11585 | ||
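As the added comment says, the R_CTL name table in lpfc_fc_frame_check is made static so the array of string pointers lives in the data segment instead of being rebuilt on the stack on every call. A tiny sketch of the difference, with an invented name table:

/* Sketch: a lookup table declared static is emitted once in the data
 * segment; declared automatic, it would be re-initialised on the stack
 * on every call.  The table here is invented for illustration. */
#include <stdio.h>

static const char *frame_kind(int idx)
{
	static const char *names[] = { "uncat", "solicited", "unsol-ctl",
				       "solicited-ctl", "unsol-data" };
	if (idx < 0 || idx >= (int)(sizeof(names) / sizeof(names[0])))
		return "unknown";
	return names[idx];
}

int main(void)
{
	printf("%s\n", frame_kind(1));
	printf("%s\n", frame_kind(9));
	return 0;
}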
@@ -11538,6 +11980,10 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11538 | "SID:x%x\n", oxid, sid); | 11980 | "SID:x%x\n", oxid, sid); |
11539 | return; | 11981 | return; |
11540 | } | 11982 | } |
11983 | if (rxid >= phba->sli4_hba.max_cfg_param.xri_base | ||
11984 | && rxid <= (phba->sli4_hba.max_cfg_param.max_xri | ||
11985 | + phba->sli4_hba.max_cfg_param.xri_base)) | ||
11986 | lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); | ||
11541 | 11987 | ||
11542 | /* Allocate buffer for acc iocb */ | 11988 | /* Allocate buffer for acc iocb */ |
11543 | ctiocb = lpfc_sli_get_iocbq(phba); | 11989 | ctiocb = lpfc_sli_get_iocbq(phba); |
@@ -11560,6 +12006,7 @@ lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, | |||
11560 | icmd->ulpLe = 1; | 12006 | icmd->ulpLe = 1; |
11561 | icmd->ulpClass = CLASS3; | 12007 | icmd->ulpClass = CLASS3; |
11562 | icmd->ulpContext = ndlp->nlp_rpi; | 12008 | icmd->ulpContext = ndlp->nlp_rpi; |
12009 | ctiocb->context1 = ndlp; | ||
11563 | 12010 | ||
11564 | ctiocb->iocb_cmpl = NULL; | 12011 | ctiocb->iocb_cmpl = NULL; |
11565 | ctiocb->vport = phba->pport; | 12012 | ctiocb->vport = phba->pport; |
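The added lines mark the exchange's RRQ active only when the RX_ID falls inside this port's XRI range, and store the node pointer in ctiocb->context1 for the completion path. A minimal sketch of the inclusive range test, with invented base and count values:

/* Sketch of the inclusive XRI range test used before activating an RRQ.
 * base and count are invented example values. */
#include <stdio.h>

int main(void)
{
	unsigned xri_base = 0x100, max_xri = 0x40;   /* illustrative values */
	unsigned rxid = 0x120;

	if (rxid >= xri_base && rxid <= xri_base + max_xri)
		printf("rxid 0x%x is ours: set RRQ active\n", rxid);
	else
		printf("rxid 0x%x is outside our XRI range\n", rxid);
	return 0;
}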
@@ -12129,42 +12576,37 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) | |||
12129 | 12576 | ||
12130 | /** | 12577 | /** |
12131 | * lpfc_sli4_init_vpi - Initialize a vpi with the port | 12578 | * lpfc_sli4_init_vpi - Initialize a vpi with the port |
12132 | * @phba: pointer to lpfc hba data structure. | 12579 | * @vport: Pointer to the vport for which the vpi is being initialized |
12133 | * @vpi: vpi value to activate with the port. | ||
12134 | * | 12580 | * |
12135 | * This routine is invoked to activate a vpi with the | 12581 | * This routine is invoked to activate a vpi with the port. |
12136 | * port when the host intends to use vports with a | ||
12137 | * nonzero vpi. | ||
12138 | * | 12582 | * |
12139 | * Returns: | 12583 | * Returns: |
12140 | * 0 success | 12584 | * 0 success |
12141 | * -Evalue otherwise | 12585 | * -Evalue otherwise |
12142 | **/ | 12586 | **/ |
12143 | int | 12587 | int |
12144 | lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) | 12588 | lpfc_sli4_init_vpi(struct lpfc_vport *vport) |
12145 | { | 12589 | { |
12146 | LPFC_MBOXQ_t *mboxq; | 12590 | LPFC_MBOXQ_t *mboxq; |
12147 | int rc = 0; | 12591 | int rc = 0; |
12148 | int retval = MBX_SUCCESS; | 12592 | int retval = MBX_SUCCESS; |
12149 | uint32_t mbox_tmo; | 12593 | uint32_t mbox_tmo; |
12150 | 12594 | struct lpfc_hba *phba = vport->phba; | |
12151 | if (vpi == 0) | ||
12152 | return -EINVAL; | ||
12153 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 12595 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
12154 | if (!mboxq) | 12596 | if (!mboxq) |
12155 | return -ENOMEM; | 12597 | return -ENOMEM; |
12156 | lpfc_init_vpi(phba, mboxq, vpi); | 12598 | lpfc_init_vpi(phba, mboxq, vport->vpi); |
12157 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); | 12599 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); |
12158 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); | 12600 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); |
12159 | if (rc != MBX_SUCCESS) { | 12601 | if (rc != MBX_SUCCESS) { |
12160 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | 12602 | lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, |
12161 | "2022 INIT VPI Mailbox failed " | 12603 | "2022 INIT VPI Mailbox failed " |
12162 | "status %d, mbxStatus x%x\n", rc, | 12604 | "status %d, mbxStatus x%x\n", rc, |
12163 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); | 12605 | bf_get(lpfc_mqe_status, &mboxq->u.mqe)); |
12164 | retval = -EIO; | 12606 | retval = -EIO; |
12165 | } | 12607 | } |
12166 | if (rc != MBX_TIMEOUT) | 12608 | if (rc != MBX_TIMEOUT) |
12167 | mempool_free(mboxq, phba->mbox_mem_pool); | 12609 | mempool_free(mboxq, vport->phba->mbox_mem_pool); |
12168 | 12610 | ||
12169 | return retval; | 12611 | return retval; |
12170 | } | 12612 | } |
@@ -12854,6 +13296,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) | |||
12854 | struct lpfc_nodelist *act_mbx_ndlp = NULL; | 13296 | struct lpfc_nodelist *act_mbx_ndlp = NULL; |
12855 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 13297 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
12856 | LIST_HEAD(mbox_cmd_list); | 13298 | LIST_HEAD(mbox_cmd_list); |
13299 | uint8_t restart_loop; | ||
12857 | 13300 | ||
12858 | /* Clean up internally queued mailbox commands with the vport */ | 13301 | /* Clean up internally queued mailbox commands with the vport */ |
12859 | spin_lock_irq(&phba->hbalock); | 13302 | spin_lock_irq(&phba->hbalock); |
@@ -12882,15 +13325,44 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) | |||
12882 | mb->mbox_flag |= LPFC_MBX_IMED_UNREG; | 13325 | mb->mbox_flag |= LPFC_MBX_IMED_UNREG; |
12883 | } | 13326 | } |
12884 | } | 13327 | } |
13328 | /* Cleanup any mailbox completions which are not yet processed */ | ||
13329 | do { | ||
13330 | restart_loop = 0; | ||
13331 | list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { | ||
13332 | /* | ||
13333 | * If this mailbox is already processed or it is | ||
13334 | * for another vport ignore it. | ||
13335 | */ | ||
13336 | if ((mb->vport != vport) || | ||
13337 | (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) | ||
13338 | continue; | ||
13339 | |||
13340 | if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && | ||
13341 | (mb->u.mb.mbxCommand != MBX_REG_VPI)) | ||
13342 | continue; | ||
13343 | |||
13344 | mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
13345 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { | ||
13346 | ndlp = (struct lpfc_nodelist *)mb->context2; | ||
13347 | /* Unregister the RPI when mailbox complete */ | ||
13348 | mb->mbox_flag |= LPFC_MBX_IMED_UNREG; | ||
13349 | restart_loop = 1; | ||
13350 | spin_unlock_irq(&phba->hbalock); | ||
13351 | spin_lock(shost->host_lock); | ||
13352 | ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; | ||
13353 | spin_unlock(shost->host_lock); | ||
13354 | spin_lock_irq(&phba->hbalock); | ||
13355 | break; | ||
13356 | } | ||
13357 | } | ||
13358 | } while (restart_loop); | ||
13359 | |||
12885 | spin_unlock_irq(&phba->hbalock); | 13360 | spin_unlock_irq(&phba->hbalock); |
12886 | 13361 | ||
12887 | /* Release the cleaned-up mailbox commands */ | 13362 | /* Release the cleaned-up mailbox commands */ |
12888 | while (!list_empty(&mbox_cmd_list)) { | 13363 | while (!list_empty(&mbox_cmd_list)) { |
12889 | list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); | 13364 | list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); |
12890 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { | 13365 | if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { |
12891 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
12892 | __lpfc_sli4_free_rpi(phba, | ||
12893 | mb->u.mb.un.varRegLogin.rpi); | ||
12894 | mp = (struct lpfc_dmabuf *) (mb->context1); | 13366 | mp = (struct lpfc_dmabuf *) (mb->context1); |
12895 | if (mp) { | 13367 | if (mp) { |
12896 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); | 13368 | __lpfc_mbuf_free(phba, mp->virt, mp->phys); |
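lpfc_cleanup_pending_mbox now also walks the completed-mailbox queue; because it must drop hbalock to take the host lock while clearing the node flag, it restarts the list walk from the top whenever it changed an entry, tracked by restart_loop. The sketch below models only that drop-lock-and-restart iteration pattern, with an array standing in for the list and stubbed locks:

/* Sketch of the "restart the walk after releasing the lock" pattern.
 * The list is an array and the locks are stubs, purely for illustration. */
#include <stdio.h>

struct item { int mine; int flagged; };

static void lock(void)   { }
static void unlock(void) { }

int main(void)
{
	struct item q[] = { { 0, 0 }, { 1, 0 }, { 1, 0 }, { 0, 0 } };
	int n = sizeof(q) / sizeof(q[0]);
	int i, restart;

	lock();
	do {
		restart = 0;
		for (i = 0; i < n; i++) {
			if (!q[i].mine || q[i].flagged)
				continue;     /* skip foreign/handled items */
			q[i].flagged = 1;
			/* must drop the lock to touch other state, so the
			 * walk cannot safely continue from here */
			unlock();
			printf("handled item %d outside the lock\n", i);
			lock();
			restart = 1;
			break;
		}
	} while (restart);
	unlock();
	return 0;
}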
@@ -12948,12 +13420,13 @@ lpfc_drain_txq(struct lpfc_hba *phba) | |||
12948 | while (pring->txq_cnt) { | 13420 | while (pring->txq_cnt) { |
12949 | spin_lock_irqsave(&phba->hbalock, iflags); | 13421 | spin_lock_irqsave(&phba->hbalock, iflags); |
12950 | 13422 | ||
12951 | sglq = __lpfc_sli_get_sglq(phba); | 13423 | piocbq = lpfc_sli_ringtx_get(phba, pring); |
13424 | sglq = __lpfc_sli_get_sglq(phba, piocbq); | ||
12952 | if (!sglq) { | 13425 | if (!sglq) { |
13426 | __lpfc_sli_ringtx_put(phba, pring, piocbq); | ||
12953 | spin_unlock_irqrestore(&phba->hbalock, iflags); | 13427 | spin_unlock_irqrestore(&phba->hbalock, iflags); |
12954 | break; | 13428 | break; |
12955 | } else { | 13429 | } else { |
12956 | piocbq = lpfc_sli_ringtx_get(phba, pring); | ||
12957 | if (!piocbq) { | 13430 | if (!piocbq) { |
12958 | /* The txq_cnt out of sync. This should | 13431 | /* The txq_cnt out of sync. This should |
12959 | * never happen | 13432 | * never happen |
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index cd56d6cce6c3..453577c21c14 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -34,9 +34,11 @@ struct lpfc_cq_event { | |||
34 | union { | 34 | union { |
35 | struct lpfc_mcqe mcqe_cmpl; | 35 | struct lpfc_mcqe mcqe_cmpl; |
36 | struct lpfc_acqe_link acqe_link; | 36 | struct lpfc_acqe_link acqe_link; |
37 | struct lpfc_acqe_fcoe acqe_fcoe; | 37 | struct lpfc_acqe_fip acqe_fip; |
38 | struct lpfc_acqe_dcbx acqe_dcbx; | 38 | struct lpfc_acqe_dcbx acqe_dcbx; |
39 | struct lpfc_acqe_grp5 acqe_grp5; | 39 | struct lpfc_acqe_grp5 acqe_grp5; |
40 | struct lpfc_acqe_fc_la acqe_fc; | ||
41 | struct lpfc_acqe_sli acqe_sli; | ||
40 | struct lpfc_rcqe rcqe_cmpl; | 42 | struct lpfc_rcqe rcqe_cmpl; |
41 | struct sli4_wcqe_xri_aborted wcqe_axri; | 43 | struct sli4_wcqe_xri_aborted wcqe_axri; |
42 | struct lpfc_wcqe_complete wcqe_cmpl; | 44 | struct lpfc_wcqe_complete wcqe_cmpl; |
@@ -82,6 +84,7 @@ struct lpfc_iocbq { | |||
82 | struct lpfc_iocbq *rsp_iocb; | 84 | struct lpfc_iocbq *rsp_iocb; |
83 | struct lpfcMboxq *mbox; | 85 | struct lpfcMboxq *mbox; |
84 | struct lpfc_nodelist *ndlp; | 86 | struct lpfc_nodelist *ndlp; |
87 | struct lpfc_node_rrq *rrq; | ||
85 | } context_un; | 88 | } context_un; |
86 | 89 | ||
87 | void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, | 90 | void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index c4483feb8b71..c7217d579e0f 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -137,9 +137,11 @@ struct lpfc_sli4_link { | |||
137 | uint8_t speed; | 137 | uint8_t speed; |
138 | uint8_t duplex; | 138 | uint8_t duplex; |
139 | uint8_t status; | 139 | uint8_t status; |
140 | uint8_t physical; | 140 | uint8_t type; |
141 | uint8_t number; | ||
141 | uint8_t fault; | 142 | uint8_t fault; |
142 | uint16_t logical_speed; | 143 | uint16_t logical_speed; |
144 | uint16_t topology; | ||
143 | }; | 145 | }; |
144 | 146 | ||
145 | struct lpfc_fcf_rec { | 147 | struct lpfc_fcf_rec { |
@@ -367,23 +369,39 @@ struct lpfc_sli4_hba { | |||
367 | PCI BAR1, control registers */ | 369 | PCI BAR1, control registers */ |
368 | void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for | 370 | void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for |
369 | PCI BAR2, doorbell registers */ | 371 | PCI BAR2, doorbell registers */ |
370 | /* BAR0 PCI config space register memory map */ | 372 | union { |
371 | void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ | 373 | struct { |
372 | void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ | 374 | /* IF Type 0, BAR 0 PCI cfg space reg mem map */ |
373 | void __iomem *UEMASKLOregaddr; /* Address to UE_MASK_LO register */ | 375 | void __iomem *UERRLOregaddr; |
374 | void __iomem *UEMASKHIregaddr; /* Address to UE_MASK_HI register */ | 376 | void __iomem *UERRHIregaddr; |
375 | void __iomem *SLIINTFregaddr; /* Address to SLI_INTF register */ | 377 | void __iomem *UEMASKLOregaddr; |
376 | /* BAR1 FCoE function CSR register memory map */ | 378 | void __iomem *UEMASKHIregaddr; |
377 | void __iomem *STAregaddr; /* Address to HST_STATE register */ | 379 | } if_type0; |
378 | void __iomem *ISRregaddr; /* Address to HST_ISR register */ | 380 | struct { |
379 | void __iomem *IMRregaddr; /* Address to HST_IMR register */ | 381 | /* IF Type 2, BAR 0 PCI cfg space reg mem map. */ |
380 | void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ | 382 | void __iomem *STATUSregaddr; |
381 | /* BAR2 VF-0 doorbell register memory map */ | 383 | void __iomem *CTRLregaddr; |
382 | void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ | 384 | void __iomem *ERR1regaddr; |
383 | void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ | 385 | void __iomem *ERR2regaddr; |
384 | void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ | 386 | } if_type2; |
385 | void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ | 387 | } u; |
386 | void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ | 388 | |
389 | /* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */ | ||
390 | void __iomem *PSMPHRregaddr; | ||
391 | |||
392 | /* Well-known SLI INTF register memory map. */ | ||
393 | void __iomem *SLIINTFregaddr; | ||
394 | |||
395 | /* IF type 0, BAR 1 function CSR register memory map */ | ||
396 | void __iomem *ISRregaddr; /* HST_ISR register */ | ||
397 | void __iomem *IMRregaddr; /* HST_IMR register */ | ||
398 | void __iomem *ISCRregaddr; /* HST_ISCR register */ | ||
399 | /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */ | ||
400 | void __iomem *RQDBregaddr; /* RQ_DOORBELL register */ | ||
401 | void __iomem *WQDBregaddr; /* WQ_DOORBELL register */ | ||
402 | void __iomem *EQCQDBregaddr; /* EQCQ_DOORBELL register */ | ||
403 | void __iomem *MQDBregaddr; /* MQ_DOORBELL register */ | ||
404 | void __iomem *BMBXregaddr; /* BootStrap MBX register */ | ||
387 | 405 | ||
388 | uint32_t ue_mask_lo; | 406 | uint32_t ue_mask_lo; |
389 | uint32_t ue_mask_hi; | 407 | uint32_t ue_mask_hi; |
@@ -466,6 +484,7 @@ struct lpfc_sglq { | |||
466 | struct list_head clist; | 484 | struct list_head clist; |
467 | enum lpfc_sge_type buff_type; /* is this a scsi sgl */ | 485 | enum lpfc_sge_type buff_type; /* is this a scsi sgl */ |
468 | enum lpfc_sgl_state state; | 486 | enum lpfc_sgl_state state; |
487 | struct lpfc_nodelist *ndlp; /* ndlp associated with IO */ | ||
469 | uint16_t iotag; /* pre-assigned IO tag */ | 488 | uint16_t iotag; /* pre-assigned IO tag */ |
470 | uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ | 489 | uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ |
471 | struct sli4_sge *sgl; /* pre-assigned SGL */ | 490 | struct sli4_sge *sgl; /* pre-assigned SGL */ |
@@ -532,7 +551,6 @@ int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); | |||
532 | struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); | 551 | struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); |
533 | void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); | 552 | void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); |
534 | int lpfc_sli4_alloc_rpi(struct lpfc_hba *); | 553 | int lpfc_sli4_alloc_rpi(struct lpfc_hba *); |
535 | void __lpfc_sli4_free_rpi(struct lpfc_hba *, int); | ||
536 | void lpfc_sli4_free_rpi(struct lpfc_hba *, int); | 554 | void lpfc_sli4_free_rpi(struct lpfc_hba *, int); |
537 | void lpfc_sli4_remove_rpis(struct lpfc_hba *); | 555 | void lpfc_sli4_remove_rpis(struct lpfc_hba *); |
538 | void lpfc_sli4_async_event_proc(struct lpfc_hba *); | 556 | void lpfc_sli4_async_event_proc(struct lpfc_hba *); |
@@ -548,7 +566,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *); | |||
548 | int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); | 566 | int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); |
549 | void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); | 567 | void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); |
550 | int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); | 568 | int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); |
551 | int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); | 569 | int lpfc_sli4_init_vpi(struct lpfc_vport *); |
552 | uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); | 570 | uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); |
553 | uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); | 571 | uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); |
554 | void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); | 572 | void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 7a1b5b112a0b..386cf92de492 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.18" | 21 | #define LPFC_DRIVER_VERSION "8.3.20" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" | 23 | #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" |
24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" | 24 | #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index a5281ce893d0..6b8d2952e32f 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -395,8 +395,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
395 | * by the port. | 395 | * by the port. |
396 | */ | 396 | */ |
397 | if ((phba->sli_rev == LPFC_SLI_REV4) && | 397 | if ((phba->sli_rev == LPFC_SLI_REV4) && |
398 | (pport->fc_flag & FC_VFI_REGISTERED)) { | 398 | (pport->fc_flag & FC_VFI_REGISTERED)) { |
399 | rc = lpfc_sli4_init_vpi(phba, vpi); | 399 | rc = lpfc_sli4_init_vpi(vport); |
400 | if (rc) { | 400 | if (rc) { |
401 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, | 401 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, |
402 | "1838 Failed to INIT_VPI on vpi %d " | 402 | "1838 Failed to INIT_VPI on vpi %d " |
@@ -418,7 +418,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
418 | 418 | ||
419 | if ((phba->link_state < LPFC_LINK_UP) || | 419 | if ((phba->link_state < LPFC_LINK_UP) || |
420 | (pport->port_state < LPFC_FABRIC_CFG_LINK) || | 420 | (pport->port_state < LPFC_FABRIC_CFG_LINK) || |
421 | (phba->fc_topology == TOPOLOGY_LOOP)) { | 421 | (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { |
422 | lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); | 422 | lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); |
423 | rc = VPORT_OK; | 423 | rc = VPORT_OK; |
424 | goto out; | 424 | goto out; |
@@ -514,7 +514,7 @@ enable_vport(struct fc_vport *fc_vport) | |||
514 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | 514 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); |
515 | 515 | ||
516 | if ((phba->link_state < LPFC_LINK_UP) || | 516 | if ((phba->link_state < LPFC_LINK_UP) || |
517 | (phba->fc_topology == TOPOLOGY_LOOP)) { | 517 | (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) { |
518 | lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); | 518 | lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN); |
519 | return VPORT_OK; | 519 | return VPORT_OK; |
520 | } | 520 | } |
@@ -665,7 +665,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) | |||
665 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && | 665 | if (ndlp && NLP_CHK_NODE_ACT(ndlp) && |
666 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && | 666 | ndlp->nlp_state == NLP_STE_UNMAPPED_NODE && |
667 | phba->link_state >= LPFC_LINK_UP && | 667 | phba->link_state >= LPFC_LINK_UP && |
668 | phba->fc_topology != TOPOLOGY_LOOP) { | 668 | phba->fc_topology != LPFC_TOPOLOGY_LOOP) { |
669 | if (vport->cfg_enable_da_id) { | 669 | if (vport->cfg_enable_da_id) { |
670 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); | 670 | timeout = msecs_to_jiffies(phba->fc_ratov * 2000); |
671 | if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) | 671 | if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0)) |
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile index f469915b97c3..5826ed509e3e 100644 --- a/drivers/scsi/megaraid/Makefile +++ b/drivers/scsi/megaraid/Makefile | |||
@@ -1,3 +1,5 @@ | |||
1 | obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o | 1 | obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o |
2 | obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o | 2 | obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o |
3 | obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o | 3 | obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o |
4 | megaraid_sas-objs := megaraid_sas_base.o megaraid_sas_fusion.o \ | ||
5 | megaraid_sas_fp.o | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index ad16f5e60046..1b5e375732c0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -1,15 +1,30 @@ | |||
1 | /* | 1 | /* |
2 | * Linux MegaRAID driver for SAS based RAID controllers | ||
2 | * | 3 | * |
3 | * Linux MegaRAID driver for SAS based RAID controllers | 4 | * Copyright (c) 2009-2011 LSI Corporation. |
4 | * | 5 | * |
5 | * Copyright (c) 2003-2005 LSI Corporation. | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
6 | * | 10 | * |
7 | * This program is free software; you can redistribute it and/or | 11 | * This program is distributed in the hope that it will be useful, |
8 | * modify it under the terms of the GNU General Public License | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | * as published by the Free Software Foundation; either version | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | * 2 of the License, or (at your option) any later version. | 14 | * GNU General Public License for more details. |
11 | * | 15 | * |
12 | * FILE : megaraid_sas.h | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | * FILE: megaraid_sas.h | ||
21 | * | ||
22 | * Authors: LSI Corporation | ||
23 | * | ||
24 | * Send feedback to: <megaraidlinux@lsi.com> | ||
25 | * | ||
26 | * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 | ||
27 | * ATTN: Linuxraid | ||
13 | */ | 28 | */ |
14 | 29 | ||
15 | #ifndef LSI_MEGARAID_SAS_H | 30 | #ifndef LSI_MEGARAID_SAS_H |
@@ -18,9 +33,9 @@ | |||
18 | /* | 33 | /* |
19 | * MegaRAID SAS Driver meta data | 34 | * MegaRAID SAS Driver meta data |
20 | */ | 35 | */ |
21 | #define MEGASAS_VERSION "00.00.04.31-rc1" | 36 | #define MEGASAS_VERSION "00.00.05.29-rc1" |
22 | #define MEGASAS_RELDATE "May 3, 2010" | 37 | #define MEGASAS_RELDATE "Dec. 7, 2010" |
23 | #define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010" | 38 | #define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010" |
24 | 39 | ||
25 | /* | 40 | /* |
26 | * Device IDs | 41 | * Device IDs |
@@ -32,6 +47,7 @@ | |||
32 | #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 | 47 | #define PCI_DEVICE_ID_LSI_SAS0079GEN2 0x0079 |
33 | #define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 | 48 | #define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 |
34 | #define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 | 49 | #define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 |
50 | #define PCI_DEVICE_ID_LSI_FUSION 0x005b | ||
35 | 51 | ||
36 | /* | 52 | /* |
37 | * ===================================== | 53 | * ===================================== |
@@ -421,7 +437,6 @@ struct megasas_ctrl_prop { | |||
421 | * Add properties that can be controlled by | 437 | * Add properties that can be controlled by |
422 | * a bit in the following structure. | 438 | * a bit in the following structure. |
423 | */ | 439 | */ |
424 | |||
425 | struct { | 440 | struct { |
426 | u32 copyBackDisabled : 1; | 441 | u32 copyBackDisabled : 1; |
427 | u32 SMARTerEnabled : 1; | 442 | u32 SMARTerEnabled : 1; |
@@ -701,6 +716,7 @@ struct megasas_ctrl_info { | |||
701 | #define MEGASAS_DEFAULT_INIT_ID -1 | 716 | #define MEGASAS_DEFAULT_INIT_ID -1 |
702 | #define MEGASAS_MAX_LUN 8 | 717 | #define MEGASAS_MAX_LUN 8 |
703 | #define MEGASAS_MAX_LD 64 | 718 | #define MEGASAS_MAX_LD 64 |
719 | #define MEGASAS_DEFAULT_CMD_PER_LUN 128 | ||
704 | #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ | 720 | #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ |
705 | MEGASAS_MAX_DEV_PER_CHANNEL) | 721 | MEGASAS_MAX_DEV_PER_CHANNEL) |
706 | #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ | 722 | #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ |
@@ -769,7 +785,10 @@ struct megasas_ctrl_info { | |||
769 | */ | 785 | */ |
770 | 786 | ||
771 | struct megasas_register_set { | 787 | struct megasas_register_set { |
772 | u32 reserved_0[4]; /*0000h*/ | 788 | u32 doorbell; /*0000h*/ |
789 | u32 fusion_seq_offset; /*0004h*/ | ||
790 | u32 fusion_host_diag; /*0008h*/ | ||
791 | u32 reserved_01; /*000Ch*/ | ||
773 | 792 | ||
774 | u32 inbound_msg_0; /*0010h*/ | 793 | u32 inbound_msg_0; /*0010h*/ |
775 | u32 inbound_msg_1; /*0014h*/ | 794 | u32 inbound_msg_1; /*0014h*/ |
@@ -789,15 +808,18 @@ struct megasas_register_set { | |||
789 | u32 inbound_queue_port; /*0040h*/ | 808 | u32 inbound_queue_port; /*0040h*/ |
790 | u32 outbound_queue_port; /*0044h*/ | 809 | u32 outbound_queue_port; /*0044h*/ |
791 | 810 | ||
792 | u32 reserved_2[22]; /*0048h*/ | 811 | u32 reserved_2[9]; /*0048h*/ |
812 | u32 reply_post_host_index; /*006Ch*/ | ||
813 | u32 reserved_2_2[12]; /*0070h*/ | ||
793 | 814 | ||
794 | u32 outbound_doorbell_clear; /*00A0h*/ | 815 | u32 outbound_doorbell_clear; /*00A0h*/ |
795 | 816 | ||
796 | u32 reserved_3[3]; /*00A4h*/ | 817 | u32 reserved_3[3]; /*00A4h*/ |
797 | 818 | ||
798 | u32 outbound_scratch_pad ; /*00B0h*/ | 819 | u32 outbound_scratch_pad ; /*00B0h*/ |
820 | u32 outbound_scratch_pad_2; /*00B4h*/ | ||
799 | 821 | ||
800 | u32 reserved_4[3]; /*00B4h*/ | 822 | u32 reserved_4[2]; /*00B8h*/ |
801 | 823 | ||
802 | u32 inbound_low_queue_port ; /*00C0h*/ | 824 | u32 inbound_low_queue_port ; /*00C0h*/ |
803 | 825 | ||
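The megasas_register_set hunk carves reply_post_host_index (006Ch) and outbound_scratch_pad_2 (00B4h) out of previously reserved ranges; the reserved arrays shrink so every offset comment stays true. A standalone offsetof check of that invariant, on a trimmed layout that collapses the untouched leading registers into one pad array (not the real struct):

/* Sketch: verify that carving named fields out of reserved ranges keeps
 * the documented offsets.  This is a trimmed-down layout, not the real
 * megasas_register_set. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct regs_sketch {
	uint32_t pad_0000[18];            /* 0000h..0044h collapsed          */
	uint32_t reserved_2[9];           /* 0048h                           */
	uint32_t reply_post_host_index;   /* 006Ch (was inside reserved_2)   */
	uint32_t reserved_2_2[12];        /* 0070h                           */
	uint32_t outbound_doorbell_clear; /* 00A0h                           */
	uint32_t reserved_3[3];           /* 00A4h                           */
	uint32_t outbound_scratch_pad;    /* 00B0h                           */
	uint32_t outbound_scratch_pad_2;  /* 00B4h (was inside reserved_4)   */
	uint32_t reserved_4[2];           /* 00B8h                           */
	uint32_t inbound_low_queue_port;  /* 00C0h                           */
};

int main(void)
{
	printf("reply_post_host_index  @ 0x%02zx (expect 0x6c)\n",
	       offsetof(struct regs_sketch, reply_post_host_index));
	printf("outbound_scratch_pad_2 @ 0x%02zx (expect 0xb4)\n",
	       offsetof(struct regs_sketch, outbound_scratch_pad_2));
	printf("inbound_low_queue_port @ 0x%02zx (expect 0xc0)\n",
	       offsetof(struct regs_sketch, inbound_low_queue_port));
	return 0;
}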
@@ -1272,6 +1294,9 @@ struct megasas_instance { | |||
1272 | 1294 | ||
1273 | u16 max_num_sge; | 1295 | u16 max_num_sge; |
1274 | u16 max_fw_cmds; | 1296 | u16 max_fw_cmds; |
1297 | /* For Fusion its num IOCTL cmds, for others MFI based its | ||
1298 | max_fw_cmds */ | ||
1299 | u16 max_mfi_cmds; | ||
1275 | u32 max_sectors_per_req; | 1300 | u32 max_sectors_per_req; |
1276 | struct megasas_aen_event *ev; | 1301 | struct megasas_aen_event *ev; |
1277 | 1302 | ||
@@ -1320,6 +1345,16 @@ struct megasas_instance { | |||
1320 | 1345 | ||
1321 | struct timer_list io_completion_timer; | 1346 | struct timer_list io_completion_timer; |
1322 | struct list_head internal_reset_pending_q; | 1347 | struct list_head internal_reset_pending_q; |
1348 | |||
1349 | /* Ptr to hba specific information */ | ||
1350 | void *ctrl_context; | ||
1351 | u8 msi_flag; | ||
1352 | struct msix_entry msixentry; | ||
1353 | u64 map_id; | ||
1354 | struct megasas_cmd *map_update_cmd; | ||
1355 | unsigned long bar; | ||
1356 | long reset_flags; | ||
1357 | struct mutex reset_mutex; | ||
1323 | }; | 1358 | }; |
1324 | 1359 | ||
1325 | enum { | 1360 | enum { |
@@ -1345,6 +1380,13 @@ struct megasas_instance_template { | |||
1345 | struct megasas_register_set __iomem *); | 1380 | struct megasas_register_set __iomem *); |
1346 | int (*check_reset)(struct megasas_instance *, \ | 1381 | int (*check_reset)(struct megasas_instance *, \ |
1347 | struct megasas_register_set __iomem *); | 1382 | struct megasas_register_set __iomem *); |
1383 | irqreturn_t (*service_isr)(int irq, void *devp); | ||
1384 | void (*tasklet)(unsigned long); | ||
1385 | u32 (*init_adapter)(struct megasas_instance *); | ||
1386 | u32 (*build_and_issue_cmd) (struct megasas_instance *, | ||
1387 | struct scsi_cmnd *); | ||
1388 | void (*issue_dcmd) (struct megasas_instance *instance, | ||
1389 | struct megasas_cmd *cmd); | ||
1348 | }; | 1390 | }; |
1349 | 1391 | ||
1350 | #define MEGASAS_IS_LOGICAL(scp) \ | 1392 | #define MEGASAS_IS_LOGICAL(scp) \ |
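The new members added to megasas_instance_template (service_isr, tasklet, init_adapter, build_and_issue_cmd, issue_dcmd) extend the per-controller ops table so the MFI and Fusion paths can each plug in their own handlers while common code only calls through the table. A generic sketch of that pattern, with invented names and handlers:

/* Sketch of the per-controller ops-table pattern: each adapter family
 * fills in its own handlers and common code only calls through the
 * table.  Names and handlers are invented for illustration. */
#include <stdio.h>

struct instance;

struct instance_template {
	void     (*fire_cmd)(struct instance *);
	unsigned (*init_adapter)(struct instance *);
};

struct instance {
	const struct instance_template *instancet;
	const char *name;
};

static void mfi_fire(struct instance *i)     { printf("%s: MFI fire\n", i->name); }
static unsigned mfi_init(struct instance *i) { printf("%s: MFI init\n", i->name); return 0; }

static const struct instance_template mfi_template = {
	.fire_cmd     = mfi_fire,
	.init_adapter = mfi_init,
};

int main(void)
{
	struct instance inst = { &mfi_template, "adapter0" };

	inst.instancet->init_adapter(&inst);   /* common code, family hook */
	inst.instancet->fire_cmd(&inst);
	return 0;
}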
@@ -1371,7 +1413,13 @@ struct megasas_cmd { | |||
1371 | struct list_head list; | 1413 | struct list_head list; |
1372 | struct scsi_cmnd *scmd; | 1414 | struct scsi_cmnd *scmd; |
1373 | struct megasas_instance *instance; | 1415 | struct megasas_instance *instance; |
1374 | u32 frame_count; | 1416 | union { |
1417 | struct { | ||
1418 | u16 smid; | ||
1419 | u16 resvd; | ||
1420 | } context; | ||
1421 | u32 frame_count; | ||
1422 | }; | ||
1375 | }; | 1423 | }; |
1376 | 1424 | ||
1377 | #define MAX_MGMT_ADAPTERS 1024 | 1425 | #define MAX_MGMT_ADAPTERS 1024 |
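The u32 frame_count slot in struct megasas_cmd becomes an anonymous union, so the same word can carry either an MFI frame count or a Fusion context (smid). A standalone sketch of that overlay, assuming a C11 compiler for the anonymous union; everything else in the struct is omitted:

/* Sketch: the same 32-bit slot holds either a Fusion context (smid) or
 * an MFI frame count, via an anonymous union (C11).  Field names follow
 * the hunk above; the rest of the struct is omitted. */
#include <stdio.h>
#include <stdint.h>

struct cmd_sketch {
	union {
		struct {
			uint16_t smid;
			uint16_t resvd;
		} context;
		uint32_t frame_count;
	};
};

int main(void)
{
	struct cmd_sketch c = { .frame_count = 3 };       /* MFI-style use    */
	printf("frame_count = %u\n", c.frame_count);

	c.context.smid = 42;                              /* Fusion-style use */
	printf("smid = %u (overlays frame_count)\n", c.context.smid);
	return 0;
}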
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7451bc096a01..5d6d07bd1cd0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -1,27 +1,34 @@ | |||
1 | /* | 1 | /* |
2 | * Linux MegaRAID driver for SAS based RAID controllers | ||
2 | * | 3 | * |
3 | * Linux MegaRAID driver for SAS based RAID controllers | 4 | * Copyright (c) 2009-2011 LSI Corporation. |
4 | * | 5 | * |
5 | * Copyright (c) 2003-2005 LSI Corporation. | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
6 | * | 10 | * |
7 | * This program is free software; you can redistribute it and/or | 11 | * This program is distributed in the hope that it will be useful, |
8 | * modify it under the terms of the GNU General Public License | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | * as published by the Free Software Foundation; either version | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | * 2 of the License, or (at your option) any later version. | 14 | * GNU General Public License for more details. |
11 | * | 15 | * |
12 | * FILE : megaraid_sas.c | 16 | * You should have received a copy of the GNU General Public License |
13 | * Version : v00.00.04.31-rc1 | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
14 | * | 19 | * |
15 | * Authors: | 20 | * FILE: megaraid_sas_base.c |
16 | * (email-id : megaraidlinux@lsi.com) | 21 | * Version : v00.00.05.29-rc1 |
17 | * Sreenivas Bagalkote | ||
18 | * Sumant Patro | ||
19 | * Bo Yang | ||
20 | * | 22 | * |
21 | * List of supported controllers | 23 | * Authors: LSI Corporation |
24 | * Sreenivas Bagalkote | ||
25 | * Sumant Patro | ||
26 | * Bo Yang | ||
22 | * | 27 | * |
23 | * OEM Product Name VID DID SSVID SSID | 28 | * Send feedback to: <megaraidlinux@lsi.com> |
24 | * --- ------------ --- --- ---- ---- | 29 | * |
30 | * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 | ||
31 | * ATTN: Linuxraid | ||
25 | */ | 32 | */ |
26 | 33 | ||
27 | #include <linux/kernel.h> | 34 | #include <linux/kernel.h> |
@@ -46,6 +53,7 @@ | |||
46 | #include <scsi/scsi_cmnd.h> | 53 | #include <scsi/scsi_cmnd.h> |
47 | #include <scsi/scsi_device.h> | 54 | #include <scsi/scsi_device.h> |
48 | #include <scsi/scsi_host.h> | 55 | #include <scsi/scsi_host.h> |
56 | #include "megaraid_sas_fusion.h" | ||
49 | #include "megaraid_sas.h" | 57 | #include "megaraid_sas.h" |
50 | 58 | ||
51 | /* | 59 | /* |
@@ -65,12 +73,16 @@ module_param_named(max_sectors, max_sectors, int, 0); | |||
65 | MODULE_PARM_DESC(max_sectors, | 73 | MODULE_PARM_DESC(max_sectors, |
66 | "Maximum number of sectors per IO command"); | 74 | "Maximum number of sectors per IO command"); |
67 | 75 | ||
76 | static int msix_disable; | ||
77 | module_param(msix_disable, int, S_IRUGO); | ||
78 | MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0"); | ||
79 | |||
68 | MODULE_LICENSE("GPL"); | 80 | MODULE_LICENSE("GPL"); |
69 | MODULE_VERSION(MEGASAS_VERSION); | 81 | MODULE_VERSION(MEGASAS_VERSION); |
70 | MODULE_AUTHOR("megaraidlinux@lsi.com"); | 82 | MODULE_AUTHOR("megaraidlinux@lsi.com"); |
71 | MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); | 83 | MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); |
72 | 84 | ||
73 | static int megasas_transition_to_ready(struct megasas_instance *instance); | 85 | int megasas_transition_to_ready(struct megasas_instance *instance); |
74 | static int megasas_get_pd_list(struct megasas_instance *instance); | 86 | static int megasas_get_pd_list(struct megasas_instance *instance); |
75 | static int megasas_issue_init_mfi(struct megasas_instance *instance); | 87 | static int megasas_issue_init_mfi(struct megasas_instance *instance); |
76 | static int megasas_register_aen(struct megasas_instance *instance, | 88 | static int megasas_register_aen(struct megasas_instance *instance, |
@@ -98,6 +110,8 @@ static struct pci_device_id megasas_pci_table[] = { | |||
98 | /* xscale IOP, vega */ | 110 | /* xscale IOP, vega */ |
99 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, | 111 | {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)}, |
100 | /* xscale IOP */ | 112 | /* xscale IOP */ |
113 | {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, | ||
114 | /* Fusion */ | ||
101 | {} | 115 | {} |
102 | }; | 116 | }; |
103 | 117 | ||
@@ -111,23 +125,55 @@ static DEFINE_MUTEX(megasas_async_queue_mutex); | |||
111 | static int megasas_poll_wait_aen; | 125 | static int megasas_poll_wait_aen; |
112 | static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); | 126 | static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); |
113 | static u32 support_poll_for_event; | 127 | static u32 support_poll_for_event; |
114 | static u32 megasas_dbg_lvl; | 128 | u32 megasas_dbg_lvl; |
115 | static u32 support_device_change; | 129 | static u32 support_device_change; |
116 | 130 | ||
117 | /* define lock for aen poll */ | 131 | /* define lock for aen poll */ |
118 | spinlock_t poll_aen_lock; | 132 | spinlock_t poll_aen_lock; |
119 | 133 | ||
120 | static void | 134 | void |
121 | megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | 135 | megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, |
122 | u8 alt_status); | 136 | u8 alt_status); |
123 | 137 | ||
138 | static irqreturn_t megasas_isr(int irq, void *devp); | ||
139 | static u32 | ||
140 | megasas_init_adapter_mfi(struct megasas_instance *instance); | ||
141 | u32 | ||
142 | megasas_build_and_issue_cmd(struct megasas_instance *instance, | ||
143 | struct scsi_cmnd *scmd); | ||
144 | static void megasas_complete_cmd_dpc(unsigned long instance_addr); | ||
145 | void | ||
146 | megasas_release_fusion(struct megasas_instance *instance); | ||
147 | int | ||
148 | megasas_ioc_init_fusion(struct megasas_instance *instance); | ||
149 | void | ||
150 | megasas_free_cmds_fusion(struct megasas_instance *instance); | ||
151 | u8 | ||
152 | megasas_get_map_info(struct megasas_instance *instance); | ||
153 | int | ||
154 | megasas_sync_map_info(struct megasas_instance *instance); | ||
155 | int | ||
156 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); | ||
157 | void megasas_reset_reply_desc(struct megasas_instance *instance); | ||
158 | u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, | ||
159 | struct LD_LOAD_BALANCE_INFO *lbInfo); | ||
160 | int megasas_reset_fusion(struct Scsi_Host *shost); | ||
161 | void megasas_fusion_ocr_wq(struct work_struct *work); | ||
162 | |||
163 | void | ||
164 | megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) | ||
165 | { | ||
166 | instance->instancet->fire_cmd(instance, | ||
167 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
168 | } | ||
169 | |||
124 | /** | 170 | /** |
125 | * megasas_get_cmd - Get a command from the free pool | 171 | * megasas_get_cmd - Get a command from the free pool |
126 | * @instance: Adapter soft state | 172 | * @instance: Adapter soft state |
127 | * | 173 | * |
128 | * Returns a free command from the pool | 174 | * Returns a free command from the pool |
129 | */ | 175 | */ |
130 | static struct megasas_cmd *megasas_get_cmd(struct megasas_instance | 176 | struct megasas_cmd *megasas_get_cmd(struct megasas_instance |
131 | *instance) | 177 | *instance) |
132 | { | 178 | { |
133 | unsigned long flags; | 179 | unsigned long flags; |
@@ -152,7 +198,7 @@ static struct megasas_cmd *megasas_get_cmd(struct megasas_instance | |||
152 | * @instance: Adapter soft state | 198 | * @instance: Adapter soft state |
153 | * @cmd: Command packet to be returned to free command pool | 199 | * @cmd: Command packet to be returned to free command pool |
154 | */ | 200 | */ |
155 | static inline void | 201 | inline void |
156 | megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) | 202 | megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) |
157 | { | 203 | { |
158 | unsigned long flags; | 204 | unsigned long flags; |
@@ -160,6 +206,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) | |||
160 | spin_lock_irqsave(&instance->cmd_pool_lock, flags); | 206 | spin_lock_irqsave(&instance->cmd_pool_lock, flags); |
161 | 207 | ||
162 | cmd->scmd = NULL; | 208 | cmd->scmd = NULL; |
209 | cmd->frame_count = 0; | ||
163 | list_add_tail(&cmd->list, &instance->cmd_pool); | 210 | list_add_tail(&cmd->list, &instance->cmd_pool); |
164 | 211 | ||
165 | spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); | 212 | spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); |
@@ -167,7 +214,7 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) | |||
167 | 214 | ||
168 | 215 | ||
169 | /** | 216 | /** |
170 | * The following functions are defined for xscale | 217 | * The following functions are defined for xscale |
171 | * (deviceid : 1064R, PERC5) controllers | 218 | * (deviceid : 1064R, PERC5) controllers |
172 | */ | 219 | */ |
173 | 220 | ||
@@ -210,7 +257,7 @@ megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs) | |||
210 | * megasas_clear_interrupt_xscale - Check & clear interrupt | 257 | * megasas_clear_interrupt_xscale - Check & clear interrupt |
211 | * @regs: MFI register set | 258 | * @regs: MFI register set |
212 | */ | 259 | */ |
213 | static int | 260 | static int |
214 | megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) | 261 | megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) |
215 | { | 262 | { |
216 | u32 status; | 263 | u32 status; |
@@ -243,7 +290,7 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs) | |||
243 | * @frame_count : Number of frames for the command | 290 | * @frame_count : Number of frames for the command |
244 | * @regs : MFI register set | 291 | * @regs : MFI register set |
245 | */ | 292 | */ |
246 | static inline void | 293 | static inline void |
247 | megasas_fire_cmd_xscale(struct megasas_instance *instance, | 294 | megasas_fire_cmd_xscale(struct megasas_instance *instance, |
248 | dma_addr_t frame_phys_addr, | 295 | dma_addr_t frame_phys_addr, |
249 | u32 frame_count, | 296 | u32 frame_count, |
@@ -323,15 +370,20 @@ static struct megasas_instance_template megasas_instance_template_xscale = { | |||
323 | .read_fw_status_reg = megasas_read_fw_status_reg_xscale, | 370 | .read_fw_status_reg = megasas_read_fw_status_reg_xscale, |
324 | .adp_reset = megasas_adp_reset_xscale, | 371 | .adp_reset = megasas_adp_reset_xscale, |
325 | .check_reset = megasas_check_reset_xscale, | 372 | .check_reset = megasas_check_reset_xscale, |
373 | .service_isr = megasas_isr, | ||
374 | .tasklet = megasas_complete_cmd_dpc, | ||
375 | .init_adapter = megasas_init_adapter_mfi, | ||
376 | .build_and_issue_cmd = megasas_build_and_issue_cmd, | ||
377 | .issue_dcmd = megasas_issue_dcmd, | ||
326 | }; | 378 | }; |
327 | 379 | ||
328 | /** | 380 | /** |
329 | * This is the end of set of functions & definitions specific | 381 | * This is the end of set of functions & definitions specific |
330 | * to xscale (deviceid : 1064R, PERC5) controllers | 382 | * to xscale (deviceid : 1064R, PERC5) controllers |
331 | */ | 383 | */ |
332 | 384 | ||
333 | /** | 385 | /** |
334 | * The following functions are defined for ppc (deviceid : 0x60) | 386 | * The following functions are defined for ppc (deviceid : 0x60) |
335 | * controllers | 387 | * controllers |
336 | */ | 388 | */ |
337 | 389 | ||
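The hunk above grows struct megasas_instance_template with five new hooks (service_isr, tasklet, init_adapter, build_and_issue_cmd and issue_dcmd), so the shared probe and I/O paths can pick one ops table per controller family and dispatch through it, while the new Fusion template (declared further down) plugs into the same slots. The snippet below is only a minimal standalone sketch of that ops-table pattern; the names ctrl_ops, mfi_ops and fusion_ops are made up for the example and are not the driver's actual code.

    #include <stdio.h>

    /* Simplified analogue of megasas_instance_template: one ops table per
     * controller family, selected once at probe time. */
    struct ctrl_ops {
        const char *name;
        int  (*init_adapter)(void);
        void (*issue_dcmd)(int cmd_id);
    };

    static int  mfi_init(void)           { puts("MFI: send INIT frame"); return 0; }
    static void mfi_issue(int cmd_id)    { printf("MFI: fire_cmd, frame %d\n", cmd_id); }
    static int  fusion_init(void)        { puts("Fusion: send IOC INIT"); return 0; }
    static void fusion_issue(int cmd_id) { printf("Fusion: post request %d\n", cmd_id); }

    static const struct ctrl_ops mfi_ops    = { "mfi",    mfi_init,    mfi_issue    };
    static const struct ctrl_ops fusion_ops = { "fusion", fusion_init, fusion_issue };

    int main(void)
    {
        int is_fusion = 1;                 /* would come from the PCI device ID */
        const struct ctrl_ops *instancet = is_fusion ? &fusion_ops : &mfi_ops;

        if (instancet->init_adapter())     /* common path from here on */
            return 1;
        instancet->issue_dcmd(42);         /* family-specific behaviour */
        return 0;
    }

Keeping the family differences behind function pointers is what lets the rest of this patch replace direct fire_cmd() calls with instancet->issue_dcmd() without branching on device IDs at each call site.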
@@ -343,7 +395,7 @@ static inline void | |||
343 | megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) | 395 | megasas_enable_intr_ppc(struct megasas_register_set __iomem * regs) |
344 | { | 396 | { |
345 | writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); | 397 | writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); |
346 | 398 | ||
347 | writel(~0x80000000, &(regs)->outbound_intr_mask); | 399 | writel(~0x80000000, &(regs)->outbound_intr_mask); |
348 | 400 | ||
349 | /* Dummy readl to force pci flush */ | 401 | /* Dummy readl to force pci flush */ |
@@ -377,7 +429,7 @@ megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs) | |||
377 | * megasas_clear_interrupt_ppc - Check & clear interrupt | 429 | * megasas_clear_interrupt_ppc - Check & clear interrupt |
378 | * @regs: MFI register set | 430 | * @regs: MFI register set |
379 | */ | 431 | */ |
380 | static int | 432 | static int |
381 | megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) | 433 | megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) |
382 | { | 434 | { |
383 | u32 status; | 435 | u32 status; |
@@ -406,7 +458,7 @@ megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs) | |||
406 | * @frame_count : Number of frames for the command | 458 | * @frame_count : Number of frames for the command |
407 | * @regs : MFI register set | 459 | * @regs : MFI register set |
408 | */ | 460 | */ |
409 | static inline void | 461 | static inline void |
410 | megasas_fire_cmd_ppc(struct megasas_instance *instance, | 462 | megasas_fire_cmd_ppc(struct megasas_instance *instance, |
411 | dma_addr_t frame_phys_addr, | 463 | dma_addr_t frame_phys_addr, |
412 | u32 frame_count, | 464 | u32 frame_count, |
@@ -414,7 +466,7 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance, | |||
414 | { | 466 | { |
415 | unsigned long flags; | 467 | unsigned long flags; |
416 | spin_lock_irqsave(&instance->hba_lock, flags); | 468 | spin_lock_irqsave(&instance->hba_lock, flags); |
417 | writel((frame_phys_addr | (frame_count<<1))|1, | 469 | writel((frame_phys_addr | (frame_count<<1))|1, |
418 | &(regs)->inbound_queue_port); | 470 | &(regs)->inbound_queue_port); |
419 | spin_unlock_irqrestore(&instance->hba_lock, flags); | 471 | spin_unlock_irqrestore(&instance->hba_lock, flags); |
420 | } | 472 | } |
@@ -441,7 +493,7 @@ megasas_check_reset_ppc(struct megasas_instance *instance, | |||
441 | return 0; | 493 | return 0; |
442 | } | 494 | } |
443 | static struct megasas_instance_template megasas_instance_template_ppc = { | 495 | static struct megasas_instance_template megasas_instance_template_ppc = { |
444 | 496 | ||
445 | .fire_cmd = megasas_fire_cmd_ppc, | 497 | .fire_cmd = megasas_fire_cmd_ppc, |
446 | .enable_intr = megasas_enable_intr_ppc, | 498 | .enable_intr = megasas_enable_intr_ppc, |
447 | .disable_intr = megasas_disable_intr_ppc, | 499 | .disable_intr = megasas_disable_intr_ppc, |
@@ -449,6 +501,11 @@ static struct megasas_instance_template megasas_instance_template_ppc = { | |||
449 | .read_fw_status_reg = megasas_read_fw_status_reg_ppc, | 501 | .read_fw_status_reg = megasas_read_fw_status_reg_ppc, |
450 | .adp_reset = megasas_adp_reset_ppc, | 502 | .adp_reset = megasas_adp_reset_ppc, |
451 | .check_reset = megasas_check_reset_ppc, | 503 | .check_reset = megasas_check_reset_ppc, |
504 | .service_isr = megasas_isr, | ||
505 | .tasklet = megasas_complete_cmd_dpc, | ||
506 | .init_adapter = megasas_init_adapter_mfi, | ||
507 | .build_and_issue_cmd = megasas_build_and_issue_cmd, | ||
508 | .issue_dcmd = megasas_issue_dcmd, | ||
452 | }; | 509 | }; |
453 | 510 | ||
454 | /** | 511 | /** |
@@ -570,6 +627,11 @@ static struct megasas_instance_template megasas_instance_template_skinny = { | |||
570 | .read_fw_status_reg = megasas_read_fw_status_reg_skinny, | 627 | .read_fw_status_reg = megasas_read_fw_status_reg_skinny, |
571 | .adp_reset = megasas_adp_reset_skinny, | 628 | .adp_reset = megasas_adp_reset_skinny, |
572 | .check_reset = megasas_check_reset_skinny, | 629 | .check_reset = megasas_check_reset_skinny, |
630 | .service_isr = megasas_isr, | ||
631 | .tasklet = megasas_complete_cmd_dpc, | ||
632 | .init_adapter = megasas_init_adapter_mfi, | ||
633 | .build_and_issue_cmd = megasas_build_and_issue_cmd, | ||
634 | .issue_dcmd = megasas_issue_dcmd, | ||
573 | }; | 635 | }; |
574 | 636 | ||
575 | 637 | ||
@@ -744,6 +806,11 @@ static struct megasas_instance_template megasas_instance_template_gen2 = { | |||
744 | .read_fw_status_reg = megasas_read_fw_status_reg_gen2, | 806 | .read_fw_status_reg = megasas_read_fw_status_reg_gen2, |
745 | .adp_reset = megasas_adp_reset_gen2, | 807 | .adp_reset = megasas_adp_reset_gen2, |
746 | .check_reset = megasas_check_reset_gen2, | 808 | .check_reset = megasas_check_reset_gen2, |
809 | .service_isr = megasas_isr, | ||
810 | .tasklet = megasas_complete_cmd_dpc, | ||
811 | .init_adapter = megasas_init_adapter_mfi, | ||
812 | .build_and_issue_cmd = megasas_build_and_issue_cmd, | ||
813 | .issue_dcmd = megasas_issue_dcmd, | ||
747 | }; | 814 | }; |
748 | 815 | ||
749 | /** | 816 | /** |
@@ -751,18 +818,21 @@ static struct megasas_instance_template megasas_instance_template_gen2 = { | |||
751 | * specific to gen2 (deviceid : 0x78, 0x79) controllers | 818 | * specific to gen2 (deviceid : 0x78, 0x79) controllers |
752 | */ | 819 | */ |
753 | 820 | ||
821 | /* | ||
822 | * Template added for TB (Fusion) | ||
823 | */ | ||
824 | extern struct megasas_instance_template megasas_instance_template_fusion; | ||
825 | |||
754 | /** | 826 | /** |
755 | * megasas_issue_polled - Issues a polling command | 827 | * megasas_issue_polled - Issues a polling command |
756 | * @instance: Adapter soft state | 828 | * @instance: Adapter soft state |
757 | * @cmd: Command packet to be issued | 829 | * @cmd: Command packet to be issued |
758 | * | 830 | * |
759 | * For polling, MFI requires the cmd_status to be set to 0xFF before posting. | 831 | * For polling, MFI requires the cmd_status to be set to 0xFF before posting. |
760 | */ | 832 | */ |
761 | static int | 833 | int |
762 | megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) | 834 | megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) |
763 | { | 835 | { |
764 | int i; | ||
765 | u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; | ||
766 | 836 | ||
767 | struct megasas_header *frame_hdr = &cmd->frame->hdr; | 837 | struct megasas_header *frame_hdr = &cmd->frame->hdr; |
768 | 838 | ||
@@ -772,21 +842,12 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) | |||
772 | /* | 842 | /* |
773 | * Issue the frame using inbound queue port | 843 | * Issue the frame using inbound queue port |
774 | */ | 844 | */ |
775 | instance->instancet->fire_cmd(instance, | 845 | instance->instancet->issue_dcmd(instance, cmd); |
776 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
777 | 846 | ||
778 | /* | 847 | /* |
779 | * Wait for cmd_status to change | 848 | * Wait for cmd_status to change |
780 | */ | 849 | */ |
781 | for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) { | 850 | return wait_and_poll(instance, cmd); |
782 | rmb(); | ||
783 | msleep(1); | ||
784 | } | ||
785 | |||
786 | if (frame_hdr->cmd_status == 0xff) | ||
787 | return -ETIME; | ||
788 | |||
789 | return 0; | ||
790 | } | 851 | } |
791 | 852 | ||
792 | /** | 853 | /** |
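As the hunk above shows, megasas_issue_polled() used to busy-wait in place, sleeping 1 ms per iteration for up to MFI_POLL_TIMEOUT_SECS worth of sleeps and returning -ETIME if cmd_status never left 0xFF; the patch now posts the frame through the issue_dcmd hook and delegates the wait to a shared wait_and_poll() helper. Below is a small standalone sketch of that kind of bounded poll loop, with hypothetical names (wait_for_status, STATUS_PENDING) standing in for the driver's; it is an illustration of the idea, not the kernel implementation.

    #define _POSIX_C_SOURCE 199309L
    #include <stdio.h>
    #include <time.h>

    #define POLL_TIMEOUT_MS (60 * 1000)   /* analogue of MFI_POLL_TIMEOUT_SECS * 1000 */
    #define STATUS_PENDING  0xFF          /* firmware overwrites this on completion   */

    static volatile unsigned char cmd_status = STATUS_PENDING;

    /* Bounded 1 ms poll, in the spirit of the loop the patch moves into wait_and_poll(). */
    static int wait_for_status(void)
    {
        struct timespec one_ms = { 0, 1000 * 1000 };
        int i;

        for (i = 0; i < POLL_TIMEOUT_MS && cmd_status == STATUS_PENDING; i++) {
            nanosleep(&one_ms, NULL);
            if (i == 3)
                cmd_status = 0x00;        /* simulate the firmware completing the frame */
        }
        return (cmd_status == STATUS_PENDING) ? -1 : 0;   /* -1 standing in for -ETIME */
    }

    int main(void)
    {
        printf("poll result: %d, status: 0x%02x\n", wait_for_status(), cmd_status);
        return 0;
    }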
@@ -804,8 +865,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, | |||
804 | { | 865 | { |
805 | cmd->cmd_status = ENODATA; | 866 | cmd->cmd_status = ENODATA; |
806 | 867 | ||
807 | instance->instancet->fire_cmd(instance, | 868 | instance->instancet->issue_dcmd(instance, cmd); |
808 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
809 | 869 | ||
810 | wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); | 870 | wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); |
811 | 871 | ||
@@ -849,8 +909,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, | |||
849 | cmd->sync_cmd = 1; | 909 | cmd->sync_cmd = 1; |
850 | cmd->cmd_status = 0xFF; | 910 | cmd->cmd_status = 0xFF; |
851 | 911 | ||
852 | instance->instancet->fire_cmd(instance, | 912 | instance->instancet->issue_dcmd(instance, cmd); |
853 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
854 | 913 | ||
855 | /* | 914 | /* |
856 | * Wait for this cmd to complete | 915 | * Wait for this cmd to complete |
@@ -1242,11 +1301,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1242 | /** | 1301 | /** |
1243 | * megasas_is_ldio - Checks if the cmd is for logical drive | 1302 | * megasas_is_ldio - Checks if the cmd is for logical drive |
1244 | * @scmd: SCSI command | 1303 | * @scmd: SCSI command |
1245 | * | 1304 | * |
1246 | * Called by megasas_queue_command to find out if the command to be queued | 1305 | * Called by megasas_queue_command to find out if the command to be queued |
1247 | * is a logical drive command | 1306 | * is a logical drive command |
1248 | */ | 1307 | */ |
1249 | static inline int megasas_is_ldio(struct scsi_cmnd *cmd) | 1308 | inline int megasas_is_ldio(struct scsi_cmnd *cmd) |
1250 | { | 1309 | { |
1251 | if (!MEGASAS_IS_LOGICAL(cmd)) | 1310 | if (!MEGASAS_IS_LOGICAL(cmd)) |
1252 | return 0; | 1311 | return 0; |
@@ -1328,6 +1387,51 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1328 | printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no); | 1387 | printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no); |
1329 | } | 1388 | } |
1330 | 1389 | ||
1390 | u32 | ||
1391 | megasas_build_and_issue_cmd(struct megasas_instance *instance, | ||
1392 | struct scsi_cmnd *scmd) | ||
1393 | { | ||
1394 | struct megasas_cmd *cmd; | ||
1395 | u32 frame_count; | ||
1396 | |||
1397 | cmd = megasas_get_cmd(instance); | ||
1398 | if (!cmd) | ||
1399 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1400 | |||
1401 | /* | ||
1402 | * Logical drive command | ||
1403 | */ | ||
1404 | if (megasas_is_ldio(scmd)) | ||
1405 | frame_count = megasas_build_ldio(instance, scmd, cmd); | ||
1406 | else | ||
1407 | frame_count = megasas_build_dcdb(instance, scmd, cmd); | ||
1408 | |||
1409 | if (!frame_count) | ||
1410 | goto out_return_cmd; | ||
1411 | |||
1412 | cmd->scmd = scmd; | ||
1413 | scmd->SCp.ptr = (char *)cmd; | ||
1414 | |||
1415 | /* | ||
1416 | * Issue the command to the FW | ||
1417 | */ | ||
1418 | atomic_inc(&instance->fw_outstanding); | ||
1419 | |||
1420 | instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, | ||
1421 | cmd->frame_count-1, instance->reg_set); | ||
1422 | /* | ||
1423 | * Check if we have pending cmds to be completed ||
1424 | */ | ||
1425 | if (poll_mode_io && atomic_read(&instance->fw_outstanding)) | ||
1426 | tasklet_schedule(&instance->isr_tasklet); | ||
1427 | |||
1428 | return 0; | ||
1429 | out_return_cmd: | ||
1430 | megasas_return_cmd(instance, cmd); | ||
1431 | return 1; | ||
1432 | } | ||
1433 | |||
1434 | |||
1331 | /** | 1435 | /** |
1332 | * megasas_queue_command - Queue entry point | 1436 | * megasas_queue_command - Queue entry point |
1333 | * @scmd: SCSI command to be queued | 1437 | * @scmd: SCSI command to be queued |
@@ -1336,8 +1440,6 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1336 | static int | 1440 | static int |
1337 | megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) | 1441 | megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) |
1338 | { | 1442 | { |
1339 | u32 frame_count; | ||
1340 | struct megasas_cmd *cmd; | ||
1341 | struct megasas_instance *instance; | 1443 | struct megasas_instance *instance; |
1342 | unsigned long flags; | 1444 | unsigned long flags; |
1343 | 1445 | ||
@@ -1376,42 +1478,13 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd | |||
1376 | break; | 1478 | break; |
1377 | } | 1479 | } |
1378 | 1480 | ||
1379 | cmd = megasas_get_cmd(instance); | 1481 | if (instance->instancet->build_and_issue_cmd(instance, scmd)) { |
1380 | if (!cmd) | 1482 | printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n"); |
1381 | return SCSI_MLQUEUE_HOST_BUSY; | 1483 | return SCSI_MLQUEUE_HOST_BUSY; |
1382 | 1484 | } | |
1383 | /* | ||
1384 | * Logical drive command | ||
1385 | */ | ||
1386 | if (megasas_is_ldio(scmd)) | ||
1387 | frame_count = megasas_build_ldio(instance, scmd, cmd); | ||
1388 | else | ||
1389 | frame_count = megasas_build_dcdb(instance, scmd, cmd); | ||
1390 | |||
1391 | if (!frame_count) | ||
1392 | goto out_return_cmd; | ||
1393 | |||
1394 | cmd->scmd = scmd; | ||
1395 | scmd->SCp.ptr = (char *)cmd; | ||
1396 | |||
1397 | /* | ||
1398 | * Issue the command to the FW | ||
1399 | */ | ||
1400 | atomic_inc(&instance->fw_outstanding); | ||
1401 | |||
1402 | instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, | ||
1403 | cmd->frame_count-1, instance->reg_set); | ||
1404 | /* | ||
1405 | * Check if we have pend cmds to be completed | ||
1406 | */ | ||
1407 | if (poll_mode_io && atomic_read(&instance->fw_outstanding)) | ||
1408 | tasklet_schedule(&instance->isr_tasklet); | ||
1409 | |||
1410 | 1485 | ||
1411 | return 0; | 1486 | return 0; |
1412 | 1487 | ||
1413 | out_return_cmd: | ||
1414 | megasas_return_cmd(instance, cmd); | ||
1415 | out_done: | 1488 | out_done: |
1416 | done(scmd); | 1489 | done(scmd); |
1417 | return 0; | 1490 | return 0; |
@@ -1492,15 +1565,44 @@ static int megasas_slave_alloc(struct scsi_device *sdev) | |||
1492 | return 0; | 1565 | return 0; |
1493 | } | 1566 | } |
1494 | 1567 | ||
1495 | static void megaraid_sas_kill_hba(struct megasas_instance *instance) | 1568 | void megaraid_sas_kill_hba(struct megasas_instance *instance) |
1496 | { | 1569 | { |
1497 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 1570 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
1498 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 1571 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || |
1499 | writel(MFI_STOP_ADP, | 1572 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) { |
1500 | &instance->reg_set->reserved_0[0]); | 1573 | writel(MFI_STOP_ADP, &instance->reg_set->doorbell); |
1501 | } else { | 1574 | } else { |
1502 | writel(MFI_STOP_ADP, | 1575 | writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); |
1503 | &instance->reg_set->inbound_doorbell); | 1576 | } |
1577 | } | ||
1578 | |||
1579 | /** | ||
1580 | * megasas_check_and_restore_queue_depth - Check if queue depth needs to be | ||
1581 | * restored to max value | ||
1582 | * @instance: Adapter soft state | ||
1583 | * | ||
1584 | */ | ||
1585 | void | ||
1586 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance) | ||
1587 | { | ||
1588 | unsigned long flags; | ||
1589 | if (instance->flag & MEGASAS_FW_BUSY | ||
1590 | && time_after(jiffies, instance->last_time + 5 * HZ) | ||
1591 | && atomic_read(&instance->fw_outstanding) < 17) { | ||
1592 | |||
1593 | spin_lock_irqsave(instance->host->host_lock, flags); | ||
1594 | instance->flag &= ~MEGASAS_FW_BUSY; | ||
1595 | if ((instance->pdev->device == | ||
1596 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
1597 | (instance->pdev->device == | ||
1598 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
1599 | instance->host->can_queue = | ||
1600 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
1601 | } else | ||
1602 | instance->host->can_queue = | ||
1603 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
1604 | |||
1605 | spin_unlock_irqrestore(instance->host->host_lock, flags); | ||
1504 | } | 1606 | } |
1505 | } | 1607 | } |
1506 | 1608 | ||
@@ -1554,24 +1656,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) | |||
1554 | /* | 1656 | /* |
1555 | * Check if we can restore can_queue | 1657 | * Check if we can restore can_queue |
1556 | */ | 1658 | */ |
1557 | if (instance->flag & MEGASAS_FW_BUSY | 1659 | megasas_check_and_restore_queue_depth(instance); |
1558 | && time_after(jiffies, instance->last_time + 5 * HZ) | ||
1559 | && atomic_read(&instance->fw_outstanding) < 17) { | ||
1560 | |||
1561 | spin_lock_irqsave(instance->host->host_lock, flags); | ||
1562 | instance->flag &= ~MEGASAS_FW_BUSY; | ||
1563 | if ((instance->pdev->device == | ||
1564 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
1565 | (instance->pdev->device == | ||
1566 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | ||
1567 | instance->host->can_queue = | ||
1568 | instance->max_fw_cmds - MEGASAS_SKINNY_INT_CMDS; | ||
1569 | } else | ||
1570 | instance->host->can_queue = | ||
1571 | instance->max_fw_cmds - MEGASAS_INT_CMDS; | ||
1572 | |||
1573 | spin_unlock_irqrestore(instance->host->host_lock, flags); | ||
1574 | } | ||
1575 | } | 1660 | } |
1576 | 1661 | ||
1577 | static void | 1662 | static void |
@@ -1749,7 +1834,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) | |||
1749 | (instance->pdev->device == | 1834 | (instance->pdev->device == |
1750 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 1835 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { |
1751 | writel(MFI_STOP_ADP, | 1836 | writel(MFI_STOP_ADP, |
1752 | &instance->reg_set->reserved_0[0]); | 1837 | &instance->reg_set->doorbell); |
1753 | } else { | 1838 | } else { |
1754 | writel(MFI_STOP_ADP, | 1839 | writel(MFI_STOP_ADP, |
1755 | &instance->reg_set->inbound_doorbell); | 1840 | &instance->reg_set->inbound_doorbell); |
@@ -1853,11 +1938,16 @@ static int megasas_reset_device(struct scsi_cmnd *scmd) | |||
1853 | static int megasas_reset_bus_host(struct scsi_cmnd *scmd) | 1938 | static int megasas_reset_bus_host(struct scsi_cmnd *scmd) |
1854 | { | 1939 | { |
1855 | int ret; | 1940 | int ret; |
1941 | struct megasas_instance *instance; | ||
1942 | instance = (struct megasas_instance *)scmd->device->host->hostdata; | ||
1856 | 1943 | ||
1857 | /* | 1944 | /* |
1858 | * First wait for all commands to complete | 1945 | * First wait for all commands to complete |
1859 | */ | 1946 | */ |
1860 | ret = megasas_generic_reset(scmd); | 1947 | if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) |
1948 | ret = megasas_reset_fusion(scmd->device->host); | ||
1949 | else | ||
1950 | ret = megasas_generic_reset(scmd); | ||
1861 | 1951 | ||
1862 | return ret; | 1952 | return ret; |
1863 | } | 1953 | } |
@@ -2000,8 +2090,8 @@ megasas_complete_int_cmd(struct megasas_instance *instance, | |||
2000 | * @instance: Adapter soft state | 2090 | * @instance: Adapter soft state |
2001 | * @cmd: Cmd that was issued to abort another cmd | 2091 | * @cmd: Cmd that was issued to abort another cmd |
2002 | * | 2092 | * |
2003 | * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q | 2093 | * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q |
2004 | * after it issues an abort on a previously issued command. This function | 2094 | * after it issues an abort on a previously issued command. This function |
2005 | * wakes up all functions waiting on the same wait queue. | 2095 | * wakes up all functions waiting on the same wait queue. |
2006 | */ | 2096 | */ |
2007 | static void | 2097 | static void |
@@ -2021,19 +2111,20 @@ megasas_complete_abort(struct megasas_instance *instance, | |||
2021 | * megasas_complete_cmd - Completes a command | 2111 | * megasas_complete_cmd - Completes a command |
2022 | * @instance: Adapter soft state | 2112 | * @instance: Adapter soft state |
2023 | * @cmd: Command to be completed | 2113 | * @cmd: Command to be completed |
2024 | * @alt_status: If non-zero, use this value as status to | 2114 | * @alt_status: If non-zero, use this value as status to |
2025 | * SCSI mid-layer instead of the value returned | 2115 | * SCSI mid-layer instead of the value returned |
2026 | * by the FW. This should be used if caller wants | 2116 | * by the FW. This should be used if caller wants |
2027 | * an alternate status (as in the case of aborted | 2117 | * an alternate status (as in the case of aborted |
2028 | * commands) | 2118 | * commands) |
2029 | */ | 2119 | */ |
2030 | static void | 2120 | void |
2031 | megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | 2121 | megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, |
2032 | u8 alt_status) | 2122 | u8 alt_status) |
2033 | { | 2123 | { |
2034 | int exception = 0; | 2124 | int exception = 0; |
2035 | struct megasas_header *hdr = &cmd->frame->hdr; | 2125 | struct megasas_header *hdr = &cmd->frame->hdr; |
2036 | unsigned long flags; | 2126 | unsigned long flags; |
2127 | struct fusion_context *fusion = instance->ctrl_context; | ||
2037 | 2128 | ||
2038 | /* flag for the retry reset */ | 2129 | /* flag for the retry reset */ |
2039 | cmd->retry_for_fw_reset = 0; | 2130 | cmd->retry_for_fw_reset = 0; |
@@ -2126,6 +2217,37 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
2126 | case MFI_CMD_SMP: | 2217 | case MFI_CMD_SMP: |
2127 | case MFI_CMD_STP: | 2218 | case MFI_CMD_STP: |
2128 | case MFI_CMD_DCMD: | 2219 | case MFI_CMD_DCMD: |
2220 | /* Check for LD map update */ | ||
2221 | if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && | ||
2222 | (cmd->frame->dcmd.mbox.b[1] == 1)) { | ||
2223 | spin_lock_irqsave(instance->host->host_lock, flags); | ||
2224 | if (cmd->frame->hdr.cmd_status != 0) { | ||
2225 | if (cmd->frame->hdr.cmd_status != | ||
2226 | MFI_STAT_NOT_FOUND) | ||
2227 | printk(KERN_WARNING "megasas: map sync" | ||
2228 | " failed, status = 0x%x.\n", ||
2229 | cmd->frame->hdr.cmd_status); | ||
2230 | else { | ||
2231 | megasas_return_cmd(instance, cmd); | ||
2232 | spin_unlock_irqrestore( | ||
2233 | instance->host->host_lock, | ||
2234 | flags); | ||
2235 | break; | ||
2236 | } | ||
2237 | } else | ||
2238 | instance->map_id++; | ||
2239 | megasas_return_cmd(instance, cmd); | ||
2240 | if (MR_ValidateMapInfo( | ||
2241 | fusion->ld_map[(instance->map_id & 1)], | ||
2242 | fusion->load_balance_info)) | ||
2243 | fusion->fast_path_io = 1; | ||
2244 | else | ||
2245 | fusion->fast_path_io = 0; | ||
2246 | megasas_sync_map_info(instance); | ||
2247 | spin_unlock_irqrestore(instance->host->host_lock, | ||
2248 | flags); | ||
2249 | break; | ||
2250 | } | ||
2129 | if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || | 2251 | if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || |
2130 | cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { | 2252 | cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { |
2131 | spin_lock_irqsave(&poll_aen_lock, flags); | 2253 | spin_lock_irqsave(&poll_aen_lock, flags); |
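The new DCMD completion branch above handles MR_DCMD_LD_MAP_GET_INFO: on success it advances instance->map_id, validates the just-filled buffer (selected by map_id & 1) with MR_ValidateMapInfo() to decide whether fast-path I/O stays enabled, and calls megasas_sync_map_info() so a map request goes straight back to the firmware. The following standalone sketch shows only the two-buffer ping-pong idea; the raid_map type and validate_map() are placeholders, not the driver's functions.

    #include <stdio.h>
    #include <stdbool.h>

    struct raid_map { int generation; };

    /* Two map buffers: one being filled by firmware, one consumed by the driver. */
    static struct raid_map ld_map[2];
    static unsigned int map_id;        /* analogue of instance->map_id */
    static bool fast_path_io;

    static bool validate_map(const struct raid_map *m)
    {
        return m->generation >= 0;     /* stand-in for MR_ValidateMapInfo() */
    }

    /* Called when a map-sync command completes successfully. */
    static void map_sync_done(void)
    {
        map_id++;                                  /* newest map becomes current */
        fast_path_io = validate_map(&ld_map[map_id & 1]);
        /* ...and the other buffer would be re-armed with firmware here,
         * as megasas_sync_map_info() does in the driver. */
    }

    int main(void)
    {
        ld_map[1].generation = 1;
        map_sync_done();
        printf("using map[%u], fast path %s\n", map_id & 1, fast_path_io ? "on" : "off");
        return 0;
    }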
@@ -2464,7 +2586,7 @@ static irqreturn_t megasas_isr(int irq, void *devp) | |||
2464 | * states, driver must take steps to bring it to ready state. Otherwise, it | 2586 | * states, driver must take steps to bring it to ready state. Otherwise, it |
2465 | * has to wait for the ready state. | 2587 | * has to wait for the ready state. |
2466 | */ | 2588 | */ |
2467 | static int | 2589 | int |
2468 | megasas_transition_to_ready(struct megasas_instance* instance) | 2590 | megasas_transition_to_ready(struct megasas_instance* instance) |
2469 | { | 2591 | { |
2470 | int i; | 2592 | int i; |
@@ -2476,8 +2598,8 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2476 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; | 2598 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; |
2477 | 2599 | ||
2478 | if (fw_state != MFI_STATE_READY) | 2600 | if (fw_state != MFI_STATE_READY) |
2479 | printk(KERN_INFO "megasas: Waiting for FW to come to ready" | 2601 | printk(KERN_INFO "megasas: Waiting for FW to come to ready" |
2480 | " state\n"); | 2602 | " state\n"); |
2481 | 2603 | ||
2482 | while (fw_state != MFI_STATE_READY) { | 2604 | while (fw_state != MFI_STATE_READY) { |
2483 | 2605 | ||
@@ -2498,11 +2620,12 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2498 | if ((instance->pdev->device == | 2620 | if ((instance->pdev->device == |
2499 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 2621 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
2500 | (instance->pdev->device == | 2622 | (instance->pdev->device == |
2501 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 2623 | PCI_DEVICE_ID_LSI_SAS0071SKINNY) || |
2502 | 2624 | (instance->pdev->device == | |
2625 | PCI_DEVICE_ID_LSI_FUSION)) { | ||
2503 | writel( | 2626 | writel( |
2504 | MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, | 2627 | MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, |
2505 | &instance->reg_set->reserved_0[0]); | 2628 | &instance->reg_set->doorbell); |
2506 | } else { | 2629 | } else { |
2507 | writel( | 2630 | writel( |
2508 | MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, | 2631 | MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, |
@@ -2515,11 +2638,13 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2515 | 2638 | ||
2516 | case MFI_STATE_BOOT_MESSAGE_PENDING: | 2639 | case MFI_STATE_BOOT_MESSAGE_PENDING: |
2517 | if ((instance->pdev->device == | 2640 | if ((instance->pdev->device == |
2518 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 2641 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
2519 | (instance->pdev->device == | 2642 | (instance->pdev->device == |
2520 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 2643 | PCI_DEVICE_ID_LSI_SAS0071SKINNY) || |
2644 | (instance->pdev->device == | ||
2645 | PCI_DEVICE_ID_LSI_FUSION)) { | ||
2521 | writel(MFI_INIT_HOTPLUG, | 2646 | writel(MFI_INIT_HOTPLUG, |
2522 | &instance->reg_set->reserved_0[0]); | 2647 | &instance->reg_set->doorbell); |
2523 | } else | 2648 | } else |
2524 | writel(MFI_INIT_HOTPLUG, | 2649 | writel(MFI_INIT_HOTPLUG, |
2525 | &instance->reg_set->inbound_doorbell); | 2650 | &instance->reg_set->inbound_doorbell); |
@@ -2536,9 +2661,23 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2536 | if ((instance->pdev->device == | 2661 | if ((instance->pdev->device == |
2537 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | 2662 | PCI_DEVICE_ID_LSI_SAS0073SKINNY) || |
2538 | (instance->pdev->device == | 2663 | (instance->pdev->device == |
2539 | PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { | 2664 | PCI_DEVICE_ID_LSI_SAS0071SKINNY) || |
2665 | (instance->pdev->device | ||
2666 | == PCI_DEVICE_ID_LSI_FUSION)) { | ||
2540 | writel(MFI_RESET_FLAGS, | 2667 | writel(MFI_RESET_FLAGS, |
2541 | &instance->reg_set->reserved_0[0]); | 2668 | &instance->reg_set->doorbell); |
2669 | if (instance->pdev->device == | ||
2670 | PCI_DEVICE_ID_LSI_FUSION) { | ||
2671 | for (i = 0; i < (10 * 1000); i += 20) { | ||
2672 | if (readl( | ||
2673 | &instance-> | ||
2674 | reg_set-> | ||
2675 | doorbell) & 1) | ||
2676 | msleep(20); | ||
2677 | else | ||
2678 | break; | ||
2679 | } | ||
2680 | } | ||
2542 | } else | 2681 | } else |
2543 | writel(MFI_RESET_FLAGS, | 2682 | writel(MFI_RESET_FLAGS, |
2544 | &instance->reg_set->inbound_doorbell); | 2683 | &instance->reg_set->inbound_doorbell); |
@@ -2590,7 +2729,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2590 | * The cur_state should not last for more than max_wait secs | 2729 | * The cur_state should not last for more than max_wait secs |
2591 | */ | 2730 | */ |
2592 | for (i = 0; i < (max_wait * 1000); i++) { | 2731 | for (i = 0; i < (max_wait * 1000); i++) { |
2593 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & | 2732 | fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & |
2594 | MFI_STATE_MASK ; | 2733 | MFI_STATE_MASK ; |
2595 | curr_abs_state = | 2734 | curr_abs_state = |
2596 | instance->instancet->read_fw_status_reg(instance->reg_set); | 2735 | instance->instancet->read_fw_status_reg(instance->reg_set); |
@@ -2610,7 +2749,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2610 | return -ENODEV; | 2749 | return -ENODEV; |
2611 | } | 2750 | } |
2612 | } | 2751 | } |
2613 | printk(KERN_INFO "megasas: FW now in Ready state\n"); | 2752 | printk(KERN_INFO "megasas: FW now in Ready state\n"); |
2614 | 2753 | ||
2615 | return 0; | 2754 | return 0; |
2616 | } | 2755 | } |
@@ -2622,7 +2761,7 @@ megasas_transition_to_ready(struct megasas_instance* instance) | |||
2622 | static void megasas_teardown_frame_pool(struct megasas_instance *instance) | 2761 | static void megasas_teardown_frame_pool(struct megasas_instance *instance) |
2623 | { | 2762 | { |
2624 | int i; | 2763 | int i; |
2625 | u32 max_cmd = instance->max_fw_cmds; | 2764 | u32 max_cmd = instance->max_mfi_cmds; |
2626 | struct megasas_cmd *cmd; | 2765 | struct megasas_cmd *cmd; |
2627 | 2766 | ||
2628 | if (!instance->frame_dma_pool) | 2767 | if (!instance->frame_dma_pool) |
@@ -2673,7 +2812,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
2673 | u32 frame_count; | 2812 | u32 frame_count; |
2674 | struct megasas_cmd *cmd; | 2813 | struct megasas_cmd *cmd; |
2675 | 2814 | ||
2676 | max_cmd = instance->max_fw_cmds; | 2815 | max_cmd = instance->max_mfi_cmds; |
2677 | 2816 | ||
2678 | /* | 2817 | /* |
2679 | * Size of our frame is 64 bytes for MFI frame, followed by max SG | 2818 | * Size of our frame is 64 bytes for MFI frame, followed by max SG |
@@ -2760,14 +2899,15 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
2760 | * megasas_free_cmds - Free all the cmds in the free cmd pool | 2899 | * megasas_free_cmds - Free all the cmds in the free cmd pool |
2761 | * @instance: Adapter soft state | 2900 | * @instance: Adapter soft state |
2762 | */ | 2901 | */ |
2763 | static void megasas_free_cmds(struct megasas_instance *instance) | 2902 | void megasas_free_cmds(struct megasas_instance *instance) |
2764 | { | 2903 | { |
2765 | int i; | 2904 | int i; |
2766 | /* First free the MFI frame pool */ | 2905 | /* First free the MFI frame pool */ |
2767 | megasas_teardown_frame_pool(instance); | 2906 | megasas_teardown_frame_pool(instance); |
2768 | 2907 | ||
2769 | /* Free all the commands in the cmd_list */ | 2908 | /* Free all the commands in the cmd_list */ |
2770 | for (i = 0; i < instance->max_fw_cmds; i++) | 2909 | for (i = 0; i < instance->max_mfi_cmds; i++) |
2910 | |||
2771 | kfree(instance->cmd_list[i]); | 2911 | kfree(instance->cmd_list[i]); |
2772 | 2912 | ||
2773 | /* Free the cmd_list buffer itself */ | 2913 | /* Free the cmd_list buffer itself */ |
@@ -2795,14 +2935,14 @@ static void megasas_free_cmds(struct megasas_instance *instance) | |||
2795 | * This array is used only to look up the megasas_cmd given the context. The | 2935 | * This array is used only to look up the megasas_cmd given the context. The |
2796 | * free commands themselves are maintained in a linked list called cmd_pool. | 2936 | * free commands themselves are maintained in a linked list called cmd_pool. |
2797 | */ | 2937 | */ |
2798 | static int megasas_alloc_cmds(struct megasas_instance *instance) | 2938 | int megasas_alloc_cmds(struct megasas_instance *instance) |
2799 | { | 2939 | { |
2800 | int i; | 2940 | int i; |
2801 | int j; | 2941 | int j; |
2802 | u32 max_cmd; | 2942 | u32 max_cmd; |
2803 | struct megasas_cmd *cmd; | 2943 | struct megasas_cmd *cmd; |
2804 | 2944 | ||
2805 | max_cmd = instance->max_fw_cmds; | 2945 | max_cmd = instance->max_mfi_cmds; |
2806 | 2946 | ||
2807 | /* | 2947 | /* |
2808 | * instance->cmd_list is an array of struct megasas_cmd pointers. | 2948 | * instance->cmd_list is an array of struct megasas_cmd pointers. |
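The comment above sums up the MFI command bookkeeping that this patch resizes from max_fw_cmds to max_mfi_cmds: cmd_list is an array used purely to find a megasas_cmd from its context index, while the free commands themselves are strung on the cmd_pool list. A much simplified standalone sketch of that array-plus-free-list arrangement follows; all names and sizes are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CMDS 4

    struct cmd {
        int index;                 /* context: position in cmd_list[]        */
        struct cmd *next_free;     /* stand-in for the cmd_pool list linkage */
    };

    static struct cmd *cmd_list[MAX_CMDS];  /* lookup by context only          */
    static struct cmd *cmd_pool;            /* singly linked list of free cmds */

    static void alloc_cmds(void)
    {
        for (int i = MAX_CMDS - 1; i >= 0; i--) {
            struct cmd *c = calloc(1, sizeof(*c));
            c->index = i;
            cmd_list[i] = c;         /* always reachable by its context index */
            c->next_free = cmd_pool; /* and initially on the free list        */
            cmd_pool = c;
        }
    }

    static struct cmd *get_cmd(void)
    {
        struct cmd *c = cmd_pool;
        if (c)
            cmd_pool = c->next_free;
        return c;
    }

    static void return_cmd(struct cmd *c)
    {
        c->next_free = cmd_pool;
        cmd_pool = c;
    }

    int main(void)
    {
        alloc_cmds();
        struct cmd *c = get_cmd();
        printf("got cmd with context %d; lookup by context -> %p\n",
               c->index, (void *)cmd_list[c->index]);
        return_cmd(c);
        return 0;
    }

Completion paths can then map a firmware-supplied context straight back to its command with a single array index, while allocation and freeing stay O(1) list operations.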
@@ -2816,6 +2956,7 @@ static int megasas_alloc_cmds(struct megasas_instance *instance) | |||
2816 | return -ENOMEM; | 2956 | return -ENOMEM; |
2817 | } | 2957 | } |
2818 | 2958 | ||
2959 | memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd); | ||
2819 | 2960 | ||
2820 | for (i = 0; i < max_cmd; i++) { | 2961 | for (i = 0; i < max_cmd; i++) { |
2821 | instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), | 2962 | instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), |
@@ -3210,76 +3351,15 @@ megasas_io_completion_timer(unsigned long instance_addr) | |||
3210 | jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL); | 3351 | jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL); |
3211 | } | 3352 | } |
3212 | 3353 | ||
3213 | /** | 3354 | static u32 |
3214 | * megasas_init_mfi - Initializes the FW | 3355 | megasas_init_adapter_mfi(struct megasas_instance *instance) |
3215 | * @instance: Adapter soft state | ||
3216 | * | ||
3217 | * This is the main function for initializing MFI firmware. | ||
3218 | */ | ||
3219 | static int megasas_init_mfi(struct megasas_instance *instance) | ||
3220 | { | 3356 | { |
3357 | struct megasas_register_set __iomem *reg_set; | ||
3221 | u32 context_sz; | 3358 | u32 context_sz; |
3222 | u32 reply_q_sz; | 3359 | u32 reply_q_sz; |
3223 | u32 max_sectors_1; | ||
3224 | u32 max_sectors_2; | ||
3225 | u32 tmp_sectors; | ||
3226 | struct megasas_register_set __iomem *reg_set; | ||
3227 | struct megasas_ctrl_info *ctrl_info; | ||
3228 | /* | ||
3229 | * Map the message registers | ||
3230 | */ | ||
3231 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1078GEN2) || | ||
3232 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || | ||
3233 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || | ||
3234 | (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0079GEN2)) { | ||
3235 | instance->base_addr = pci_resource_start(instance->pdev, 1); | ||
3236 | } else { | ||
3237 | instance->base_addr = pci_resource_start(instance->pdev, 0); | ||
3238 | } | ||
3239 | |||
3240 | if (pci_request_selected_regions(instance->pdev, | ||
3241 | pci_select_bars(instance->pdev, IORESOURCE_MEM), | ||
3242 | "megasas: LSI")) { | ||
3243 | printk(KERN_DEBUG "megasas: IO memory region busy!\n"); | ||
3244 | return -EBUSY; | ||
3245 | } | ||
3246 | |||
3247 | instance->reg_set = ioremap_nocache(instance->base_addr, 8192); | ||
3248 | |||
3249 | if (!instance->reg_set) { | ||
3250 | printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); | ||
3251 | goto fail_ioremap; | ||
3252 | } | ||
3253 | 3360 | ||
3254 | reg_set = instance->reg_set; | 3361 | reg_set = instance->reg_set; |
3255 | 3362 | ||
3256 | switch(instance->pdev->device) | ||
3257 | { | ||
3258 | case PCI_DEVICE_ID_LSI_SAS1078R: | ||
3259 | case PCI_DEVICE_ID_LSI_SAS1078DE: | ||
3260 | instance->instancet = &megasas_instance_template_ppc; | ||
3261 | break; | ||
3262 | case PCI_DEVICE_ID_LSI_SAS1078GEN2: | ||
3263 | case PCI_DEVICE_ID_LSI_SAS0079GEN2: | ||
3264 | instance->instancet = &megasas_instance_template_gen2; | ||
3265 | break; | ||
3266 | case PCI_DEVICE_ID_LSI_SAS0073SKINNY: | ||
3267 | case PCI_DEVICE_ID_LSI_SAS0071SKINNY: | ||
3268 | instance->instancet = &megasas_instance_template_skinny; | ||
3269 | break; | ||
3270 | case PCI_DEVICE_ID_LSI_SAS1064R: | ||
3271 | case PCI_DEVICE_ID_DELL_PERC5: | ||
3272 | default: | ||
3273 | instance->instancet = &megasas_instance_template_xscale; | ||
3274 | break; | ||
3275 | } | ||
3276 | |||
3277 | /* | ||
3278 | * We expect the FW state to be READY | ||
3279 | */ | ||
3280 | if (megasas_transition_to_ready(instance)) | ||
3281 | goto fail_ready_state; | ||
3282 | |||
3283 | /* | 3363 | /* |
3284 | * Get various operational parameters from status register | 3364 | * Get various operational parameters from status register |
3285 | */ | 3365 | */ |
@@ -3290,7 +3370,8 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
3290 | * does not exceed max cmds that the FW can support | 3370 | * does not exceed max cmds that the FW can support |
3291 | */ | 3371 | */ |
3292 | instance->max_fw_cmds = instance->max_fw_cmds-1; | 3372 | instance->max_fw_cmds = instance->max_fw_cmds-1; |
3293 | instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> | 3373 | instance->max_mfi_cmds = instance->max_fw_cmds; |
3374 | instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >> | ||
3294 | 0x10; | 3375 | 0x10; |
3295 | /* | 3376 | /* |
3296 | * Create a pool of commands | 3377 | * Create a pool of commands |
@@ -3333,6 +3414,90 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
3333 | if (instance->fw_support_ieee) | 3414 | if (instance->fw_support_ieee) |
3334 | instance->flag_ieee = 1; | 3415 | instance->flag_ieee = 1; |
3335 | 3416 | ||
3417 | return 0; | ||
3418 | |||
3419 | fail_fw_init: | ||
3420 | |||
3421 | pci_free_consistent(instance->pdev, reply_q_sz, | ||
3422 | instance->reply_queue, instance->reply_queue_h); | ||
3423 | fail_reply_queue: | ||
3424 | megasas_free_cmds(instance); | ||
3425 | |||
3426 | fail_alloc_cmds: | ||
3427 | iounmap(instance->reg_set); | ||
3428 | return 1; | ||
3429 | } | ||
3430 | |||
3431 | /** | ||
3432 | * megasas_init_fw - Initializes the FW | ||
3433 | * @instance: Adapter soft state | ||
3434 | * | ||
3435 | * This is the main function for initializing firmware | ||
3436 | */ | ||
3437 | |||
3438 | static int megasas_init_fw(struct megasas_instance *instance) | ||
3439 | { | ||
3440 | u32 max_sectors_1; | ||
3441 | u32 max_sectors_2; | ||
3442 | u32 tmp_sectors; | ||
3443 | struct megasas_register_set __iomem *reg_set; | ||
3444 | struct megasas_ctrl_info *ctrl_info; | ||
3445 | unsigned long bar_list; | ||
3446 | |||
3447 | /* Find first memory bar */ | ||
3448 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); | ||
3449 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); | ||
3450 | instance->base_addr = pci_resource_start(instance->pdev, instance->bar); | ||
3451 | if (pci_request_selected_regions(instance->pdev, instance->bar, | ||
3452 | "megasas: LSI")) { | ||
3453 | printk(KERN_DEBUG "megasas: IO memory region busy!\n"); | ||
3454 | return -EBUSY; | ||
3455 | } | ||
3456 | |||
3457 | instance->reg_set = ioremap_nocache(instance->base_addr, 8192); | ||
3458 | |||
3459 | if (!instance->reg_set) { | ||
3460 | printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); | ||
3461 | goto fail_ioremap; | ||
3462 | } | ||
3463 | |||
3464 | reg_set = instance->reg_set; | ||
3465 | |||
3466 | switch (instance->pdev->device) { | ||
3467 | case PCI_DEVICE_ID_LSI_FUSION: | ||
3468 | instance->instancet = &megasas_instance_template_fusion; | ||
3469 | break; | ||
3470 | case PCI_DEVICE_ID_LSI_SAS1078R: | ||
3471 | case PCI_DEVICE_ID_LSI_SAS1078DE: | ||
3472 | instance->instancet = &megasas_instance_template_ppc; | ||
3473 | break; | ||
3474 | case PCI_DEVICE_ID_LSI_SAS1078GEN2: | ||
3475 | case PCI_DEVICE_ID_LSI_SAS0079GEN2: | ||
3476 | instance->instancet = &megasas_instance_template_gen2; | ||
3477 | break; | ||
3478 | case PCI_DEVICE_ID_LSI_SAS0073SKINNY: | ||
3479 | case PCI_DEVICE_ID_LSI_SAS0071SKINNY: | ||
3480 | instance->instancet = &megasas_instance_template_skinny; | ||
3481 | break; | ||
3482 | case PCI_DEVICE_ID_LSI_SAS1064R: | ||
3483 | case PCI_DEVICE_ID_DELL_PERC5: | ||
3484 | default: | ||
3485 | instance->instancet = &megasas_instance_template_xscale; | ||
3486 | break; | ||
3487 | } | ||
3488 | |||
3489 | /* | ||
3490 | * We expect the FW state to be READY | ||
3491 | */ | ||
3492 | if (megasas_transition_to_ready(instance)) | ||
3493 | goto fail_ready_state; | ||
3494 | |||
3495 | /* Get operational params, sge flags, send init cmd to controller */ | ||
3496 | if (instance->instancet->init_adapter(instance)) | ||
3497 | return -ENODEV; | ||
3498 | |||
3499 | printk(KERN_ERR "megasas: INIT adapter done\n"); | ||
3500 | |||
3336 | /** for passthrough | 3501 | /** for passthrough |
3337 | * the following function will get the PD LIST. | 3502 | * the following function will get the PD LIST. |
3338 | */ | 3503 | */ |
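In the new megasas_init_fw() above, the register window is no longer chosen by a per-device-ID BAR number; the driver takes the memory-BAR mask from pci_select_bars(), picks the first set bit with find_first_bit(), and records the result in instance->bar for the later request/release calls. The standalone sketch below shows the same lowest-set-bit selection on a made-up mask; it is an illustration of the idiom, not the PCI core API.

    #include <stdio.h>

    /* Return the index of the lowest set bit, i.e. the first memory BAR
     * (analogue of find_first_bit() on the pci_select_bars() result). */
    static unsigned int first_mem_bar(unsigned long bar_mask)
    {
        unsigned int bar = 0;

        while (bar_mask && !(bar_mask & 1)) {
            bar_mask >>= 1;
            bar++;
        }
        return bar;
    }

    int main(void)
    {
        unsigned long bar_mask = 0x0a;   /* pretend BARs 1 and 3 are memory BARs */

        printf("mapping registers from BAR %u\n", first_mem_bar(bar_mask));
        return 0;
    }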
@@ -3388,20 +3553,11 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
3388 | MEGASAS_COMPLETION_TIMER_INTERVAL); | 3553 | MEGASAS_COMPLETION_TIMER_INTERVAL); |
3389 | return 0; | 3554 | return 0; |
3390 | 3555 | ||
3391 | fail_fw_init: | 3556 | fail_ready_state: |
3392 | |||
3393 | pci_free_consistent(instance->pdev, reply_q_sz, | ||
3394 | instance->reply_queue, instance->reply_queue_h); | ||
3395 | fail_reply_queue: | ||
3396 | megasas_free_cmds(instance); | ||
3397 | |||
3398 | fail_alloc_cmds: | ||
3399 | fail_ready_state: | ||
3400 | iounmap(instance->reg_set); | 3557 | iounmap(instance->reg_set); |
3401 | 3558 | ||
3402 | fail_ioremap: | 3559 | fail_ioremap: |
3403 | pci_release_selected_regions(instance->pdev, | 3560 | pci_release_selected_regions(instance->pdev, instance->bar); |
3404 | pci_select_bars(instance->pdev, IORESOURCE_MEM)); | ||
3405 | 3561 | ||
3406 | return -EINVAL; | 3562 | return -EINVAL; |
3407 | } | 3563 | } |
@@ -3412,17 +3568,17 @@ static int megasas_init_mfi(struct megasas_instance *instance) | |||
3412 | */ | 3568 | */ |
3413 | static void megasas_release_mfi(struct megasas_instance *instance) | 3569 | static void megasas_release_mfi(struct megasas_instance *instance) |
3414 | { | 3570 | { |
3415 | u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1); | 3571 | u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1); |
3416 | 3572 | ||
3417 | pci_free_consistent(instance->pdev, reply_q_sz, | 3573 | if (instance->reply_queue) |
3574 | pci_free_consistent(instance->pdev, reply_q_sz, | ||
3418 | instance->reply_queue, instance->reply_queue_h); | 3575 | instance->reply_queue, instance->reply_queue_h); |
3419 | 3576 | ||
3420 | megasas_free_cmds(instance); | 3577 | megasas_free_cmds(instance); |
3421 | 3578 | ||
3422 | iounmap(instance->reg_set); | 3579 | iounmap(instance->reg_set); |
3423 | 3580 | ||
3424 | pci_release_selected_regions(instance->pdev, | 3581 | pci_release_selected_regions(instance->pdev, instance->bar); |
3425 | pci_select_bars(instance->pdev, IORESOURCE_MEM)); | ||
3426 | } | 3582 | } |
3427 | 3583 | ||
3428 | /** | 3584 | /** |
@@ -3609,8 +3765,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
3609 | /* | 3765 | /* |
3610 | * Issue the aen registration frame | 3766 | * Issue the aen registration frame |
3611 | */ | 3767 | */ |
3612 | instance->instancet->fire_cmd(instance, | 3768 | instance->instancet->issue_dcmd(instance, cmd); |
3613 | cmd->frame_phys_addr, 0, instance->reg_set); | ||
3614 | 3769 | ||
3615 | return 0; | 3770 | return 0; |
3616 | } | 3771 | } |
@@ -3687,12 +3842,18 @@ static int megasas_io_attach(struct megasas_instance *instance) | |||
3687 | } | 3842 | } |
3688 | 3843 | ||
3689 | host->max_sectors = instance->max_sectors_per_req; | 3844 | host->max_sectors = instance->max_sectors_per_req; |
3690 | host->cmd_per_lun = 128; | 3845 | host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN; |
3691 | host->max_channel = MEGASAS_MAX_CHANNELS - 1; | 3846 | host->max_channel = MEGASAS_MAX_CHANNELS - 1; |
3692 | host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; | 3847 | host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; |
3693 | host->max_lun = MEGASAS_MAX_LUN; | 3848 | host->max_lun = MEGASAS_MAX_LUN; |
3694 | host->max_cmd_len = 16; | 3849 | host->max_cmd_len = 16; |
3695 | 3850 | ||
3851 | /* Fusion only supports host reset */ | ||
3852 | if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) { | ||
3853 | host->hostt->eh_device_reset_handler = NULL; | ||
3854 | host->hostt->eh_bus_reset_handler = NULL; | ||
3855 | } | ||
3856 | |||
3696 | /* | 3857 | /* |
3697 | * Notify the mid-layer about the new controller | 3858 | * Notify the mid-layer about the new controller |
3698 | */ | 3859 | */ |
@@ -3733,7 +3894,7 @@ fail_set_dma_mask: | |||
3733 | /** | 3894 | /** |
3734 | * megasas_probe_one - PCI hotplug entry point | 3895 | * megasas_probe_one - PCI hotplug entry point |
3735 | * @pdev: PCI device structure | 3896 | * @pdev: PCI device structure |
3736 | * @id: PCI ids of supported hotplugged adapter | 3897 | * @id: PCI ids of supported hotplugged adapter |
3737 | */ | 3898 | */ |
3738 | static int __devinit | 3899 | static int __devinit |
3739 | megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | 3900 | megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
@@ -3777,20 +3938,45 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3777 | instance = (struct megasas_instance *)host->hostdata; | 3938 | instance = (struct megasas_instance *)host->hostdata; |
3778 | memset(instance, 0, sizeof(*instance)); | 3939 | memset(instance, 0, sizeof(*instance)); |
3779 | atomic_set( &instance->fw_reset_no_pci_access, 0 ); | 3940 | atomic_set( &instance->fw_reset_no_pci_access, 0 ); |
3941 | instance->pdev = pdev; | ||
3780 | 3942 | ||
3781 | instance->producer = pci_alloc_consistent(pdev, sizeof(u32), | 3943 | switch (instance->pdev->device) { |
3782 | &instance->producer_h); | 3944 | case PCI_DEVICE_ID_LSI_FUSION: |
3783 | instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), | 3945 | { |
3784 | &instance->consumer_h); | 3946 | struct fusion_context *fusion; |
3947 | |||
3948 | instance->ctrl_context = | ||
3949 | kzalloc(sizeof(struct fusion_context), GFP_KERNEL); | ||
3950 | if (!instance->ctrl_context) { | ||
3951 | printk(KERN_DEBUG "megasas: Failed to allocate " | ||
3952 | "memory for Fusion context info\n"); | ||
3953 | goto fail_alloc_dma_buf; | ||
3954 | } | ||
3955 | fusion = instance->ctrl_context; | ||
3956 | INIT_LIST_HEAD(&fusion->cmd_pool); | ||
3957 | spin_lock_init(&fusion->cmd_pool_lock); | ||
3958 | } | ||
3959 | break; | ||
3960 | default: /* For all other supported controllers */ | ||
3961 | |||
3962 | instance->producer = | ||
3963 | pci_alloc_consistent(pdev, sizeof(u32), | ||
3964 | &instance->producer_h); | ||
3965 | instance->consumer = | ||
3966 | pci_alloc_consistent(pdev, sizeof(u32), | ||
3967 | &instance->consumer_h); | ||
3968 | |||
3969 | if (!instance->producer || !instance->consumer) { | ||
3970 | printk(KERN_DEBUG "megasas: Failed to allocate" | ||
3971 | " memory for producer, consumer\n"); ||
3972 | goto fail_alloc_dma_buf; | ||
3973 | } | ||
3785 | 3974 | ||
3786 | if (!instance->producer || !instance->consumer) { | 3975 | *instance->producer = 0; |
3787 | printk(KERN_DEBUG "megasas: Failed to allocate memory for " | 3976 | *instance->consumer = 0; |
3788 | "producer, consumer\n"); | 3977 | break; |
3789 | goto fail_alloc_dma_buf; | ||
3790 | } | 3978 | } |
3791 | 3979 | ||
3792 | *instance->producer = 0; | ||
3793 | *instance->consumer = 0; | ||
3794 | megasas_poll_wait_aen = 0; | 3980 | megasas_poll_wait_aen = 0; |
3795 | instance->flag_ieee = 0; | 3981 | instance->flag_ieee = 0; |
3796 | instance->ev = NULL; | 3982 | instance->ev = NULL; |
@@ -3826,11 +4012,11 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3826 | spin_lock_init(&poll_aen_lock); | 4012 | spin_lock_init(&poll_aen_lock); |
3827 | 4013 | ||
3828 | mutex_init(&instance->aen_mutex); | 4014 | mutex_init(&instance->aen_mutex); |
4015 | mutex_init(&instance->reset_mutex); | ||
3829 | 4016 | ||
3830 | /* | 4017 | /* |
3831 | * Initialize PCI related and misc parameters | 4018 | * Initialize PCI related and misc parameters |
3832 | */ | 4019 | */ |
3833 | instance->pdev = pdev; | ||
3834 | instance->host = host; | 4020 | instance->host = host; |
3835 | instance->unique_id = pdev->bus->number << 8 | pdev->devfn; | 4021 | instance->unique_id = pdev->bus->number << 8 | pdev->devfn; |
3836 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; | 4022 | instance->init_id = MEGASAS_DEFAULT_INIT_ID; |
@@ -3848,18 +4034,31 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3848 | instance->last_time = 0; | 4034 | instance->last_time = 0; |
3849 | instance->disableOnlineCtrlReset = 1; | 4035 | instance->disableOnlineCtrlReset = 1; |
3850 | 4036 | ||
3851 | INIT_WORK(&instance->work_init, process_fw_state_change_wq); | 4037 | if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) |
4038 | INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); | ||
4039 | else | ||
4040 | INIT_WORK(&instance->work_init, process_fw_state_change_wq); | ||
3852 | 4041 | ||
3853 | /* | 4042 | /* |
3854 | * Initialize MFI Firmware | 4043 | * Initialize MFI Firmware |
3855 | */ | 4044 | */ |
3856 | if (megasas_init_mfi(instance)) | 4045 | if (megasas_init_fw(instance)) |
3857 | goto fail_init_mfi; | 4046 | goto fail_init_mfi; |
3858 | 4047 | ||
4048 | /* Try to enable MSI-X */ | ||
4049 | if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) && | ||
4050 | (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) && | ||
4051 | (instance->pdev->device != PCI_DEVICE_ID_LSI_VERDE_ZCR) && | ||
4052 | !msix_disable && !pci_enable_msix(instance->pdev, | ||
4053 | &instance->msixentry, 1)) | ||
4054 | instance->msi_flag = 1; | ||
4055 | |||
3859 | /* | 4056 | /* |
3860 | * Register IRQ | 4057 | * Register IRQ |
3861 | */ | 4058 | */ |
3862 | if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, "megasas", instance)) { | 4059 | if (request_irq(instance->msi_flag ? instance->msixentry.vector : |
4060 | pdev->irq, instance->instancet->service_isr, | ||
4061 | IRQF_SHARED, "megasas", instance)) { | ||
3863 | printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); | 4062 | printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); |
3864 | goto fail_irq; | 4063 | goto fail_irq; |
3865 | } | 4064 | } |
@@ -3904,9 +4103,10 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3904 | 4103 | ||
3905 | pci_set_drvdata(pdev, NULL); | 4104 | pci_set_drvdata(pdev, NULL); |
3906 | instance->instancet->disable_intr(instance->reg_set); | 4105 | instance->instancet->disable_intr(instance->reg_set); |
3907 | free_irq(instance->pdev->irq, instance); | 4106 | free_irq(instance->msi_flag ? instance->msixentry.vector : |
3908 | 4107 | instance->pdev->irq, instance); | |
3909 | megasas_release_mfi(instance); | 4108 | if (instance->msi_flag) |
4109 | pci_disable_msix(instance->pdev); | ||
3910 | 4110 | ||
3911 | fail_irq: | 4111 | fail_irq: |
3912 | fail_init_mfi: | 4112 | fail_init_mfi: |
@@ -3916,9 +4116,13 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3916 | instance->evt_detail, | 4116 | instance->evt_detail, |
3917 | instance->evt_detail_h); | 4117 | instance->evt_detail_h); |
3918 | 4118 | ||
3919 | if (instance->producer) | 4119 | if (instance->producer) { |
3920 | pci_free_consistent(pdev, sizeof(u32), instance->producer, | 4120 | pci_free_consistent(pdev, sizeof(u32), instance->producer, |
3921 | instance->producer_h); | 4121 | instance->producer_h); |
4122 | megasas_release_mfi(instance); | ||
4123 | } else { | ||
4124 | megasas_release_fusion(instance); | ||
4125 | } | ||
3922 | if (instance->consumer) | 4126 | if (instance->consumer) |
3923 | pci_free_consistent(pdev, sizeof(u32), instance->consumer, | 4127 | pci_free_consistent(pdev, sizeof(u32), instance->consumer, |
3924 | instance->consumer_h); | 4128 | instance->consumer_h); |
@@ -3990,7 +4194,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, | |||
3990 | 4194 | ||
3991 | if (instance->aen_cmd) | 4195 | if (instance->aen_cmd) |
3992 | megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); | 4196 | megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); |
3993 | 4197 | if (instance->map_update_cmd) | |
4198 | megasas_issue_blocked_abort_cmd(instance, | ||
4199 | instance->map_update_cmd); | ||
3994 | dcmd = &cmd->frame->dcmd; | 4200 | dcmd = &cmd->frame->dcmd; |
3995 | 4201 | ||
3996 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | 4202 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); |
@@ -4046,7 +4252,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) | |||
4046 | 4252 | ||
4047 | pci_set_drvdata(instance->pdev, instance); | 4253 | pci_set_drvdata(instance->pdev, instance); |
4048 | instance->instancet->disable_intr(instance->reg_set); | 4254 | instance->instancet->disable_intr(instance->reg_set); |
4049 | free_irq(instance->pdev->irq, instance); | 4255 | free_irq(instance->msi_flag ? instance->msixentry.vector : |
4256 | instance->pdev->irq, instance); | ||
4257 | if (instance->msi_flag) | ||
4258 | pci_disable_msix(instance->pdev); | ||
4050 | 4259 | ||
4051 | pci_save_state(pdev); | 4260 | pci_save_state(pdev); |
4052 | pci_disable_device(pdev); | 4261 | pci_disable_device(pdev); |
@@ -4092,9 +4301,6 @@ megasas_resume(struct pci_dev *pdev) | |||
4092 | * Initialize MFI Firmware | 4301 | * Initialize MFI Firmware |
4093 | */ | 4302 | */ |
4094 | 4303 | ||
4095 | *instance->producer = 0; | ||
4096 | *instance->consumer = 0; | ||
4097 | |||
4098 | atomic_set(&instance->fw_outstanding, 0); | 4304 | atomic_set(&instance->fw_outstanding, 0); |
4099 | 4305 | ||
4100 | /* | 4306 | /* |
@@ -4103,17 +4309,40 @@ megasas_resume(struct pci_dev *pdev) | |||
4103 | if (megasas_transition_to_ready(instance)) | 4309 | if (megasas_transition_to_ready(instance)) |
4104 | goto fail_ready_state; | 4310 | goto fail_ready_state; |
4105 | 4311 | ||
4106 | if (megasas_issue_init_mfi(instance)) | 4312 | switch (instance->pdev->device) { |
4107 | goto fail_init_mfi; | 4313 | case PCI_DEVICE_ID_LSI_FUSION: |
4314 | { | ||
4315 | megasas_reset_reply_desc(instance); | ||
4316 | if (megasas_ioc_init_fusion(instance)) { | ||
4317 | megasas_free_cmds(instance); | ||
4318 | megasas_free_cmds_fusion(instance); | ||
4319 | goto fail_init_mfi; | ||
4320 | } | ||
4321 | if (!megasas_get_map_info(instance)) | ||
4322 | megasas_sync_map_info(instance); | ||
4323 | } | ||
4324 | break; | ||
4325 | default: | ||
4326 | *instance->producer = 0; | ||
4327 | *instance->consumer = 0; | ||
4328 | if (megasas_issue_init_mfi(instance)) | ||
4329 | goto fail_init_mfi; | ||
4330 | break; | ||
4331 | } | ||
4108 | 4332 | ||
4109 | tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc, | 4333 | tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, |
4110 | (unsigned long)instance); | 4334 | (unsigned long)instance); |
4335 | |||
4336 | /* Now re-enable MSI-X */ | ||
4337 | if (instance->msi_flag) | ||
4338 | pci_enable_msix(instance->pdev, &instance->msixentry, 1); | ||
4111 | 4339 | ||
4112 | /* | 4340 | /* |
4113 | * Register IRQ | 4341 | * Register IRQ |
4114 | */ | 4342 | */ |
4115 | if (request_irq(pdev->irq, megasas_isr, IRQF_SHARED, | 4343 | if (request_irq(instance->msi_flag ? instance->msixentry.vector : |
4116 | "megasas", instance)) { | 4344 | pdev->irq, instance->instancet->service_isr, |
4345 | IRQF_SHARED, "megasas", instance)) { | ||
4117 | printk(KERN_ERR "megasas: Failed to register IRQ\n"); | 4346 | printk(KERN_ERR "megasas: Failed to register IRQ\n"); |
4118 | goto fail_irq; | 4347 | goto fail_irq; |
4119 | } | 4348 | } |
@@ -4171,10 +4400,12 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) | |||
4171 | int i; | 4400 | int i; |
4172 | struct Scsi_Host *host; | 4401 | struct Scsi_Host *host; |
4173 | struct megasas_instance *instance; | 4402 | struct megasas_instance *instance; |
4403 | struct fusion_context *fusion; | ||
4174 | 4404 | ||
4175 | instance = pci_get_drvdata(pdev); | 4405 | instance = pci_get_drvdata(pdev); |
4176 | instance->unload = 1; | 4406 | instance->unload = 1; |
4177 | host = instance->host; | 4407 | host = instance->host; |
4408 | fusion = instance->ctrl_context; | ||
4178 | 4409 | ||
4179 | if (poll_mode_io) | 4410 | if (poll_mode_io) |
4180 | del_timer_sync(&instance->io_completion_timer); | 4411 | del_timer_sync(&instance->io_completion_timer); |
@@ -4211,18 +4442,37 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) | |||
4211 | 4442 | ||
4212 | instance->instancet->disable_intr(instance->reg_set); | 4443 | instance->instancet->disable_intr(instance->reg_set); |
4213 | 4444 | ||
4214 | free_irq(instance->pdev->irq, instance); | 4445 | free_irq(instance->msi_flag ? instance->msixentry.vector : |
4215 | 4446 | instance->pdev->irq, instance); | |
4216 | megasas_release_mfi(instance); | 4447 | if (instance->msi_flag) |
4217 | 4448 | pci_disable_msix(instance->pdev); | |
4218 | pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), | 4449 | |
4219 | instance->evt_detail, instance->evt_detail_h); | 4450 | switch (instance->pdev->device) { |
4220 | 4451 | case PCI_DEVICE_ID_LSI_FUSION: | |
4221 | pci_free_consistent(pdev, sizeof(u32), instance->producer, | 4452 | megasas_release_fusion(instance); |
4222 | instance->producer_h); | 4453 | for (i = 0; i < 2 ; i++) |
4223 | 4454 | if (fusion->ld_map[i]) | |
4224 | pci_free_consistent(pdev, sizeof(u32), instance->consumer, | 4455 | dma_free_coherent(&instance->pdev->dev, |
4225 | instance->consumer_h); | 4456 | fusion->map_sz, |
4457 | fusion->ld_map[i], | ||
4458 | fusion-> | ||
4459 | ld_map_phys[i]); | ||
4460 | kfree(instance->ctrl_context); | ||
4461 | break; | ||
4462 | default: | ||
4463 | megasas_release_mfi(instance); | ||
4464 | pci_free_consistent(pdev, | ||
4465 | sizeof(struct megasas_evt_detail), | ||
4466 | instance->evt_detail, | ||
4467 | instance->evt_detail_h); | ||
4468 | pci_free_consistent(pdev, sizeof(u32), | ||
4469 | instance->producer, | ||
4470 | instance->producer_h); | ||
4471 | pci_free_consistent(pdev, sizeof(u32), | ||
4472 | instance->consumer, | ||
4473 | instance->consumer_h); | ||
4474 | break; | ||
4475 | } | ||
4226 | 4476 | ||
4227 | scsi_host_put(host); | 4477 | scsi_host_put(host); |
4228 | 4478 | ||
@@ -4986,6 +5236,7 @@ megasas_aen_polling(struct work_struct *work) | |||
4986 | break; | 5236 | break; |
4987 | case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: | 5237 | case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: |
4988 | case MR_EVT_FOREIGN_CFG_IMPORTED: | 5238 | case MR_EVT_FOREIGN_CFG_IMPORTED: |
5239 | case MR_EVT_LD_STATE_CHANGE: | ||
4989 | doscan = 1; | 5240 | doscan = 1; |
4990 | break; | 5241 | break; |
4991 | default: | 5242 | default: |
@@ -5165,7 +5416,7 @@ err_dcf_attr_ver: | |||
5165 | pci_unregister_driver(&megasas_pci_driver); | 5416 | pci_unregister_driver(&megasas_pci_driver); |
5166 | err_pcidrv: | 5417 | err_pcidrv: |
5167 | unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); | 5418 | unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); |
5168 | return rval; | 5419 | return rval; |
5169 | } | 5420 | } |
5170 | 5421 | ||
5171 | /** | 5422 | /** |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c new file mode 100644 index 000000000000..53fa96ae2b3e --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c | |||
@@ -0,0 +1,516 @@ | |||
1 | /* | ||
2 | * Linux MegaRAID driver for SAS based RAID controllers | ||
3 | * | ||
4 | * Copyright (c) 2009-2011 LSI Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | * FILE: megaraid_sas_fp.c | ||
21 | * | ||
22 | * Authors: LSI Corporation | ||
23 | * Sumant Patro | ||
24 | * Varad Talamacki | ||
25 | * Manoj Jose | ||
26 | * | ||
27 | * Send feedback to: <megaraidlinux@lsi.com> | ||
28 | * | ||
29 | * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 | ||
30 | * ATTN: Linuxraid | ||
31 | */ | ||
32 | |||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/list.h> | ||
37 | #include <linux/moduleparam.h> | ||
38 | #include <linux/module.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/interrupt.h> | ||
41 | #include <linux/delay.h> | ||
42 | #include <linux/smp_lock.h> | ||
43 | #include <linux/uio.h> | ||
44 | #include <linux/uaccess.h> | ||
45 | #include <linux/fs.h> | ||
46 | #include <linux/compat.h> | ||
47 | #include <linux/blkdev.h> | ||
48 | #include <linux/poll.h> | ||
49 | |||
50 | #include <scsi/scsi.h> | ||
51 | #include <scsi/scsi_cmnd.h> | ||
52 | #include <scsi/scsi_device.h> | ||
53 | #include <scsi/scsi_host.h> | ||
54 | |||
55 | #include "megaraid_sas_fusion.h" | ||
56 | #include <asm/div64.h> | ||
57 | |||
58 | #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) | ||
59 | #define MR_LD_STATE_OPTIMAL 3 | ||
60 | #define FALSE 0 | ||
61 | #define TRUE 1 | ||
62 | |||
63 | /* Prototypes */ | ||
64 | void | ||
65 | mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, | ||
66 | struct LD_LOAD_BALANCE_INFO *lbInfo); | ||
67 | |||
68 | u32 mega_mod64(u64 dividend, u32 divisor) | ||
69 | { | ||
70 | u64 d; | ||
71 | u32 remainder; | ||
72 | |||
73 | if (!divisor) | ||
74 | printk(KERN_ERR "megasas : DIVISOR is zero, in mod fn\n"); | ||
75 | d = dividend; | ||
76 | remainder = do_div(d, divisor); | ||
77 | return remainder; | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * @param dividend : Dividend | ||
82 | * @param divisor : Divisor | ||
83 | * | ||
84 | * @return quotient | ||
85 | **/ | ||
86 | u64 mega_div64_32(uint64_t dividend, uint32_t divisor) | ||
87 | { | ||
88 | u32 remainder; | ||
89 | u64 d; | ||
90 | |||
91 | if (!divisor) | ||
92 | printk(KERN_ERR "megasas : DIVISOR is zero in div fn\n"); | ||
93 | |||
94 | d = dividend; | ||
95 | remainder = do_div(d, divisor); | ||
96 | |||
97 | return d; | ||
98 | } | ||
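The two helpers above wrap the kernel's do_div(), which divides a 64-bit value in place and returns only the remainder, so the driver needs one wrapper for the remainder and one for the quotient. A minimal userspace sketch of the same split, with an assumed LBA and strips-per-row value (not driver code):

#include <stdint.h>
#include <stdio.h>

/* remainder, like mega_mod64() */
static uint32_t sketch_mod64(uint64_t dividend, uint32_t divisor)
{
	return (uint32_t)(dividend % divisor);
}

/* quotient, like mega_div64_32() */
static uint64_t sketch_div64_32(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t lba = 1000003ULL;        /* assumed logical block address */
	uint32_t strips_per_row = 3;      /* assumed row width */

	printf("row %llu, strip-in-row %u\n",
	       (unsigned long long)sketch_div64_32(lba, strips_per_row),
	       (unsigned)sketch_mod64(lba, strips_per_row));
	return 0;
}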
99 | |||
100 | struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map) | ||
101 | { | ||
102 | return &map->raidMap.ldSpanMap[ld].ldRaid; | ||
103 | } | ||
104 | |||
105 | static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld, | ||
106 | struct MR_FW_RAID_MAP_ALL | ||
107 | *map) | ||
108 | { | ||
109 | return &map->raidMap.ldSpanMap[ld].spanBlock[0]; | ||
110 | } | ||
111 | |||
112 | static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map) | ||
113 | { | ||
114 | return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; | ||
115 | } | ||
116 | |||
117 | static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) | ||
118 | { | ||
119 | return map->raidMap.arMapInfo[ar].pd[arm]; | ||
120 | } | ||
121 | |||
122 | static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) | ||
123 | { | ||
124 | return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; | ||
125 | } | ||
126 | |||
127 | static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) | ||
128 | { | ||
129 | return map->raidMap.devHndlInfo[pd].curDevHdl; | ||
130 | } | ||
131 | |||
132 | u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) | ||
133 | { | ||
134 | return map->raidMap.ldSpanMap[ld].ldRaid.targetId; | ||
135 | } | ||
136 | |||
137 | u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) | ||
138 | { | ||
139 | return map->raidMap.ldTgtIdToLd[ldTgtId]; | ||
140 | } | ||
141 | |||
142 | static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, | ||
143 | struct MR_FW_RAID_MAP_ALL *map) | ||
144 | { | ||
145 | return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * This function will validate Map info data provided by FW | ||
150 | */ | ||
151 | u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, | ||
152 | struct LD_LOAD_BALANCE_INFO *lbInfo) | ||
153 | { | ||
154 | struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; | ||
155 | |||
156 | if (pFwRaidMap->totalSize != | ||
157 | (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + | ||
158 | (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { | ||
159 | printk(KERN_ERR "megasas: map info structure size 0x%x does not match ld count\n", | ||
160 | (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - | ||
161 | sizeof(struct MR_LD_SPAN_MAP)) + | ||
162 | (sizeof(struct MR_LD_SPAN_MAP) * | ||
163 | pFwRaidMap->ldCount))); | ||
164 | printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " | ||
165 | ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), | ||
166 | pFwRaidMap->totalSize); | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | mr_update_load_balance_params(map, lbInfo); | ||
171 | |||
172 | return 1; | ||
173 | } | ||
174 | |||
175 | u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, | ||
176 | struct MR_FW_RAID_MAP_ALL *map, int *div_error) | ||
177 | { | ||
178 | struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); | ||
179 | struct MR_QUAD_ELEMENT *quad; | ||
180 | struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); | ||
181 | u32 span, j; | ||
182 | |||
183 | for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { | ||
184 | |||
185 | for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { | ||
186 | quad = &pSpanBlock->block_span_info.quad[j]; | ||
187 | |||
188 | if (quad->diff == 0) { | ||
189 | *div_error = 1; | ||
190 | return span; | ||
191 | } | ||
192 | if (quad->logStart <= row && row <= quad->logEnd && | ||
193 | (mega_mod64(row-quad->logStart, quad->diff)) == 0) { | ||
194 | if (span_blk != NULL) { | ||
195 | u64 blk, debugBlk; | ||
196 | blk = | ||
197 | mega_div64_32( | ||
198 | (row-quad->logStart), | ||
199 | quad->diff); | ||
200 | debugBlk = blk; | ||
201 | |||
202 | blk = (blk + quad->offsetInSpan) << | ||
203 | raid->stripeShift; | ||
204 | *span_blk = blk; | ||
205 | } | ||
206 | return span; | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | return span; | ||
211 | } | ||
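In the lookup above, a row belongs to a quad if it falls inside [logStart, logEnd] and lands on the quad's stride (diff); the span block is then the row's position within the quad plus the span offset, scaled up by the strip size. A standalone sketch with assumed quad values (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed quad describing one span */
	uint64_t logStart = 0, logEnd = 1023, offsetInSpan = 16;
	uint32_t diff = 2;                /* assumed stride between rows */
	unsigned stripeShift = 7;         /* assumed 128-block strips */
	uint64_t row = 100;

	if (row >= logStart && row <= logEnd &&
	    (row - logStart) % diff == 0) {
		uint64_t blk = (row - logStart) / diff;

		blk = (blk + offsetInSpan) << stripeShift;
		printf("row %llu maps to span block 0x%llx\n",
		       (unsigned long long)row, (unsigned long long)blk);
	}
	return 0;
}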
212 | |||
213 | /* | ||
214 | ****************************************************************************** | ||
215 | * | ||
216 | * This routine calculates the arm, span and block for the specified stripe and | ||
217 | * reference in stripe. | ||
218 | * | ||
219 | * Inputs : | ||
220 | * | ||
221 | * ld - Logical drive number | ||
222 | * stripRow - Stripe number | ||
223 | * stripRef - Reference in stripe | ||
224 | * | ||
225 | * Outputs : | ||
226 | * | ||
227 | * span - Span number | ||
228 | * block - Absolute Block number in the physical disk | ||
229 | */ | ||
230 | u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock, | ||
231 | u16 *pDevHandle, struct RAID_CONTEXT *pRAID_Context, | ||
232 | struct MR_FW_RAID_MAP_ALL *map) | ||
233 | { | ||
234 | struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); | ||
235 | u32 pd, arRef; | ||
236 | u8 physArm, span; | ||
237 | u64 row; | ||
238 | u8 retval = TRUE; | ||
239 | int error_code = 0; | ||
240 | |||
241 | row = mega_div64_32(stripRow, raid->rowDataSize); | ||
242 | |||
243 | if (raid->level == 6) { | ||
244 | /* logical arm within row */ | ||
245 | u32 logArm = mega_mod64(stripRow, raid->rowDataSize); | ||
246 | u32 rowMod, armQ, arm; | ||
247 | |||
248 | if (raid->rowSize == 0) | ||
249 | return FALSE; | ||
250 | /* get logical row mod */ | ||
251 | rowMod = mega_mod64(row, raid->rowSize); | ||
252 | armQ = raid->rowSize-1-rowMod; /* index of Q drive */ | ||
253 | arm = armQ+1+logArm; /* data always logically follows Q */ | ||
254 | if (arm >= raid->rowSize) /* handle wrap condition */ | ||
255 | arm -= raid->rowSize; | ||
256 | physArm = (u8)arm; | ||
257 | } else { | ||
258 | if (raid->modFactor == 0) | ||
259 | return FALSE; | ||
260 | physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, | ||
261 | raid->modFactor), | ||
262 | map); | ||
263 | } | ||
264 | |||
265 | if (raid->spanDepth == 1) { | ||
266 | span = 0; | ||
267 | *pdBlock = row << raid->stripeShift; | ||
268 | } else { | ||
269 | span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code); | ||
270 | if (error_code == 1) | ||
271 | return FALSE; | ||
272 | } | ||
273 | |||
274 | /* Get the array on which this span is present */ | ||
275 | arRef = MR_LdSpanArrayGet(ld, span, map); | ||
276 | pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ | ||
277 | |||
278 | if (pd != MR_PD_INVALID) | ||
279 | /* Get dev handle from Pd. */ | ||
280 | *pDevHandle = MR_PdDevHandleGet(pd, map); | ||
281 | else { | ||
282 | *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ | ||
283 | if (raid->level >= 5) | ||
284 | pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; | ||
285 | else if (raid->level == 1) { | ||
286 | /* Get alternate Pd. */ | ||
287 | pd = MR_ArPdGet(arRef, physArm + 1, map); | ||
288 | if (pd != MR_PD_INVALID) | ||
289 | /* Get dev handle from Pd */ | ||
290 | *pDevHandle = MR_PdDevHandleGet(pd, map); | ||
291 | } | ||
292 | retval = FALSE; | ||
293 | } | ||
294 | |||
295 | *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; | ||
296 | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | | ||
297 | physArm; | ||
298 | return retval; | ||
299 | } | ||
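For RAID-6 the function rotates the data arms around the Q drive: the Q arm index walks backwards with the row number and the data arms follow it, wrapping within the row. A standalone sketch of that rotation for an assumed 4-drive row with two data arms (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned rowSize = 4;             /* assumed drives per row */
	unsigned dataArms = rowSize - 2;  /* assumed two parity arms (P and Q) */
	unsigned row, logArm;

	for (row = 0; row < 4; row++) {
		unsigned rowMod = row % rowSize;
		unsigned armQ = rowSize - 1 - rowMod;   /* index of Q drive */

		printf("row %u: Q on arm %u, data arms:", row, armQ);
		for (logArm = 0; logArm < dataArms; logArm++) {
			unsigned arm = armQ + 1 + logArm;  /* data follows Q */

			if (arm >= rowSize)                /* wrap condition */
				arm -= rowSize;
			printf(" %u", arm);
		}
		printf("\n");
	}
	return 0;
}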
300 | |||
301 | /* | ||
302 | ****************************************************************************** | ||
303 | * | ||
304 | * MR_BuildRaidContext function | ||
305 | * | ||
306 | * This function builds the RAID context for a command: the start/end row | ||
307 | * and strip information is calculated and the region lock parameters are | ||
308 | * filled in. Returns TRUE on success, FALSE if the physical parameters | ||
308 | * cannot be resolved. | ||
309 | */ | ||
310 | u8 | ||
311 | MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, | ||
312 | struct RAID_CONTEXT *pRAID_Context, | ||
313 | struct MR_FW_RAID_MAP_ALL *map) | ||
314 | { | ||
315 | struct MR_LD_RAID *raid; | ||
316 | u32 ld, stripSize, stripe_mask; | ||
317 | u64 endLba, endStrip, endRow, start_row, start_strip; | ||
318 | u64 regStart; | ||
319 | u32 regSize; | ||
320 | u8 num_strips, numRows; | ||
321 | u16 ref_in_start_stripe, ref_in_end_stripe; | ||
322 | u64 ldStartBlock; | ||
323 | u32 numBlocks, ldTgtId; | ||
324 | u8 isRead; | ||
325 | u8 retval = 0; | ||
326 | |||
327 | ldStartBlock = io_info->ldStartBlock; | ||
328 | numBlocks = io_info->numBlocks; | ||
329 | ldTgtId = io_info->ldTgtId; | ||
330 | isRead = io_info->isRead; | ||
331 | |||
332 | ld = MR_TargetIdToLdGet(ldTgtId, map); | ||
333 | raid = MR_LdRaidGet(ld, map); | ||
334 | |||
335 | stripSize = 1 << raid->stripeShift; | ||
336 | stripe_mask = stripSize-1; | ||
337 | /* | ||
338 | * calculate starting row and stripe, and number of strips and rows | ||
339 | */ | ||
340 | start_strip = ldStartBlock >> raid->stripeShift; | ||
341 | ref_in_start_stripe = (u16)(ldStartBlock & stripe_mask); | ||
342 | endLba = ldStartBlock + numBlocks - 1; | ||
343 | ref_in_end_stripe = (u16)(endLba & stripe_mask); | ||
344 | endStrip = endLba >> raid->stripeShift; | ||
345 | num_strips = (u8)(endStrip - start_strip + 1); /* End strip */ | ||
346 | if (raid->rowDataSize == 0) | ||
347 | return FALSE; | ||
348 | start_row = mega_div64_32(start_strip, raid->rowDataSize); | ||
349 | endRow = mega_div64_32(endStrip, raid->rowDataSize); | ||
350 | numRows = (u8)(endRow - start_row + 1); | ||
351 | |||
352 | /* | ||
353 | * calculate region info. | ||
354 | */ | ||
355 | |||
356 | /* assume region is at the start of the first row */ | ||
357 | regStart = start_row << raid->stripeShift; | ||
358 | /* assume this IO needs the full row - we'll adjust if not true */ | ||
359 | regSize = stripSize; | ||
360 | |||
361 | /* If IO spans more than 1 strip, fp is not possible | ||
362 | FP is not possible for writes on non-0 raid levels | ||
363 | FP is not possible if LD is not capable */ | ||
364 | if (num_strips > 1 || (!isRead && raid->level != 0) || | ||
365 | !raid->capability.fpCapable) { | ||
366 | io_info->fpOkForIo = FALSE; | ||
367 | } else { | ||
368 | io_info->fpOkForIo = TRUE; | ||
369 | } | ||
370 | |||
371 | if (numRows == 1) { | ||
372 | /* single-strip IOs can always lock only the data needed */ | ||
373 | if (num_strips == 1) { | ||
374 | regStart += ref_in_start_stripe; | ||
375 | regSize = numBlocks; | ||
376 | } | ||
377 | /* multi-strip IOs always need the full stripe locked */ | ||
378 | } else { | ||
379 | if (start_strip == (start_row + 1) * raid->rowDataSize - 1) { | ||
380 | /* If the start strip is the last in the start row */ | ||
381 | regStart += ref_in_start_stripe; | ||
382 | regSize = stripSize - ref_in_start_stripe; | ||
383 | /* initialize count to sectors from startref to end | ||
384 | of strip */ | ||
385 | } | ||
386 | |||
387 | if (numRows > 2) | ||
388 | /* Add complete rows in the middle of the transfer */ | ||
389 | regSize += (numRows-2) << raid->stripeShift; | ||
390 | |||
391 | /* if IO ends within first strip of last row */ | ||
392 | if (endStrip == endRow*raid->rowDataSize) | ||
393 | regSize += ref_in_end_stripe+1; | ||
394 | else | ||
395 | regSize += stripSize; | ||
396 | } | ||
397 | |||
398 | pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; | ||
399 | pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : | ||
400 | raid->regTypeReqOnWrite; | ||
401 | pRAID_Context->VirtualDiskTgtId = raid->targetId; | ||
402 | pRAID_Context->regLockRowLBA = regStart; | ||
403 | pRAID_Context->regLockLength = regSize; | ||
404 | pRAID_Context->configSeqNum = raid->seqNum; | ||
405 | |||
406 | /* Get Phy Params only if FP capable, or else leave it to MR firmware | ||
407 | to do the calculation. */ | ||
408 | if (io_info->fpOkForIo) { | ||
409 | retval = MR_GetPhyParams(ld, start_strip, ref_in_start_stripe, | ||
410 | &io_info->pdBlock, | ||
411 | &io_info->devHandle, pRAID_Context, | ||
412 | map); | ||
413 | /* If IO on an invalid Pd, then FP is not possible */ | ||
414 | if (io_info->devHandle == MR_PD_INVALID) | ||
415 | io_info->fpOkForIo = FALSE; | ||
416 | return retval; | ||
417 | } else if (isRead) { | ||
418 | uint stripIdx; | ||
419 | for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { | ||
420 | if (!MR_GetPhyParams(ld, start_strip + stripIdx, | ||
421 | ref_in_start_stripe, | ||
422 | &io_info->pdBlock, | ||
423 | &io_info->devHandle, | ||
424 | pRAID_Context, map)) | ||
425 | return TRUE; | ||
426 | } | ||
427 | } | ||
428 | return TRUE; | ||
429 | } | ||
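The context builder above decomposes the LD block range into strips of 1 << stripeShift blocks and rows of rowDataSize strips, and that decomposition drives the region-lock start and size. A standalone sketch of the arithmetic with assumed geometry (not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned stripeShift = 7;                 /* assumed 128-block strips */
	unsigned rowDataSize = 3;                 /* assumed 3 data arms per row */
	uint64_t ldStartBlock = 100000, numBlocks = 300;   /* assumed I/O */

	uint64_t stripSize = 1ULL << stripeShift;
	uint64_t start_strip = ldStartBlock >> stripeShift;
	uint64_t end_strip = (ldStartBlock + numBlocks - 1) >> stripeShift;
	uint64_t start_row = start_strip / rowDataSize;
	uint64_t end_row = end_strip / rowDataSize;

	printf("strips %llu..%llu (%llu total), rows %llu..%llu, offset in first strip %llu\n",
	       (unsigned long long)start_strip, (unsigned long long)end_strip,
	       (unsigned long long)(end_strip - start_strip + 1),
	       (unsigned long long)start_row, (unsigned long long)end_row,
	       (unsigned long long)(ldStartBlock & (stripSize - 1)));
	return 0;
}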
430 | |||
431 | void | ||
432 | mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, | ||
433 | struct LD_LOAD_BALANCE_INFO *lbInfo) | ||
434 | { | ||
435 | int ldCount; | ||
436 | u16 ld; | ||
437 | struct MR_LD_RAID *raid; | ||
438 | |||
439 | for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { | ||
440 | ld = MR_TargetIdToLdGet(ldCount, map); | ||
441 | if (ld >= MAX_LOGICAL_DRIVES) { | ||
442 | lbInfo[ldCount].loadBalanceFlag = 0; | ||
443 | continue; | ||
444 | } | ||
445 | |||
446 | raid = MR_LdRaidGet(ld, map); | ||
447 | |||
448 | /* Two drive Optimal RAID 1 */ | ||
449 | if ((raid->level == 1) && (raid->rowSize == 2) && | ||
450 | (raid->spanDepth == 1) && raid->ldState == | ||
451 | MR_LD_STATE_OPTIMAL) { | ||
452 | u32 pd, arRef; | ||
453 | |||
454 | lbInfo[ldCount].loadBalanceFlag = 1; | ||
455 | |||
456 | /* Get the array on which this span is present */ | ||
457 | arRef = MR_LdSpanArrayGet(ld, 0, map); | ||
458 | |||
459 | /* Get the Pd */ | ||
460 | pd = MR_ArPdGet(arRef, 0, map); | ||
461 | /* Get dev handle from Pd */ | ||
462 | lbInfo[ldCount].raid1DevHandle[0] = | ||
463 | MR_PdDevHandleGet(pd, map); | ||
464 | /* Get the Pd */ | ||
465 | pd = MR_ArPdGet(arRef, 1, map); | ||
466 | |||
467 | /* Get the dev handle from Pd */ | ||
468 | lbInfo[ldCount].raid1DevHandle[1] = | ||
469 | MR_PdDevHandleGet(pd, map); | ||
470 | } else | ||
471 | lbInfo[ldCount].loadBalanceFlag = 0; | ||
472 | } | ||
473 | } | ||
474 | |||
475 | u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block, | ||
476 | u32 count) | ||
477 | { | ||
478 | u16 pend0, pend1; | ||
479 | u64 diff0, diff1; | ||
480 | u8 bestArm; | ||
481 | |||
482 | /* get the pending cmds for the data and mirror arms */ | ||
483 | pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]); | ||
484 | pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]); | ||
485 | |||
486 | /* Determine the disk whose head is nearer to the req. block */ | ||
487 | diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]); | ||
488 | diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]); | ||
489 | bestArm = (diff0 <= diff1 ? 0 : 1); | ||
490 | |||
491 | if ((bestArm == arm && pend0 > pend1 + 16) || | ||
492 | (bestArm != arm && pend1 > pend0 + 16)) | ||
493 | bestArm ^= 1; | ||
494 | |||
495 | /* Update the last accessed block on the correct pd */ | ||
496 | lbInfo->last_accessed_block[bestArm] = block + count - 1; | ||
497 | |||
498 | return bestArm; | ||
499 | } | ||
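The arm selection above prefers the mirror whose last accessed block is nearer to the requested block, but switches if the preferred arm already has more than 16 commands outstanding beyond its peer. A simplified standalone sketch with assumed head positions and queue depths; the driver additionally factors in which arm the I/O was originally routed to (not driver code):

#include <stdint.h>
#include <stdio.h>

#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

int main(void)
{
	uint64_t last_block[2] = { 1000, 900000 };   /* assumed head positions */
	unsigned pending[2] = { 40, 10 };            /* assumed pending cmds */
	uint64_t req_block = 1200;                   /* assumed request */

	/* nearest head wins first */
	unsigned best = ABS_DIFF(req_block, last_block[0]) <=
			ABS_DIFF(req_block, last_block[1]) ? 0 : 1;

	/* ...unless it is more than 16 commands busier than its mirror */
	if ((best == 0 && pending[0] > pending[1] + 16) ||
	    (best == 1 && pending[1] > pending[0] + 16))
		best ^= 1;

	printf("read sent to arm %u\n", best);
	return 0;
}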
500 | |||
501 | u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, | ||
502 | struct IO_REQUEST_INFO *io_info) | ||
503 | { | ||
504 | u8 arm, old_arm; | ||
505 | u16 devHandle; | ||
506 | |||
507 | old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1; | ||
508 | |||
509 | /* get best new arm */ | ||
510 | arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, | ||
511 | io_info->numBlocks); | ||
512 | devHandle = lbInfo->raid1DevHandle[arm]; | ||
513 | atomic_inc(&lbInfo->scsi_pending_cmds[arm]); | ||
514 | |||
515 | return devHandle; | ||
516 | } | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c new file mode 100644 index 000000000000..c1e09d5a6196 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -0,0 +1,2248 @@ | |||
1 | /* | ||
2 | * Linux MegaRAID driver for SAS based RAID controllers | ||
3 | * | ||
4 | * Copyright (c) 2009-2011 LSI Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | * FILE: megaraid_sas_fusion.c | ||
21 | * | ||
22 | * Authors: LSI Corporation | ||
23 | * Sumant Patro | ||
24 | * Adam Radford <linuxraid@lsi.com> | ||
25 | * | ||
26 | * Send feedback to: <megaraidlinux@lsi.com> | ||
27 | * | ||
28 | * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 | ||
29 | * ATTN: Linuxraid | ||
30 | */ | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/types.h> | ||
34 | #include <linux/pci.h> | ||
35 | #include <linux/list.h> | ||
36 | #include <linux/moduleparam.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | #include <linux/interrupt.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/smp_lock.h> | ||
42 | #include <linux/uio.h> | ||
43 | #include <linux/uaccess.h> | ||
44 | #include <linux/fs.h> | ||
45 | #include <linux/compat.h> | ||
46 | #include <linux/blkdev.h> | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/poll.h> | ||
49 | |||
50 | #include <scsi/scsi.h> | ||
51 | #include <scsi/scsi_cmnd.h> | ||
52 | #include <scsi/scsi_device.h> | ||
53 | #include <scsi/scsi_host.h> | ||
54 | |||
55 | #include "megaraid_sas_fusion.h" | ||
56 | #include "megaraid_sas.h" | ||
57 | |||
58 | extern void megasas_free_cmds(struct megasas_instance *instance); | ||
59 | extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance | ||
60 | *instance); | ||
61 | extern void | ||
62 | megasas_complete_cmd(struct megasas_instance *instance, | ||
63 | struct megasas_cmd *cmd, u8 alt_status); | ||
64 | int megasas_is_ldio(struct scsi_cmnd *cmd); | ||
65 | int | ||
66 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); | ||
67 | |||
68 | void | ||
69 | megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); | ||
70 | int megasas_alloc_cmds(struct megasas_instance *instance); | ||
71 | int | ||
72 | megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); | ||
73 | int | ||
74 | megasas_issue_polled(struct megasas_instance *instance, | ||
75 | struct megasas_cmd *cmd); | ||
76 | |||
77 | u8 | ||
78 | MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, | ||
79 | struct RAID_CONTEXT *pRAID_Context, | ||
80 | struct MR_FW_RAID_MAP_ALL *map); | ||
81 | u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); | ||
82 | struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); | ||
83 | |||
84 | u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); | ||
85 | u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, | ||
86 | struct LD_LOAD_BALANCE_INFO *lbInfo); | ||
87 | u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, | ||
88 | struct IO_REQUEST_INFO *in_info); | ||
89 | int megasas_transition_to_ready(struct megasas_instance *instance); | ||
90 | void megaraid_sas_kill_hba(struct megasas_instance *instance); | ||
91 | |||
92 | extern u32 megasas_dbg_lvl; | ||
93 | |||
94 | /** | ||
95 | * megasas_enable_intr_fusion - Enables interrupts | ||
96 | * @regs: MFI register set | ||
97 | */ | ||
98 | void | ||
99 | megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) | ||
100 | { | ||
101 | writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); | ||
102 | |||
103 | /* Dummy readl to force pci flush */ | ||
104 | readl(®s->outbound_intr_mask); | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * megasas_disable_intr_fusion - Disables interrupt | ||
109 | * @regs: MFI register set | ||
110 | */ | ||
111 | void | ||
112 | megasas_disable_intr_fusion(struct megasas_register_set __iomem *regs) | ||
113 | { | ||
114 | u32 mask = 0xFFFFFFFF; | ||
115 | u32 status; | ||
116 | |||
117 | writel(mask, ®s->outbound_intr_mask); | ||
118 | /* Dummy readl to force pci flush */ | ||
119 | status = readl(®s->outbound_intr_mask); | ||
120 | } | ||
121 | |||
122 | int | ||
123 | megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) | ||
124 | { | ||
125 | u32 status; | ||
126 | /* | ||
127 | * Check if it is our interrupt | ||
128 | */ | ||
129 | status = readl(®s->outbound_intr_status); | ||
130 | |||
131 | if (status & 1) { | ||
132 | writel(status, ®s->outbound_intr_status); | ||
133 | readl(®s->outbound_intr_status); | ||
134 | return 1; | ||
135 | } | ||
136 | if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) | ||
137 | return 0; | ||
138 | |||
139 | /* | ||
140 | * dummy read to flush PCI | ||
141 | */ | ||
142 | readl(®s->outbound_intr_status); | ||
143 | |||
144 | return 1; | ||
145 | } | ||
146 | |||
147 | /** | ||
148 | * megasas_get_cmd_fusion - Get a command from the free pool | ||
149 | * @instance: Adapter soft state | ||
150 | * | ||
151 | * Returns a free command from the pool | ||
152 | */ | ||
153 | struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance | ||
154 | *instance) | ||
155 | { | ||
156 | unsigned long flags; | ||
157 | struct fusion_context *fusion = | ||
158 | (struct fusion_context *)instance->ctrl_context; | ||
159 | struct megasas_cmd_fusion *cmd = NULL; | ||
160 | |||
161 | spin_lock_irqsave(&fusion->cmd_pool_lock, flags); | ||
162 | |||
163 | if (!list_empty(&fusion->cmd_pool)) { | ||
164 | cmd = list_entry((&fusion->cmd_pool)->next, | ||
165 | struct megasas_cmd_fusion, list); | ||
166 | list_del_init(&cmd->list); | ||
167 | } else { | ||
168 | printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); | ||
169 | } | ||
170 | |||
171 | spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); | ||
172 | return cmd; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * megasas_return_cmd_fusion - Return a cmd to free command pool | ||
177 | * @instance: Adapter soft state | ||
178 | * @cmd: Command packet to be returned to free command pool | ||
179 | */ | ||
180 | static inline void | ||
181 | megasas_return_cmd_fusion(struct megasas_instance *instance, | ||
182 | struct megasas_cmd_fusion *cmd) | ||
183 | { | ||
184 | unsigned long flags; | ||
185 | struct fusion_context *fusion = | ||
186 | (struct fusion_context *)instance->ctrl_context; | ||
187 | |||
188 | spin_lock_irqsave(&fusion->cmd_pool_lock, flags); | ||
189 | |||
190 | cmd->scmd = NULL; | ||
191 | cmd->sync_cmd_idx = (u32)ULONG_MAX; | ||
192 | list_add_tail(&cmd->list, &fusion->cmd_pool); | ||
193 | |||
194 | spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool | ||
199 | * @instance: Adapter soft state | ||
200 | */ | ||
201 | static void megasas_teardown_frame_pool_fusion( | ||
202 | struct megasas_instance *instance) | ||
203 | { | ||
204 | int i; | ||
205 | struct fusion_context *fusion = instance->ctrl_context; | ||
206 | |||
207 | u16 max_cmd = instance->max_fw_cmds; | ||
208 | |||
209 | struct megasas_cmd_fusion *cmd; | ||
210 | |||
211 | if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) { | ||
212 | printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, " | ||
213 | "sense pool : %p\n", fusion->sg_dma_pool, | ||
214 | fusion->sense_dma_pool); | ||
215 | return; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Return all frames to pool | ||
220 | */ | ||
221 | for (i = 0; i < max_cmd; i++) { | ||
222 | |||
223 | cmd = fusion->cmd_list[i]; | ||
224 | |||
225 | if (cmd->sg_frame) | ||
226 | pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, | ||
227 | cmd->sg_frame_phys_addr); | ||
228 | |||
229 | if (cmd->sense) | ||
230 | pci_pool_free(fusion->sense_dma_pool, cmd->sense, | ||
231 | cmd->sense_phys_addr); | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Now destroy the pool itself | ||
236 | */ | ||
237 | pci_pool_destroy(fusion->sg_dma_pool); | ||
238 | pci_pool_destroy(fusion->sense_dma_pool); | ||
239 | |||
240 | fusion->sg_dma_pool = NULL; | ||
241 | fusion->sense_dma_pool = NULL; | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool | ||
246 | * @instance: Adapter soft state | ||
247 | */ | ||
248 | void | ||
249 | megasas_free_cmds_fusion(struct megasas_instance *instance) | ||
250 | { | ||
251 | int i; | ||
252 | struct fusion_context *fusion = instance->ctrl_context; | ||
253 | |||
254 | u32 max_cmds, req_sz, reply_sz, io_frames_sz; | ||
255 | |||
256 | |||
257 | req_sz = fusion->request_alloc_sz; | ||
258 | reply_sz = fusion->reply_alloc_sz; | ||
259 | io_frames_sz = fusion->io_frames_alloc_sz; | ||
260 | |||
261 | max_cmds = instance->max_fw_cmds; | ||
262 | |||
263 | /* Free descriptors and request Frames memory */ | ||
264 | if (fusion->req_frames_desc) | ||
265 | dma_free_coherent(&instance->pdev->dev, req_sz, | ||
266 | fusion->req_frames_desc, | ||
267 | fusion->req_frames_desc_phys); | ||
268 | |||
269 | if (fusion->reply_frames_desc) { | ||
270 | pci_pool_free(fusion->reply_frames_desc_pool, | ||
271 | fusion->reply_frames_desc, | ||
272 | fusion->reply_frames_desc_phys); | ||
273 | pci_pool_destroy(fusion->reply_frames_desc_pool); | ||
274 | } | ||
275 | |||
276 | if (fusion->io_request_frames) { | ||
277 | pci_pool_free(fusion->io_request_frames_pool, | ||
278 | fusion->io_request_frames, | ||
279 | fusion->io_request_frames_phys); | ||
280 | pci_pool_destroy(fusion->io_request_frames_pool); | ||
281 | } | ||
282 | |||
283 | /* Free the Fusion frame pool */ | ||
284 | megasas_teardown_frame_pool_fusion(instance); | ||
285 | |||
286 | /* Free all the commands in the cmd_list */ | ||
287 | for (i = 0; i < max_cmds; i++) | ||
288 | kfree(fusion->cmd_list[i]); | ||
289 | |||
290 | /* Free the cmd_list buffer itself */ | ||
291 | kfree(fusion->cmd_list); | ||
292 | fusion->cmd_list = NULL; | ||
293 | |||
294 | INIT_LIST_HEAD(&fusion->cmd_pool); | ||
295 | } | ||
296 | |||
297 | /** | ||
298 | * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames | ||
299 | * @instance: Adapter soft state | ||
300 | * | ||
301 | */ | ||
302 | static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) | ||
303 | { | ||
304 | int i; | ||
305 | u32 max_cmd; | ||
306 | struct fusion_context *fusion; | ||
307 | struct megasas_cmd_fusion *cmd; | ||
308 | u32 total_sz_chain_frame; | ||
309 | |||
310 | fusion = instance->ctrl_context; | ||
311 | max_cmd = instance->max_fw_cmds; | ||
312 | |||
313 | total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME; | ||
314 | |||
315 | /* | ||
316 | * Use DMA pool facility provided by PCI layer | ||
317 | */ | ||
318 | |||
319 | fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion", | ||
320 | instance->pdev, | ||
321 | total_sz_chain_frame, 4, | ||
322 | 0); | ||
323 | if (!fusion->sg_dma_pool) { | ||
324 | printk(KERN_DEBUG "megasas: failed to setup request pool " | ||
325 | "fusion\n"); | ||
326 | return -ENOMEM; | ||
327 | } | ||
328 | fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion", | ||
329 | instance->pdev, | ||
330 | SCSI_SENSE_BUFFERSIZE, 64, 0); | ||
331 | |||
332 | if (!fusion->sense_dma_pool) { | ||
333 | printk(KERN_DEBUG "megasas: failed to setup sense pool " | ||
334 | "fusion\n"); | ||
335 | pci_pool_destroy(fusion->sg_dma_pool); | ||
336 | fusion->sg_dma_pool = NULL; | ||
337 | return -ENOMEM; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * Allocate and attach a frame to each of the commands in cmd_list | ||
342 | */ | ||
343 | for (i = 0; i < max_cmd; i++) { | ||
344 | |||
345 | cmd = fusion->cmd_list[i]; | ||
346 | |||
347 | cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool, | ||
348 | GFP_KERNEL, | ||
349 | &cmd->sg_frame_phys_addr); | ||
350 | |||
351 | cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, | ||
352 | GFP_KERNEL, &cmd->sense_phys_addr); | ||
353 | /* | ||
354 | * megasas_teardown_frame_pool_fusion() takes care of freeing | ||
355 | * whatever has been allocated | ||
356 | */ | ||
357 | if (!cmd->sg_frame || !cmd->sense) { | ||
358 | printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n"); | ||
359 | megasas_teardown_frame_pool_fusion(instance); | ||
360 | return -ENOMEM; | ||
361 | } | ||
362 | } | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | /** | ||
367 | * megasas_alloc_cmds_fusion - Allocates the command packets | ||
368 | * @instance: Adapter soft state | ||
369 | * | ||
370 | * | ||
371 | * Each frame has a 32-bit field called context. This context is used to get | ||
372 | * back the megasas_cmd_fusion from the frame when a frame gets completed | ||
373 | * In this driver, the 32 bit values are the indices into an array cmd_list. | ||
374 | * This array is used only to look up the megasas_cmd_fusion given the context. | ||
375 | * The free commands themselves are maintained in a linked list called cmd_pool. | ||
376 | * | ||
377 | * cmds are formed in the io_request and sg_frame members of the | ||
378 | * megasas_cmd_fusion. The context field is used to get a request descriptor | ||
379 | * and is used as SMID of the cmd. | ||
380 | * SMID value range is from 1 to max_fw_cmds. | ||
381 | */ | ||
382 | int | ||
383 | megasas_alloc_cmds_fusion(struct megasas_instance *instance) | ||
384 | { | ||
385 | int i, j; | ||
386 | u32 max_cmd, io_frames_sz; | ||
387 | struct fusion_context *fusion; | ||
388 | struct megasas_cmd_fusion *cmd; | ||
389 | union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; | ||
390 | u32 offset; | ||
391 | dma_addr_t io_req_base_phys; | ||
392 | u8 *io_req_base; | ||
393 | |||
394 | fusion = instance->ctrl_context; | ||
395 | |||
396 | max_cmd = instance->max_fw_cmds; | ||
397 | |||
398 | fusion->req_frames_desc = | ||
399 | dma_alloc_coherent(&instance->pdev->dev, | ||
400 | fusion->request_alloc_sz, | ||
401 | &fusion->req_frames_desc_phys, GFP_KERNEL); | ||
402 | |||
403 | if (!fusion->req_frames_desc) { | ||
404 | printk(KERN_ERR "megasas: Could not allocate memory for " | ||
405 | "request_frames\n"); | ||
406 | goto fail_req_desc; | ||
407 | } | ||
408 | |||
409 | fusion->reply_frames_desc_pool = | ||
410 | pci_pool_create("reply_frames pool", instance->pdev, | ||
411 | fusion->reply_alloc_sz, 16, 0); | ||
412 | |||
413 | if (!fusion->reply_frames_desc_pool) { | ||
414 | printk(KERN_ERR "megasas: Could not allocate memory for " | ||
415 | "reply_frame pool\n"); | ||
416 | goto fail_reply_desc; | ||
417 | } | ||
418 | |||
419 | fusion->reply_frames_desc = | ||
420 | pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, | ||
421 | &fusion->reply_frames_desc_phys); | ||
422 | if (!fusion->reply_frames_desc) { | ||
423 | printk(KERN_ERR "megasas: Could not allocate memory for " | ||
424 | "reply_frame pool\n"); | ||
425 | pci_pool_destroy(fusion->reply_frames_desc_pool); | ||
426 | goto fail_reply_desc; | ||
427 | } | ||
428 | |||
429 | reply_desc = fusion->reply_frames_desc; | ||
430 | for (i = 0; i < fusion->reply_q_depth; i++, reply_desc++) | ||
431 | reply_desc->Words = ULLONG_MAX; | ||
432 | |||
433 | io_frames_sz = fusion->io_frames_alloc_sz; | ||
434 | |||
435 | fusion->io_request_frames_pool = | ||
436 | pci_pool_create("io_request_frames pool", instance->pdev, | ||
437 | fusion->io_frames_alloc_sz, 16, 0); | ||
438 | |||
439 | if (!fusion->io_request_frames_pool) { | ||
440 | printk(KERN_ERR "megasas: Could not allocate memory for " | ||
441 | "io_request_frame pool\n"); | ||
442 | goto fail_io_frames; | ||
443 | } | ||
444 | |||
445 | fusion->io_request_frames = | ||
446 | pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, | ||
447 | &fusion->io_request_frames_phys); | ||
448 | if (!fusion->io_request_frames) { | ||
449 | printk(KERN_ERR "megasas: Could not allocate memory for " | ||
450 | "io_request_frames frames\n"); | ||
451 | pci_pool_destroy(fusion->io_request_frames_pool); | ||
452 | goto fail_io_frames; | ||
453 | } | ||
454 | |||
455 | /* | ||
456 | * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. | ||
457 | * Allocate the dynamic array first and then allocate individual | ||
458 | * commands. | ||
459 | */ | ||
460 | fusion->cmd_list = kmalloc(sizeof(struct megasas_cmd_fusion *) | ||
461 | *max_cmd, GFP_KERNEL); | ||
462 | |||
463 | if (!fusion->cmd_list) { | ||
464 | printk(KERN_DEBUG "megasas: out of memory. Could not alloc " | ||
465 | "memory for cmd_list_fusion\n"); | ||
466 | goto fail_cmd_list; | ||
467 | } | ||
468 | |||
469 | memset(fusion->cmd_list, 0, sizeof(struct megasas_cmd_fusion *) | ||
470 | *max_cmd); | ||
471 | |||
472 | max_cmd = instance->max_fw_cmds; | ||
473 | for (i = 0; i < max_cmd; i++) { | ||
474 | fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), | ||
475 | GFP_KERNEL); | ||
476 | if (!fusion->cmd_list[i]) { | ||
477 | printk(KERN_ERR "Could not alloc cmd list fusion\n"); | ||
478 | |||
479 | for (j = 0; j < i; j++) | ||
480 | kfree(fusion->cmd_list[j]); | ||
481 | |||
482 | kfree(fusion->cmd_list); | ||
483 | fusion->cmd_list = NULL; | ||
484 | goto fail_cmd_list; | ||
485 | } | ||
486 | } | ||
487 | |||
488 | /* The first 256 bytes (SMID 0) are not used. Don't add to cmd list */ | ||
489 | io_req_base = fusion->io_request_frames + | ||
490 | MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; | ||
491 | io_req_base_phys = fusion->io_request_frames_phys + | ||
492 | MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; | ||
493 | |||
494 | /* | ||
495 | * Add all the commands to command pool (fusion->cmd_pool) | ||
496 | */ | ||
497 | |||
498 | /* SMID 0 is reserved. Set SMID/index from 1 */ | ||
499 | for (i = 0; i < max_cmd; i++) { | ||
500 | cmd = fusion->cmd_list[i]; | ||
501 | offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; | ||
502 | memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); | ||
503 | cmd->index = i + 1; | ||
504 | cmd->scmd = NULL; | ||
505 | cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */ | ||
506 | cmd->instance = instance; | ||
507 | cmd->io_request = | ||
508 | (struct MPI2_RAID_SCSI_IO_REQUEST *) | ||
509 | (io_req_base + offset); | ||
510 | memset(cmd->io_request, 0, | ||
511 | sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); | ||
512 | cmd->io_request_phys_addr = io_req_base_phys + offset; | ||
513 | |||
514 | list_add_tail(&cmd->list, &fusion->cmd_pool); | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * Create a frame pool and assign one frame to each cmd | ||
519 | */ | ||
520 | if (megasas_create_frame_pool_fusion(instance)) { | ||
521 | printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); | ||
522 | megasas_free_cmds_fusion(instance); | ||
523 | goto fail_req_desc; | ||
524 | } | ||
525 | |||
526 | return 0; | ||
527 | |||
528 | fail_cmd_list: | ||
529 | pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, | ||
530 | fusion->io_request_frames_phys); | ||
531 | pci_pool_destroy(fusion->io_request_frames_pool); | ||
532 | fail_io_frames: | ||
533 | dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, | ||
534 | fusion->reply_frames_desc, | ||
535 | fusion->reply_frames_desc_phys); | ||
536 | pci_pool_free(fusion->reply_frames_desc_pool, | ||
537 | fusion->reply_frames_desc, | ||
538 | fusion->reply_frames_desc_phys); | ||
539 | pci_pool_destroy(fusion->reply_frames_desc_pool); | ||
540 | |||
541 | fail_reply_desc: | ||
542 | dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, | ||
543 | fusion->req_frames_desc, | ||
544 | fusion->req_frames_desc_phys); | ||
545 | fail_req_desc: | ||
546 | return -ENOMEM; | ||
547 | } | ||
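As the comment block before megasas_alloc_cmds_fusion() describes, SMID 0 is reserved: command i in cmd_list carries SMID i + 1, and each SMID owns one default-sized I/O frame past the base of io_request_frames. A small sketch of that layout with an assumed frame size and DMA base (not driver code):

#include <stdint.h>
#include <stdio.h>

/* stand-in for MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE (assumed 256 bytes) */
#define FRAME_SZ 256u

int main(void)
{
	uint64_t io_frames_base = 0x100000;   /* assumed DMA base address */
	unsigned i;

	for (i = 0; i < 3; i++) {
		unsigned smid = i + 1;            /* SMID 0 is reserved */
		uint64_t frame = io_frames_base + (uint64_t)smid * FRAME_SZ;

		printf("cmd_list[%u] -> SMID %u, io frame at 0x%llx\n",
		       i, smid, (unsigned long long)frame);
	}
	return 0;
}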
548 | |||
549 | /** | ||
550 | * wait_and_poll - Issues a polling command | ||
551 | * @instance: Adapter soft state | ||
552 | * @cmd: Command packet to be issued | ||
553 | * | ||
554 | * For polling, MFI requires the cmd_status to be set to 0xFF before posting. | ||
555 | */ | ||
556 | int | ||
557 | wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd) | ||
558 | { | ||
559 | int i; | ||
560 | struct megasas_header *frame_hdr = &cmd->frame->hdr; | ||
561 | |||
562 | u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; | ||
563 | |||
564 | /* | ||
565 | * Wait for cmd_status to change | ||
566 | */ | ||
567 | for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) { | ||
568 | rmb(); | ||
569 | msleep(20); | ||
570 | } | ||
571 | |||
572 | if (frame_hdr->cmd_status == 0xff) | ||
573 | return -ETIME; | ||
574 | |||
575 | return 0; | ||
576 | } | ||
577 | |||
578 | /** | ||
579 | * megasas_ioc_init_fusion - Initializes the FW | ||
580 | * @instance: Adapter soft state | ||
581 | * | ||
582 | * Issues the IOC Init cmd | ||
583 | */ | ||
584 | int | ||
585 | megasas_ioc_init_fusion(struct megasas_instance *instance) | ||
586 | { | ||
587 | struct megasas_init_frame *init_frame; | ||
588 | struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; | ||
589 | dma_addr_t ioc_init_handle; | ||
590 | u32 context; | ||
591 | struct megasas_cmd *cmd; | ||
592 | u8 ret; | ||
593 | struct fusion_context *fusion; | ||
594 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | ||
595 | int i; | ||
596 | struct megasas_header *frame_hdr; | ||
597 | |||
598 | fusion = instance->ctrl_context; | ||
599 | |||
600 | cmd = megasas_get_cmd(instance); | ||
601 | |||
602 | if (!cmd) { | ||
603 | printk(KERN_ERR "Could not allocate cmd for INIT Frame\n"); | ||
604 | ret = 1; | ||
605 | goto fail_get_cmd; | ||
606 | } | ||
607 | |||
608 | IOCInitMessage = | ||
609 | dma_alloc_coherent(&instance->pdev->dev, | ||
610 | sizeof(struct MPI2_IOC_INIT_REQUEST), | ||
611 | &ioc_init_handle, GFP_KERNEL); | ||
612 | |||
613 | if (!IOCInitMessage) { | ||
614 | printk(KERN_ERR "Could not allocate memory for " | ||
615 | "IOCInitMessage\n"); | ||
616 | ret = 1; | ||
617 | goto fail_fw_init; | ||
618 | } | ||
619 | |||
620 | memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); | ||
621 | |||
622 | IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; | ||
623 | IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; | ||
624 | IOCInitMessage->MsgVersion = MPI2_VERSION; | ||
625 | IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; | ||
626 | IOCInitMessage->SystemRequestFrameSize = | ||
627 | MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; | ||
628 | |||
629 | IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; | ||
630 | IOCInitMessage->ReplyDescriptorPostQueueAddress = | ||
631 | fusion->reply_frames_desc_phys; | ||
632 | IOCInitMessage->SystemRequestFrameBaseAddress = | ||
633 | fusion->io_request_frames_phys; | ||
634 | |||
635 | init_frame = (struct megasas_init_frame *)cmd->frame; | ||
636 | memset(init_frame, 0, MEGAMFI_FRAME_SIZE); | ||
637 | |||
638 | frame_hdr = &cmd->frame->hdr; | ||
639 | context = init_frame->context; | ||
640 | init_frame->context = context; | ||
641 | |||
642 | frame_hdr->cmd_status = 0xFF; | ||
643 | frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; | ||
644 | |||
645 | init_frame->cmd = MFI_CMD_INIT; | ||
646 | init_frame->cmd_status = 0xFF; | ||
647 | |||
648 | init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; | ||
649 | init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); | ||
650 | |||
651 | req_desc = | ||
652 | (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; | ||
653 | |||
654 | req_desc->Words = cmd->frame_phys_addr; | ||
655 | req_desc->MFAIo.RequestFlags = | ||
656 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << | ||
657 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
658 | |||
659 | /* | ||
660 | * disable the intr before firing the init frame | ||
661 | */ | ||
662 | instance->instancet->disable_intr(instance->reg_set); | ||
663 | |||
664 | for (i = 0; i < (10 * 1000); i += 20) { | ||
665 | if (readl(&instance->reg_set->doorbell) & 1) | ||
666 | msleep(20); | ||
667 | else | ||
668 | break; | ||
669 | } | ||
670 | |||
671 | instance->instancet->fire_cmd(instance, req_desc->u.low, | ||
672 | req_desc->u.high, instance->reg_set); | ||
673 | |||
674 | wait_and_poll(instance, cmd); | ||
675 | |||
676 | frame_hdr = &cmd->frame->hdr; | ||
677 | if (frame_hdr->cmd_status != 0) { | ||
678 | ret = 1; | ||
679 | goto fail_fw_init; | ||
680 | } | ||
681 | printk(KERN_ERR "megasas: IOC Init cmd success\n"); | ||
682 | |||
683 | ret = 0; | ||
684 | |||
685 | fail_fw_init: | ||
686 | megasas_return_cmd(instance, cmd); | ||
687 | if (IOCInitMessage) | ||
688 | dma_free_coherent(&instance->pdev->dev, | ||
689 | sizeof(struct MPI2_IOC_INIT_REQUEST), | ||
690 | IOCInitMessage, ioc_init_handle); | ||
691 | fail_get_cmd: | ||
692 | return ret; | ||
693 | } | ||
694 | |||
695 | /* | ||
696 | * megasas_return_cmd_for_smid - Returns the cmd_fusion for a SMID to the free pool | ||
697 | * @instance: Adapter soft state | ||
698 | * | ||
699 | */ | ||
700 | void | ||
701 | megasas_return_cmd_for_smid(struct megasas_instance *instance, u16 smid) | ||
702 | { | ||
703 | struct fusion_context *fusion; | ||
704 | struct megasas_cmd_fusion *cmd; | ||
705 | |||
706 | fusion = instance->ctrl_context; | ||
707 | cmd = fusion->cmd_list[smid - 1]; | ||
708 | megasas_return_cmd_fusion(instance, cmd); | ||
709 | } | ||
710 | |||
711 | /* | ||
712 | * megasas_get_ld_map_info - Returns FW's ld_map structure | ||
713 | * @instance: Adapter soft state | ||
714 | * | ||
715 | * Issues an internal command (DCMD) to fetch the FW's RAID map (ld_map) | ||
716 | * structure. The map is validated and used to decide whether fast path | ||
717 | * I/O can be used. | ||
718 | */ | ||
719 | static int | ||
720 | megasas_get_ld_map_info(struct megasas_instance *instance) | ||
721 | { | ||
722 | int ret = 0; | ||
723 | struct megasas_cmd *cmd; | ||
724 | struct megasas_dcmd_frame *dcmd; | ||
725 | struct MR_FW_RAID_MAP_ALL *ci; | ||
726 | dma_addr_t ci_h = 0; | ||
727 | u32 size_map_info; | ||
728 | struct fusion_context *fusion; | ||
729 | |||
730 | cmd = megasas_get_cmd(instance); | ||
731 | |||
732 | if (!cmd) { | ||
733 | printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n"); | ||
734 | return -ENOMEM; | ||
735 | } | ||
736 | |||
737 | fusion = instance->ctrl_context; | ||
738 | |||
739 | if (!fusion) { | ||
740 | megasas_return_cmd(instance, cmd); | ||
741 | return 1; | ||
742 | } | ||
743 | |||
744 | dcmd = &cmd->frame->dcmd; | ||
745 | |||
746 | size_map_info = sizeof(struct MR_FW_RAID_MAP) + | ||
747 | (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); | ||
748 | |||
749 | ci = fusion->ld_map[(instance->map_id & 1)]; | ||
750 | ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; | ||
751 | |||
752 | if (!ci) { | ||
753 | printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n"); | ||
754 | megasas_return_cmd(instance, cmd); | ||
755 | return -ENOMEM; | ||
756 | } | ||
757 | |||
758 | memset(ci, 0, sizeof(*ci)); | ||
759 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | ||
760 | |||
761 | dcmd->cmd = MFI_CMD_DCMD; | ||
762 | dcmd->cmd_status = 0xFF; | ||
763 | dcmd->sge_count = 1; | ||
764 | dcmd->flags = MFI_FRAME_DIR_READ; | ||
765 | dcmd->timeout = 0; | ||
766 | dcmd->pad_0 = 0; | ||
767 | dcmd->data_xfer_len = size_map_info; | ||
768 | dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; | ||
769 | dcmd->sgl.sge32[0].phys_addr = ci_h; | ||
770 | dcmd->sgl.sge32[0].length = size_map_info; | ||
771 | |||
772 | if (!megasas_issue_polled(instance, cmd)) | ||
773 | ret = 0; | ||
774 | else { | ||
775 | printk(KERN_ERR "megasas: Get LD Map Info Failed\n"); | ||
776 | ret = -1; | ||
777 | } | ||
778 | |||
779 | megasas_return_cmd(instance, cmd); | ||
780 | |||
781 | return ret; | ||
782 | } | ||
783 | |||
784 | u8 | ||
785 | megasas_get_map_info(struct megasas_instance *instance) | ||
786 | { | ||
787 | struct fusion_context *fusion = instance->ctrl_context; | ||
788 | |||
789 | fusion->fast_path_io = 0; | ||
790 | if (!megasas_get_ld_map_info(instance)) { | ||
791 | if (MR_ValidateMapInfo(fusion->ld_map[(instance->map_id & 1)], | ||
792 | fusion->load_balance_info)) { | ||
793 | fusion->fast_path_io = 1; | ||
794 | return 0; | ||
795 | } | ||
796 | } | ||
797 | return 1; | ||
798 | } | ||
799 | |||
800 | /* | ||
801 | * megasas_sync_map_info - Sends LD map sync info to the FW | ||
802 | * @instance: Adapter soft state | ||
803 | * | ||
804 | * Issues an internal pended command (DCMD) that writes the current LD | ||
805 | * target id / sequence number information back to the FW so the driver | ||
806 | * and FW RAID maps stay in sync. | ||
807 | */ | ||
808 | int | ||
809 | megasas_sync_map_info(struct megasas_instance *instance) | ||
810 | { | ||
811 | int ret = 0, i; | ||
812 | struct megasas_cmd *cmd; | ||
813 | struct megasas_dcmd_frame *dcmd; | ||
814 | u32 size_sync_info, num_lds; | ||
815 | struct fusion_context *fusion; | ||
816 | struct MR_LD_TARGET_SYNC *ci = NULL; | ||
817 | struct MR_FW_RAID_MAP_ALL *map; | ||
818 | struct MR_LD_RAID *raid; | ||
819 | struct MR_LD_TARGET_SYNC *ld_sync; | ||
820 | dma_addr_t ci_h = 0; | ||
821 | u32 size_map_info; | ||
822 | |||
823 | cmd = megasas_get_cmd(instance); | ||
824 | |||
825 | if (!cmd) { | ||
826 | printk(KERN_DEBUG "megasas: Failed to get cmd for sync " | ||
827 | "info.\n"); | ||
828 | return -ENOMEM; | ||
829 | } | ||
830 | |||
831 | fusion = instance->ctrl_context; | ||
832 | |||
833 | if (!fusion) { | ||
834 | megasas_return_cmd(instance, cmd); | ||
835 | return 1; | ||
836 | } | ||
837 | |||
838 | map = fusion->ld_map[instance->map_id & 1]; | ||
839 | |||
840 | num_lds = map->raidMap.ldCount; | ||
841 | |||
842 | dcmd = &cmd->frame->dcmd; | ||
843 | |||
844 | size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds; | ||
845 | |||
846 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | ||
847 | |||
848 | ci = (struct MR_LD_TARGET_SYNC *) | ||
849 | fusion->ld_map[(instance->map_id - 1) & 1]; | ||
850 | memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL)); | ||
851 | |||
852 | ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; | ||
853 | |||
854 | ld_sync = (struct MR_LD_TARGET_SYNC *)ci; | ||
855 | |||
856 | for (i = 0; i < num_lds; i++, ld_sync++) { | ||
857 | raid = MR_LdRaidGet(i, map); | ||
858 | ld_sync->targetId = MR_GetLDTgtId(i, map); | ||
859 | ld_sync->seqNum = raid->seqNum; | ||
860 | } | ||
861 | |||
862 | size_map_info = sizeof(struct MR_FW_RAID_MAP) + | ||
863 | (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); | ||
864 | |||
865 | dcmd->cmd = MFI_CMD_DCMD; | ||
866 | dcmd->cmd_status = 0xFF; | ||
867 | dcmd->sge_count = 1; | ||
868 | dcmd->flags = MFI_FRAME_DIR_WRITE; | ||
869 | dcmd->timeout = 0; | ||
870 | dcmd->pad_0 = 0; | ||
871 | dcmd->data_xfer_len = size_map_info; | ||
872 | dcmd->mbox.b[0] = num_lds; | ||
873 | dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; | ||
874 | dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; | ||
875 | dcmd->sgl.sge32[0].phys_addr = ci_h; | ||
876 | dcmd->sgl.sge32[0].length = size_map_info; | ||
877 | |||
878 | instance->map_update_cmd = cmd; | ||
879 | |||
880 | instance->instancet->issue_dcmd(instance, cmd); | ||
881 | |||
882 | return ret; | ||
883 | } | ||
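megasas_get_ld_map_info() and megasas_sync_map_info() share a pair of map buffers indexed by (map_id & 1): a new map is fetched into one buffer while the sync data for the previous map is built in the other. A small sketch of that ping-pong indexing (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned map_id;

	for (map_id = 0; map_id < 4; map_id++)
		printf("map_id %u: fetch into ld_map[%u], sync from ld_map[%u]\n",
		       map_id, map_id & 1, (map_id - 1) & 1);
	return 0;
}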
884 | |||
885 | /** | ||
886 | * megasas_init_adapter_fusion - Initializes the FW | ||
887 | * @instance: Adapter soft state | ||
888 | * | ||
889 | * This is the main function for initializing firmware. | ||
890 | */ | ||
891 | u32 | ||
892 | megasas_init_adapter_fusion(struct megasas_instance *instance) | ||
893 | { | ||
894 | struct megasas_register_set __iomem *reg_set; | ||
895 | struct fusion_context *fusion; | ||
896 | u32 max_cmd; | ||
897 | int i = 0; | ||
898 | |||
899 | fusion = instance->ctrl_context; | ||
900 | |||
901 | reg_set = instance->reg_set; | ||
902 | |||
903 | /* | ||
904 | * Get various operational parameters from status register | ||
905 | */ | ||
906 | instance->max_fw_cmds = | ||
907 | instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; | ||
908 | instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008); | ||
909 | |||
910 | /* | ||
911 | * Reduce the max supported cmds by 1. This is to ensure that the | ||
912 | * reply_q_sz (1 more than the max cmd that driver may send) | ||
913 | * does not exceed max cmds that the FW can support | ||
914 | */ | ||
915 | instance->max_fw_cmds = instance->max_fw_cmds-1; | ||
916 | /* Only internal cmds (DCMD) need to have MFI frames */ | ||
917 | instance->max_mfi_cmds = MEGASAS_INT_CMDS; | ||
918 | |||
919 | max_cmd = instance->max_fw_cmds; | ||
920 | |||
921 | fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16; | ||
922 | |||
923 | fusion->request_alloc_sz = | ||
924 | sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; | ||
925 | fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) | ||
926 | *(fusion->reply_q_depth); | ||
927 | fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + | ||
928 | (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * | ||
929 | (max_cmd + 1)); /* Extra 1 for SMID 0 */ | ||
930 | |||
931 | fusion->max_sge_in_main_msg = | ||
932 | (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - | ||
933 | offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16; | ||
934 | |||
935 | fusion->max_sge_in_chain = | ||
936 | MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION); | ||
937 | |||
938 | instance->max_num_sge = fusion->max_sge_in_main_msg + | ||
939 | fusion->max_sge_in_chain - 2; | ||
940 | |||
941 | /* Used for pass thru MFI frame (DCMD) */ | ||
942 | fusion->chain_offset_mfi_pthru = | ||
943 | offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16; | ||
944 | |||
945 | fusion->chain_offset_io_request = | ||
946 | (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - | ||
947 | sizeof(union MPI2_SGE_IO_UNION))/16; | ||
948 | |||
949 | fusion->last_reply_idx = 0; | ||
950 | |||
951 | /* | ||
952 | * Allocate memory for descriptors | ||
953 | * Create a pool of commands | ||
954 | */ | ||
955 | if (megasas_alloc_cmds(instance)) | ||
956 | goto fail_alloc_mfi_cmds; | ||
957 | if (megasas_alloc_cmds_fusion(instance)) | ||
958 | goto fail_alloc_cmds; | ||
959 | |||
960 | if (megasas_ioc_init_fusion(instance)) | ||
961 | goto fail_ioc_init; | ||
962 | |||
963 | instance->flag_ieee = 1; | ||
964 | |||
965 | fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) + | ||
966 | (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); | ||
967 | |||
968 | fusion->fast_path_io = 0; | ||
969 | |||
970 | for (i = 0; i < 2; i++) { | ||
971 | fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, | ||
972 | fusion->map_sz, | ||
973 | &fusion->ld_map_phys[i], | ||
974 | GFP_KERNEL); | ||
975 | if (!fusion->ld_map[i]) { | ||
976 | printk(KERN_ERR "megasas: Could not allocate memory " | ||
977 | "for map info\n"); | ||
978 | goto fail_map_info; | ||
979 | } | ||
980 | } | ||
981 | |||
982 | if (!megasas_get_map_info(instance)) | ||
983 | megasas_sync_map_info(instance); | ||
984 | |||
985 | return 0; | ||
986 | |||
987 | fail_alloc_cmds: | ||
988 | fail_alloc_mfi_cmds: | ||
989 | fail_map_info: | ||
990 | if (i == 1) | ||
991 | dma_free_coherent(&instance->pdev->dev, fusion->map_sz, | ||
992 | fusion->ld_map[0], fusion->ld_map_phys[0]); | ||
993 | fail_ioc_init: | ||
994 | return 1; | ||
995 | } | ||
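The sizing above reads max_fw_cmds from the status register, clamps it, reserves one slot, rounds the reply queue depth up to a multiple of 16, and derives the request, reply and I/O frame allocation sizes from it. A sketch of the arithmetic with assumed descriptor and frame sizes (the real sizes come from the MPI2 headers):

#include <stdio.h>

int main(void)
{
	unsigned max_fw_cmds = 1008 - 1;                 /* clamped, then minus one */
	unsigned reply_q_depth = ((max_fw_cmds + 1 + 15) / 16) * 16;
	unsigned req_desc_sz = 8;                        /* assumed descriptor size */
	unsigned reply_desc_sz = 8;                      /* assumed descriptor size */
	unsigned io_frame_sz = 256;                      /* assumed default frame size */

	printf("reply_q_depth      = %u\n", reply_q_depth);
	printf("request_alloc_sz   = %u\n", req_desc_sz * max_fw_cmds);
	printf("reply_alloc_sz     = %u\n", reply_desc_sz * reply_q_depth);
	printf("io_frames_alloc_sz = %u\n", io_frame_sz + io_frame_sz * (max_fw_cmds + 1));
	return 0;
}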
996 | |||
997 | /** | ||
998 | * megasas_fire_cmd_fusion - Sends command to the FW | ||
999 | * @instance :			Adapter soft state | ||
1000 | * @req_desc_lo/@req_desc_hi :	Low/high 32 bits of the request descriptor | ||
1001 | * @regs :			MFI register set | ||
1002 | */ | ||
1003 | void | ||
1004 | megasas_fire_cmd_fusion(struct megasas_instance *instance, | ||
1005 | dma_addr_t req_desc_lo, | ||
1006 | u32 req_desc_hi, | ||
1007 | struct megasas_register_set __iomem *regs) | ||
1008 | { | ||
1009 | unsigned long flags; | ||
1010 | |||
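/*
 * The 64-bit request descriptor is posted as two 32-bit MMIO writes;
 * hba_lock keeps the low/high halves from interleaving with
 * descriptors posted by other CPUs.
 */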
1011 | spin_lock_irqsave(&instance->hba_lock, flags); | ||
1012 | |||
1013 | writel(req_desc_lo, | ||
1014 | &(regs)->inbound_low_queue_port); | ||
1015 | writel(req_desc_hi, &(regs)->inbound_high_queue_port); | ||
1016 | spin_unlock_irqrestore(&instance->hba_lock, flags); | ||
1017 | } | ||
1018 | |||
1019 | /** | ||
1020 | * map_cmd_status - Maps FW cmd status to OS cmd status | ||
1021 | * @cmd : Pointer to cmd | ||
1022 | * @status : status of cmd returned by FW | ||
1023 | * @ext_status : ext status of cmd returned by FW | ||
1024 | */ | ||
1025 | |||
1026 | void | ||
1027 | map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status) | ||
1028 | { | ||
1029 | |||
1030 | switch (status) { | ||
1031 | |||
1032 | case MFI_STAT_OK: | ||
1033 | cmd->scmd->result = DID_OK << 16; | ||
1034 | break; | ||
1035 | |||
1036 | case MFI_STAT_SCSI_IO_FAILED: | ||
1037 | case MFI_STAT_LD_INIT_IN_PROGRESS: | ||
1038 | cmd->scmd->result = (DID_ERROR << 16) | ext_status; | ||
1039 | break; | ||
1040 | |||
1041 | case MFI_STAT_SCSI_DONE_WITH_ERROR: | ||
1042 | |||
1043 | cmd->scmd->result = (DID_OK << 16) | ext_status; | ||
1044 | if (ext_status == SAM_STAT_CHECK_CONDITION) { | ||
1045 | memset(cmd->scmd->sense_buffer, 0, | ||
1046 | SCSI_SENSE_BUFFERSIZE); | ||
1047 | memcpy(cmd->scmd->sense_buffer, cmd->sense, | ||
1048 | SCSI_SENSE_BUFFERSIZE); | ||
1049 | cmd->scmd->result |= DRIVER_SENSE << 24; | ||
1050 | } | ||
1051 | break; | ||
1052 | |||
1053 | case MFI_STAT_LD_OFFLINE: | ||
1054 | case MFI_STAT_DEVICE_NOT_FOUND: | ||
1055 | cmd->scmd->result = DID_BAD_TARGET << 16; | ||
1056 | break; | ||
1057 | |||
1058 | default: | ||
1059 | printk(KERN_DEBUG "megasas: FW status %#x\n", status); | ||
1060 | cmd->scmd->result = DID_ERROR << 16; | ||
1061 | break; | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | /** | ||
1066 | * megasas_make_sgl_fusion -	Prepares IEEE SGL (scatter gather list) | ||
1067 | * @instance: Adapter soft state | ||
1068 | * @scp: SCSI command from the mid-layer | ||
1069 | * @sgl_ptr: SGL to be filled in | ||
1070 | * @cmd: cmd we are working on | ||
1071 | * | ||
1072 | * If successful, this function returns the number of SG elements. | ||
1073 | */ | ||
1074 | static int | ||
1075 | megasas_make_sgl_fusion(struct megasas_instance *instance, | ||
1076 | struct scsi_cmnd *scp, | ||
1077 | struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, | ||
1078 | struct megasas_cmd_fusion *cmd) | ||
1079 | { | ||
1080 | int i, sg_processed; | ||
1081 | int sge_count, sge_idx; | ||
1082 | struct scatterlist *os_sgl; | ||
1083 | struct fusion_context *fusion; | ||
1084 | |||
1085 | fusion = instance->ctrl_context; | ||
1086 | |||
1087 | cmd->io_request->ChainOffset = 0; | ||
1088 | |||
1089 | sge_count = scsi_dma_map(scp); | ||
1090 | |||
1091 | BUG_ON(sge_count < 0); | ||
1092 | |||
1093 | if (sge_count > instance->max_num_sge || !sge_count) | ||
1094 | return sge_count; | ||
1095 | |||
1096 | if (sge_count > fusion->max_sge_in_main_msg) { | ||
1097 | /* One element to store the chain info */ | ||
1098 | sge_idx = fusion->max_sge_in_main_msg - 1; | ||
1099 | } else | ||
1100 | sge_idx = sge_count; | ||
1101 | |||
1102 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { | ||
1103 | sgl_ptr->Length = sg_dma_len(os_sgl); | ||
1104 | sgl_ptr->Address = sg_dma_address(os_sgl); | ||
1105 | sgl_ptr->Flags = 0; | ||
1106 | sgl_ptr++; | ||
1107 | |||
1108 | sg_processed = i + 1; | ||
1109 | |||
1110 | if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && | ||
1111 | (sge_count > fusion->max_sge_in_main_msg)) { | ||
1112 | |||
1113 | struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; | ||
1114 | cmd->io_request->ChainOffset = | ||
1115 | fusion->chain_offset_io_request; | ||
1116 | sg_chain = sgl_ptr; | ||
1117 | /* Prepare chain element */ | ||
1118 | sg_chain->NextChainOffset = 0; | ||
1119 | sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | | ||
1120 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); | ||
1121 | sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) | ||
1122 | *(sge_count - sg_processed)); | ||
1123 | sg_chain->Address = cmd->sg_frame_phys_addr; | ||
1124 | |||
1125 | sgl_ptr = | ||
1126 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1130 | return sge_count; | ||
1131 | } | ||
1132 | |||
1133 | /** | ||
1134 | * megasas_set_pd_lba -	Sets the PD LBA in the CDB for fast path IOs | ||
1135 | * @io_request:	MPT I/O request frame whose CDB is built | ||
1136 | * @cdb_len:		CDB length | ||
1137 | * @io_info:		IO info holding start block, block count and target id | ||
1138 | * @scp:		SCSI command from the mid-layer (data direction) | ||
1139 | * @local_map_ptr:	RAID map; @ref_tag: T10 PI reference tag | ||
1140 | */ | ||
1141 | void | ||
1142 | megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | ||
1143 | struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, | ||
1144 | struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) | ||
1145 | { | ||
1146 | struct MR_LD_RAID *raid; | ||
1147 | u32 ld; | ||
1148 | u64 start_blk = io_info->pdBlock; | ||
1149 | u8 *cdb = io_request->CDB.CDB32; | ||
1150 | u32 num_blocks = io_info->numBlocks; | ||
1151 | u8 opcode, flagvals, groupnum, control; | ||
1152 | |||
1153 | /* Check if T10 PI (DIF) is enabled for this LD */ | ||
1154 | ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); | ||
1155 | raid = MR_LdRaidGet(ld, local_map_ptr); | ||
1156 | if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { | ||
1157 | memset(cdb, 0, sizeof(io_request->CDB.CDB32)); | ||
1158 | cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD; | ||
1159 | cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; | ||
1160 | |||
1161 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | ||
1162 | cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; | ||
1163 | else | ||
1164 | cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; | ||
1165 | cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; | ||
1166 | |||
1167 | /* LBA */ | ||
1168 | cdb[12] = (u8)((start_blk >> 56) & 0xff); | ||
1169 | cdb[13] = (u8)((start_blk >> 48) & 0xff); | ||
1170 | cdb[14] = (u8)((start_blk >> 40) & 0xff); | ||
1171 | cdb[15] = (u8)((start_blk >> 32) & 0xff); | ||
1172 | cdb[16] = (u8)((start_blk >> 24) & 0xff); | ||
1173 | cdb[17] = (u8)((start_blk >> 16) & 0xff); | ||
1174 | cdb[18] = (u8)((start_blk >> 8) & 0xff); | ||
1175 | cdb[19] = (u8)(start_blk & 0xff); | ||
1176 | |||
1177 | /* Logical block reference tag */ | ||
1178 | io_request->CDB.EEDP32.PrimaryReferenceTag = | ||
1179 | cpu_to_be32(ref_tag); | ||
1180 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; | ||
1181 | |||
1182 | io_request->DataLength = num_blocks * 512; | ||
1183 | io_request->IoFlags = 32; /* Specify 32-byte cdb */ | ||
1184 | |||
1185 | /* Transfer length */ | ||
1186 | cdb[28] = (u8)((num_blocks >> 24) & 0xff); | ||
1187 | cdb[29] = (u8)((num_blocks >> 16) & 0xff); | ||
1188 | cdb[30] = (u8)((num_blocks >> 8) & 0xff); | ||
1189 | cdb[31] = (u8)(num_blocks & 0xff); | ||
1190 | |||
1191 | /* set SCSI IO EEDPFlags */ | ||
1192 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { | ||
1193 | io_request->EEDPFlags = | ||
1194 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | | ||
1195 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | | ||
1196 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | | ||
1197 | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | | ||
1198 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; | ||
1199 | } else { | ||
1200 | io_request->EEDPFlags = | ||
1201 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | | ||
1202 | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; | ||
1203 | } | ||
1204 | io_request->Control |= (0x4 << 26); | ||
1205 | io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE; | ||
1206 | } else { | ||
1207 | /* Some drives don't support 16/12 byte CDB's, convert to 10 */ | ||
1208 | if (((cdb_len == 12) || (cdb_len == 16)) && | ||
1209 | (start_blk <= 0xffffffff)) { | ||
1210 | if (cdb_len == 16) { | ||
1211 | opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; | ||
1212 | flagvals = cdb[1]; | ||
1213 | groupnum = cdb[14]; | ||
1214 | control = cdb[15]; | ||
1215 | } else { | ||
1216 | opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; | ||
1217 | flagvals = cdb[1]; | ||
1218 | groupnum = cdb[10]; | ||
1219 | control = cdb[11]; | ||
1220 | } | ||
1221 | |||
1222 | memset(cdb, 0, sizeof(io_request->CDB.CDB32)); | ||
1223 | |||
1224 | cdb[0] = opcode; | ||
1225 | cdb[1] = flagvals; | ||
1226 | cdb[6] = groupnum; | ||
1227 | cdb[9] = control; | ||
1228 | |||
1229 | /* Transfer length */ | ||
1230 | cdb[8] = (u8)(num_blocks & 0xff); | ||
1231 | cdb[7] = (u8)((num_blocks >> 8) & 0xff); | ||
1232 | |||
1233 | cdb_len = 10; | ||
1234 | } | ||
1235 | |||
1236 | /* Normal case, just load LBA here */ | ||
1237 | switch (cdb_len) { | ||
1238 | case 6: | ||
1239 | { | ||
1240 | u8 val = cdb[1] & 0xE0; | ||
1241 | cdb[3] = (u8)(start_blk & 0xff); | ||
1242 | cdb[2] = (u8)((start_blk >> 8) & 0xff); | ||
1243 | cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f); | ||
1244 | break; | ||
1245 | } | ||
1246 | case 10: | ||
1247 | cdb[5] = (u8)(start_blk & 0xff); | ||
1248 | cdb[4] = (u8)((start_blk >> 8) & 0xff); | ||
1249 | cdb[3] = (u8)((start_blk >> 16) & 0xff); | ||
1250 | cdb[2] = (u8)((start_blk >> 24) & 0xff); | ||
1251 | break; | ||
1252 | case 12: | ||
1253 | cdb[5] = (u8)(start_blk & 0xff); | ||
1254 | cdb[4] = (u8)((start_blk >> 8) & 0xff); | ||
1255 | cdb[3] = (u8)((start_blk >> 16) & 0xff); | ||
1256 | cdb[2] = (u8)((start_blk >> 24) & 0xff); | ||
1257 | break; | ||
1258 | case 16: | ||
1259 | cdb[9] = (u8)(start_blk & 0xff); | ||
1260 | cdb[8] = (u8)((start_blk >> 8) & 0xff); | ||
1261 | cdb[7] = (u8)((start_blk >> 16) & 0xff); | ||
1262 | cdb[6] = (u8)((start_blk >> 24) & 0xff); | ||
1263 | cdb[5] = (u8)((start_blk >> 32) & 0xff); | ||
1264 | cdb[4] = (u8)((start_blk >> 40) & 0xff); | ||
1265 | cdb[3] = (u8)((start_blk >> 48) & 0xff); | ||
1266 | cdb[2] = (u8)((start_blk >> 56) & 0xff); | ||
1267 | break; | ||
1268 | } | ||
1269 | } | ||
1270 | } | ||
1271 | |||
1272 | /** | ||
1273 | * megasas_build_ldio_fusion -	Prepares LD read/write IOs | ||
1274 | * @instance: Adapter soft state | ||
1275 | * @scp: SCSI command | ||
1276 | * @cmd: Command to be prepared | ||
1277 | * | ||
1278 | * Prepares the io_request and chain elements (sg_frame) for IO | ||
1279 | * The IO can be for PD (Fast Path) or LD | ||
1280 | */ | ||
1281 | void | ||
1282 | megasas_build_ldio_fusion(struct megasas_instance *instance, | ||
1283 | struct scsi_cmnd *scp, | ||
1284 | struct megasas_cmd_fusion *cmd) | ||
1285 | { | ||
1286 | u8 fp_possible; | ||
1287 | u32 start_lba_lo, start_lba_hi, device_id; | ||
1288 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; | ||
1289 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | ||
1290 | struct IO_REQUEST_INFO io_info; | ||
1291 | struct fusion_context *fusion; | ||
1292 | struct MR_FW_RAID_MAP_ALL *local_map_ptr; | ||
1293 | |||
1294 | device_id = MEGASAS_DEV_INDEX(instance, scp); | ||
1295 | |||
1296 | fusion = instance->ctrl_context; | ||
1297 | |||
1298 | io_request = cmd->io_request; | ||
1299 | io_request->RaidContext.VirtualDiskTgtId = device_id; | ||
1300 | io_request->RaidContext.status = 0; | ||
1301 | io_request->RaidContext.exStatus = 0; | ||
1302 | |||
1303 | req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; | ||
1304 | |||
1305 | start_lba_lo = 0; | ||
1306 | start_lba_hi = 0; | ||
1307 | fp_possible = 0; | ||
1308 | |||
1309 | /* | ||
1310 | * 6-byte READ(0x08) or WRITE(0x0A) cdb | ||
1311 | */ | ||
1312 | if (scp->cmd_len == 6) { | ||
1313 | io_request->DataLength = (u32) scp->cmnd[4]; | ||
1314 | start_lba_lo = ((u32) scp->cmnd[1] << 16) | | ||
1315 | ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; | ||
1316 | |||
1317 | start_lba_lo &= 0x1FFFFF; | ||
1318 | } | ||
1319 | |||
1320 | /* | ||
1321 | * 10-byte READ(0x28) or WRITE(0x2A) cdb | ||
1322 | */ | ||
1323 | else if (scp->cmd_len == 10) { | ||
1324 | io_request->DataLength = (u32) scp->cmnd[8] | | ||
1325 | ((u32) scp->cmnd[7] << 8); | ||
1326 | start_lba_lo = ((u32) scp->cmnd[2] << 24) | | ||
1327 | ((u32) scp->cmnd[3] << 16) | | ||
1328 | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; | ||
1329 | } | ||
1330 | |||
1331 | /* | ||
1332 | * 12-byte READ(0xA8) or WRITE(0xAA) cdb | ||
1333 | */ | ||
1334 | else if (scp->cmd_len == 12) { | ||
1335 | io_request->DataLength = ((u32) scp->cmnd[6] << 24) | | ||
1336 | ((u32) scp->cmnd[7] << 16) | | ||
1337 | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; | ||
1338 | start_lba_lo = ((u32) scp->cmnd[2] << 24) | | ||
1339 | ((u32) scp->cmnd[3] << 16) | | ||
1340 | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * 16-byte READ(0x88) or WRITE(0x8A) cdb | ||
1345 | */ | ||
1346 | else if (scp->cmd_len == 16) { | ||
1347 | io_request->DataLength = ((u32) scp->cmnd[10] << 24) | | ||
1348 | ((u32) scp->cmnd[11] << 16) | | ||
1349 | ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; | ||
1350 | start_lba_lo = ((u32) scp->cmnd[6] << 24) | | ||
1351 | ((u32) scp->cmnd[7] << 16) | | ||
1352 | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; | ||
1353 | |||
1354 | start_lba_hi = ((u32) scp->cmnd[2] << 24) | | ||
1355 | ((u32) scp->cmnd[3] << 16) | | ||
1356 | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; | ||
1357 | } | ||
1358 | |||
1359 | memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); | ||
1360 | io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; | ||
1361 | io_info.numBlocks = io_request->DataLength; | ||
1362 | io_info.ldTgtId = device_id; | ||
1363 | |||
1364 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | ||
1365 | io_info.isRead = 1; | ||
1366 | |||
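/*
 * Two copies of the RAID map are kept; the low bit of map_id selects
 * the one that is currently valid.
 */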
1367 | local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; | ||
1368 | |||
1369 | if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= | ||
1370 | MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) { | ||
1371 | io_request->RaidContext.regLockFlags = 0; | ||
1372 | fp_possible = 0; | ||
1373 | } else { | ||
1374 | if (MR_BuildRaidContext(&io_info, &io_request->RaidContext, | ||
1375 | local_map_ptr)) | ||
1376 | fp_possible = io_info.fpOkForIo; | ||
1377 | } | ||
1378 | |||
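/*
 * Fast path: send the I/O straight to the device handle taken from the
 * RAID map; otherwise fall back to the FW LD I/O path.
 */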
1379 | if (fp_possible) { | ||
1380 | megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, | ||
1381 | local_map_ptr, start_lba_lo); | ||
1382 | io_request->DataLength = scsi_bufflen(scp); | ||
1383 | io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; | ||
1384 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1385 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY | ||
1386 | << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1387 | if ((fusion->load_balance_info[device_id].loadBalanceFlag) && | ||
1388 | (io_info.isRead)) { | ||
1389 | io_info.devHandle = | ||
1390 | get_updated_dev_handle( | ||
1391 | &fusion->load_balance_info[device_id], | ||
1392 | &io_info); | ||
1393 | scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; | ||
1394 | } else | ||
1395 | scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; | ||
1396 | cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; | ||
1397 | io_request->DevHandle = io_info.devHandle; | ||
1398 | } else { | ||
1399 | io_request->RaidContext.timeoutValue = | ||
1400 | local_map_ptr->raidMap.fpPdIoTimeoutSec; | ||
1401 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; | ||
1402 | io_request->DevHandle = device_id; | ||
1403 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1404 | (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO | ||
1405 | << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1406 | } /* Not FP */ | ||
1407 | } | ||
1408 | |||
1409 | /** | ||
1410 | * megasas_build_dcdb_fusion -	Prepares non read/write (DCDB) IOs | ||
1411 | * @instance:		Adapter soft state | ||
1412 | * @scmd:		SCSI command | ||
1413 | * @cmd:		Command to be prepared | ||
1414 | * | ||
1415 | * Prepares the io_request frame for non read/write cmds and system PD IOs | ||
1416 | */ | ||
1417 | static void | ||
1418 | megasas_build_dcdb_fusion(struct megasas_instance *instance, | ||
1419 | struct scsi_cmnd *scmd, | ||
1420 | struct megasas_cmd_fusion *cmd) | ||
1421 | { | ||
1422 | u32 device_id; | ||
1423 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; | ||
1424 | u16 pd_index = 0; | ||
1425 | struct MR_FW_RAID_MAP_ALL *local_map_ptr; | ||
1426 | struct fusion_context *fusion = instance->ctrl_context; | ||
1427 | |||
1428 | io_request = cmd->io_request; | ||
1429 | device_id = MEGASAS_DEV_INDEX(instance, scmd); | ||
1430 | pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) | ||
1431 | +scmd->device->id; | ||
1432 | local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; | ||
1433 | |||
1434 | /* Check if this is a system PD I/O */ | ||
1435 | if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) && | ||
1436 | (instance->pd_list[pd_index].driveType == TYPE_DISK)) { | ||
1437 | io_request->Function = 0; | ||
1438 | io_request->DevHandle = | ||
1439 | local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; | ||
1440 | io_request->RaidContext.timeoutValue = | ||
1441 | local_map_ptr->raidMap.fpPdIoTimeoutSec; | ||
1442 | io_request->RaidContext.regLockFlags = 0; | ||
1443 | io_request->RaidContext.regLockRowLBA = 0; | ||
1444 | io_request->RaidContext.regLockLength = 0; | ||
1445 | io_request->RaidContext.RAIDFlags = | ||
1446 | MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << | ||
1447 | MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; | ||
1448 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1449 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | ||
1450 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1451 | } else { | ||
1452 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; | ||
1453 | io_request->DevHandle = device_id; | ||
1454 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1455 | (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << | ||
1456 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1457 | } | ||
1458 | io_request->RaidContext.VirtualDiskTgtId = device_id; | ||
1459 | io_request->LUN[0] = scmd->device->lun; | ||
1460 | io_request->DataLength = scsi_bufflen(scmd); | ||
1461 | } | ||
1462 | |||
1463 | /** | ||
1464 | * megasas_build_io_fusion - Prepares IOs to devices | ||
1465 | * @instance: Adapter soft state | ||
1466 | * @scp: SCSI command | ||
1467 | * @cmd: Command to be prepared | ||
1468 | * | ||
1469 | * Invokes helper functions to prepare request frames | ||
1470 | * and sets flags appropriate for IO/Non-IO cmd | ||
1471 | */ | ||
1472 | int | ||
1473 | megasas_build_io_fusion(struct megasas_instance *instance, | ||
1474 | struct scsi_cmnd *scp, | ||
1475 | struct megasas_cmd_fusion *cmd) | ||
1476 | { | ||
1477 | u32 device_id, sge_count; | ||
1478 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request; | ||
1479 | |||
1480 | device_id = MEGASAS_DEV_INDEX(instance, scp); | ||
1481 | |||
1482 | /* Zero out some fields so they don't get reused */ | ||
1483 | io_request->LUN[0] = 0; | ||
1484 | io_request->CDB.EEDP32.PrimaryReferenceTag = 0; | ||
1485 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; | ||
1486 | io_request->EEDPFlags = 0; | ||
1487 | io_request->Control = 0; | ||
1488 | io_request->EEDPBlockSize = 0; | ||
1489 | io_request->IoFlags = 0; | ||
1490 | io_request->RaidContext.RAIDFlags = 0; | ||
1491 | |||
1492 | memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); | ||
1493 | /* | ||
1494 | * Just the CDB length, rest of the Flags are zero | ||
1495 | * This will be modified for FP in build_ldio_fusion | ||
1496 | */ | ||
1497 | io_request->IoFlags = scp->cmd_len; | ||
1498 | |||
1499 | if (megasas_is_ldio(scp)) | ||
1500 | megasas_build_ldio_fusion(instance, scp, cmd); | ||
1501 | else | ||
1502 | megasas_build_dcdb_fusion(instance, scp, cmd); | ||
1503 | |||
1504 | /* | ||
1505 | * Construct SGL | ||
1506 | */ | ||
1507 | |||
1508 | sge_count = | ||
1509 | megasas_make_sgl_fusion(instance, scp, | ||
1510 | (struct MPI25_IEEE_SGE_CHAIN64 *) | ||
1511 | &io_request->SGL, cmd); | ||
1512 | |||
1513 | if (sge_count > instance->max_num_sge) { | ||
1514 | printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds " | ||
1515 | "max (0x%x) allowed\n", sge_count, | ||
1516 | instance->max_num_sge); | ||
1517 | return 1; | ||
1518 | } | ||
1519 | |||
1520 | io_request->RaidContext.numSGE = sge_count; | ||
1521 | |||
1522 | io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; | ||
1523 | |||
1524 | if (scp->sc_data_direction == PCI_DMA_TODEVICE) | ||
1525 | io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; | ||
1526 | else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | ||
1527 | io_request->Control |= MPI2_SCSIIO_CONTROL_READ; | ||
1528 | |||
1529 | io_request->SGLOffset0 = | ||
1530 | offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; | ||
1531 | |||
1532 | io_request->SenseBufferLowAddress = cmd->sense_phys_addr; | ||
1533 | io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; | ||
1534 | |||
1535 | cmd->scmd = scp; | ||
1536 | scp->SCp.ptr = (char *)cmd; | ||
1537 | |||
1538 | return 0; | ||
1539 | } | ||
1540 | |||
1541 | union MEGASAS_REQUEST_DESCRIPTOR_UNION * | ||
1542 | megasas_get_request_descriptor(struct megasas_instance *instance, u16 index) | ||
1543 | { | ||
1544 | u8 *p; | ||
1545 | struct fusion_context *fusion; | ||
1546 | |||
1547 | if (index >= instance->max_fw_cmds) { | ||
1548 | printk(KERN_ERR "megasas: Invalid SMID (0x%x) request for " | ||
1549 | "descriptor\n", index); | ||
1550 | return NULL; | ||
1551 | } | ||
1552 | fusion = instance->ctrl_context; | ||
1553 | p = fusion->req_frames_desc | ||
1554 | +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index; | ||
1555 | |||
1556 | return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p; | ||
1557 | } | ||
1558 | |||
1559 | /** | ||
1560 | * megasas_build_and_issue_cmd_fusion - Main routine for building and | ||
1561 | *					issuing a non-IOCTL cmd | ||
1562 | * @instance: Adapter soft state | ||
1563 | * @scmd: pointer to scsi cmd from OS | ||
1564 | */ | ||
1565 | static u32 | ||
1566 | megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, | ||
1567 | struct scsi_cmnd *scmd) | ||
1568 | { | ||
1569 | struct megasas_cmd_fusion *cmd; | ||
1570 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | ||
1571 | u32 index; | ||
1572 | struct fusion_context *fusion; | ||
1573 | |||
1574 | fusion = instance->ctrl_context; | ||
1575 | |||
1576 | cmd = megasas_get_cmd_fusion(instance); | ||
1577 | if (!cmd) | ||
1578 | return SCSI_MLQUEUE_HOST_BUSY; | ||
1579 | |||
1580 | index = cmd->index; | ||
1581 | |||
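/* SMIDs are 1-based; the request descriptor table is indexed from 0. */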
1582 | req_desc = megasas_get_request_descriptor(instance, index-1); | ||
1583 | if (!req_desc) | ||
1584 | return 1; | ||
1585 | |||
1586 | req_desc->Words = 0; | ||
1587 | cmd->request_desc = req_desc; | ||
1588 | cmd->request_desc->Words = 0; | ||
1589 | |||
1590 | if (megasas_build_io_fusion(instance, scmd, cmd)) { | ||
1591 | megasas_return_cmd_fusion(instance, cmd); | ||
1592 | printk(KERN_ERR "megasas: Error building command.\n"); | ||
1593 | cmd->request_desc = NULL; | ||
1594 | return 1; | ||
1595 | } | ||
1596 | |||
1597 | req_desc = cmd->request_desc; | ||
1598 | req_desc->SCSIIO.SMID = index; | ||
1599 | |||
1600 | if (cmd->io_request->ChainOffset != 0 && | ||
1601 | cmd->io_request->ChainOffset != 0xF) | ||
1602 | printk(KERN_ERR "megasas: The chain offset value is not " | ||
1603 | "correct : %x\n", cmd->io_request->ChainOffset); | ||
1604 | |||
1605 | /* | ||
1606 | * Issue the command to the FW | ||
1607 | */ | ||
1608 | atomic_inc(&instance->fw_outstanding); | ||
1609 | |||
1610 | instance->instancet->fire_cmd(instance, | ||
1611 | req_desc->u.low, req_desc->u.high, | ||
1612 | instance->reg_set); | ||
1613 | |||
1614 | return 0; | ||
1615 | } | ||
1616 | |||
1617 | /** | ||
1618 | * complete_cmd_fusion - Completes command | ||
1619 | * @instance: Adapter soft state | ||
1620 | * Completes all commands that are in the reply descriptor queue | ||
1621 | */ | ||
1622 | int | ||
1623 | complete_cmd_fusion(struct megasas_instance *instance) | ||
1624 | { | ||
1625 | union MPI2_REPLY_DESCRIPTORS_UNION *desc; | ||
1626 | struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; | ||
1627 | struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; | ||
1628 | struct fusion_context *fusion; | ||
1629 | struct megasas_cmd *cmd_mfi; | ||
1630 | struct megasas_cmd_fusion *cmd_fusion; | ||
1631 | u16 smid, num_completed; | ||
1632 | u8 reply_descript_type, arm; | ||
1633 | u32 status, extStatus, device_id; | ||
1634 | union desc_value d_val; | ||
1635 | struct LD_LOAD_BALANCE_INFO *lbinfo; | ||
1636 | |||
1637 | fusion = instance->ctrl_context; | ||
1638 | |||
1639 | if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) | ||
1640 | return IRQ_HANDLED; | ||
1641 | |||
1642 | desc = fusion->reply_frames_desc; | ||
1643 | desc += fusion->last_reply_idx; | ||
1644 | |||
1645 | reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; | ||
1646 | |||
1647 | d_val.word = desc->Words; | ||
1648 | |||
1649 | reply_descript_type = reply_desc->ReplyFlags & | ||
1650 | MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; | ||
1651 | |||
1652 | if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) | ||
1653 | return IRQ_NONE; | ||
1654 | |||
1655 | d_val.word = desc->Words; | ||
1656 | |||
1657 | num_completed = 0; | ||
1658 | |||
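/*
 * Walk the reply ring, completing each command by its SMID, until a
 * descriptor still marked unused (all ones) is reached.
 */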
1659 | while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { | ||
1660 | smid = reply_desc->SMID; | ||
1661 | |||
1662 | cmd_fusion = fusion->cmd_list[smid - 1]; | ||
1663 | |||
1664 | scsi_io_req = | ||
1665 | (struct MPI2_RAID_SCSI_IO_REQUEST *) | ||
1666 | cmd_fusion->io_request; | ||
1667 | |||
1668 | if (cmd_fusion->scmd) | ||
1669 | cmd_fusion->scmd->SCp.ptr = NULL; | ||
1670 | |||
1671 | status = scsi_io_req->RaidContext.status; | ||
1672 | extStatus = scsi_io_req->RaidContext.exStatus; | ||
1673 | |||
1674 | switch (scsi_io_req->Function) { | ||
1675 | case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/ | ||
1676 | /* Update load balancing info */ | ||
1677 | device_id = MEGASAS_DEV_INDEX(instance, | ||
1678 | cmd_fusion->scmd); | ||
1679 | lbinfo = &fusion->load_balance_info[device_id]; | ||
1680 | if (cmd_fusion->scmd->SCp.Status & | ||
1681 | MEGASAS_LOAD_BALANCE_FLAG) { | ||
1682 | arm = lbinfo->raid1DevHandle[0] == | ||
1683 | cmd_fusion->io_request->DevHandle ? 0 : | ||
1684 | 1; | ||
1685 | atomic_dec(&lbinfo->scsi_pending_cmds[arm]); | ||
1686 | cmd_fusion->scmd->SCp.Status &= | ||
1687 | ~MEGASAS_LOAD_BALANCE_FLAG; | ||
1688 | } | ||
1689 | if (reply_descript_type == | ||
1690 | MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { | ||
1691 | if (megasas_dbg_lvl == 5) | ||
1692 | printk(KERN_ERR "\nmegasas: FAST Path " | ||
1693 | "IO Success\n"); | ||
1694 | } | ||
1695 | /* Fall thru and complete IO */ | ||
1696 | case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */ | ||
1697 | /* Map the FW Cmd Status */ | ||
1698 | map_cmd_status(cmd_fusion, status, extStatus); | ||
1699 | scsi_dma_unmap(cmd_fusion->scmd); | ||
1700 | cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); | ||
1701 | scsi_io_req->RaidContext.status = 0; | ||
1702 | scsi_io_req->RaidContext.exStatus = 0; | ||
1703 | megasas_return_cmd_fusion(instance, cmd_fusion); | ||
1704 | atomic_dec(&instance->fw_outstanding); | ||
1705 | |||
1706 | break; | ||
1707 | case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ | ||
1708 | cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; | ||
1709 | megasas_complete_cmd(instance, cmd_mfi, DID_OK); | ||
1710 | cmd_fusion->flags = 0; | ||
1711 | megasas_return_cmd_fusion(instance, cmd_fusion); | ||
1712 | |||
1713 | break; | ||
1714 | } | ||
1715 | |||
1716 | fusion->last_reply_idx++; | ||
1717 | if (fusion->last_reply_idx >= fusion->reply_q_depth) | ||
1718 | fusion->last_reply_idx = 0; | ||
1719 | |||
1720 | desc->Words = ULLONG_MAX; | ||
1721 | num_completed++; | ||
1722 | |||
1723 | /* Get the next reply descriptor */ | ||
1724 | if (!fusion->last_reply_idx) | ||
1725 | desc = fusion->reply_frames_desc; | ||
1726 | else | ||
1727 | desc++; | ||
1728 | |||
1729 | reply_desc = | ||
1730 | (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; | ||
1731 | |||
1732 | d_val.word = desc->Words; | ||
1733 | |||
1734 | reply_descript_type = reply_desc->ReplyFlags & | ||
1735 | MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; | ||
1736 | |||
1737 | if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) | ||
1738 | break; | ||
1739 | } | ||
1740 | |||
1741 | if (!num_completed) | ||
1742 | return IRQ_NONE; | ||
1743 | |||
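/*
 * Order the descriptor updates above before letting the HW see the new
 * reply post host index.
 */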
1744 | wmb(); | ||
1745 | writel(fusion->last_reply_idx, | ||
1746 | &instance->reg_set->reply_post_host_index); | ||
1747 | |||
1748 | return IRQ_HANDLED; | ||
1749 | } | ||
1750 | |||
1751 | /** | ||
1752 | * megasas_complete_cmd_dpc_fusion - Completes command | ||
1753 | * @instance: Adapter soft state | ||
1754 | * | ||
1755 | * Tasklet to complete cmds | ||
1756 | */ | ||
1757 | void | ||
1758 | megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) | ||
1759 | { | ||
1760 | struct megasas_instance *instance = | ||
1761 | (struct megasas_instance *)instance_addr; | ||
1762 | unsigned long flags; | ||
1763 | |||
1764 | /* If we have already declared the adapter dead, do not complete cmds */ | ||
1765 | spin_lock_irqsave(&instance->hba_lock, flags); | ||
1766 | if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { | ||
1767 | spin_unlock_irqrestore(&instance->hba_lock, flags); | ||
1768 | return; | ||
1769 | } | ||
1770 | spin_unlock_irqrestore(&instance->hba_lock, flags); | ||
1771 | |||
1772 | spin_lock_irqsave(&instance->completion_lock, flags); | ||
1773 | complete_cmd_fusion(instance); | ||
1774 | spin_unlock_irqrestore(&instance->completion_lock, flags); | ||
1775 | } | ||
1776 | |||
1777 | /** | ||
1778 | * megasas_isr_fusion - ISR entry point | ||
1779 | */ | ||
1780 | irqreturn_t megasas_isr_fusion(int irq, void *devp) | ||
1781 | { | ||
1782 | struct megasas_instance *instance = (struct megasas_instance *)devp; | ||
1783 | u32 mfiStatus, fw_state; | ||
1784 | |||
1785 | if (!instance->msi_flag) { | ||
1786 | mfiStatus = instance->instancet->clear_intr(instance->reg_set); | ||
1787 | if (!mfiStatus) | ||
1788 | return IRQ_NONE; | ||
1789 | } | ||
1790 | |||
1791 | /* If we are resetting, bail */ | ||
1792 | if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) | ||
1793 | return IRQ_HANDLED; | ||
1794 | |||
1795 | if (!complete_cmd_fusion(instance)) { | ||
1796 | /* If we didn't complete any commands, check for FW fault */ | ||
1797 | fw_state = instance->instancet->read_fw_status_reg( | ||
1798 | instance->reg_set) & MFI_STATE_MASK; | ||
1799 | if (fw_state == MFI_STATE_FAULT) | ||
1800 | schedule_work(&instance->work_init); | ||
1801 | } | ||
1802 | |||
1803 | return IRQ_HANDLED; | ||
1804 | } | ||
1805 | |||
1806 | /** | ||
1807 | * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru | ||
1808 | * @instance: Adapter soft state | ||
1809 | * @mfi_cmd: megasas_cmd pointer | ||
1810 | * | ||
1811 | */ | ||
1812 | u8 | ||
1813 | build_mpt_mfi_pass_thru(struct megasas_instance *instance, | ||
1814 | struct megasas_cmd *mfi_cmd) | ||
1815 | { | ||
1816 | struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; | ||
1817 | struct MPI2_RAID_SCSI_IO_REQUEST *io_req; | ||
1818 | struct megasas_cmd_fusion *cmd; | ||
1819 | struct fusion_context *fusion; | ||
1820 | struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; | ||
1821 | |||
1822 | cmd = megasas_get_cmd_fusion(instance); | ||
1823 | if (!cmd) | ||
1824 | return 1; | ||
1825 | |||
1826 | /* Save the smid. To be used for returning the cmd */ | ||
1827 | mfi_cmd->context.smid = cmd->index; | ||
1828 | |||
1829 | cmd->sync_cmd_idx = mfi_cmd->index; | ||
1830 | |||
1831 | /* | ||
1832 | * For cmds where the flag is set, store the flag and check | ||
1833 | * on completion. For cmds with this flag, don't call | ||
1834 | * megasas_complete_cmd | ||
1835 | */ | ||
1836 | |||
1837 | if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) | ||
1838 | cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; | ||
1839 | |||
1840 | fusion = instance->ctrl_context; | ||
1841 | io_req = cmd->io_request; | ||
1842 | mpi25_ieee_chain = | ||
1843 | (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; | ||
1844 | |||
1845 | io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; | ||
1846 | io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, | ||
1847 | SGL) / 4; | ||
1848 | io_req->ChainOffset = fusion->chain_offset_mfi_pthru; | ||
1849 | |||
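/*
 * The single chain element points at the legacy MFI frame so the FW can
 * fetch the pass-thru frame by DMA.
 */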
1850 | mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; | ||
1851 | |||
1852 | mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | | ||
1853 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; | ||
1854 | |||
1855 | mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME; | ||
1856 | |||
1857 | return 0; | ||
1858 | } | ||
1859 | |||
1860 | /** | ||
1861 | * build_mpt_cmd - Builds an MFI Pass thru cmd and its request descriptor | ||
1862 | * @instance: Adapter soft state | ||
1863 | * @cmd: mfi cmd to build | ||
1864 | * | ||
1865 | */ | ||
1866 | union MEGASAS_REQUEST_DESCRIPTOR_UNION * | ||
1867 | build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) | ||
1868 | { | ||
1869 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | ||
1870 | u16 index; | ||
1871 | |||
1872 | if (build_mpt_mfi_pass_thru(instance, cmd)) { | ||
1873 | printk(KERN_ERR "Couldn't build MFI pass thru cmd\n"); | ||
1874 | return NULL; | ||
1875 | } | ||
1876 | |||
1877 | index = cmd->context.smid; | ||
1878 | |||
1879 | req_desc = megasas_get_request_descriptor(instance, index - 1); | ||
1880 | |||
1881 | if (!req_desc) | ||
1882 | return NULL; | ||
1883 | |||
1884 | req_desc->Words = 0; | ||
1885 | req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << | ||
1886 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1887 | |||
1888 | req_desc->SCSIIO.SMID = index; | ||
1889 | |||
1890 | return req_desc; | ||
1891 | } | ||
1892 | |||
1893 | /** | ||
1894 | * megasas_issue_dcmd_fusion - Issues an MFI Pass thru cmd | ||
1895 | * @instance: Adapter soft state | ||
1896 | * @cmd: mfi cmd pointer | ||
1897 | * | ||
1898 | */ | ||
1899 | void | ||
1900 | megasas_issue_dcmd_fusion(struct megasas_instance *instance, | ||
1901 | struct megasas_cmd *cmd) | ||
1902 | { | ||
1903 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | ||
1904 | union desc_value d_val; | ||
1905 | |||
1906 | req_desc = build_mpt_cmd(instance, cmd); | ||
1907 | if (!req_desc) { | ||
1908 | printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); | ||
1909 | return; | ||
1910 | } | ||
1911 | d_val.word = req_desc->Words; | ||
1912 | |||
1913 | instance->instancet->fire_cmd(instance, req_desc->u.low, | ||
1914 | req_desc->u.high, instance->reg_set); | ||
1915 | } | ||
1916 | |||
1917 | /** | ||
1918 | * megasas_release_fusion - Reverses the FW initialization | ||
1919 | * @instance: Adapter soft state | ||
1920 | */ | ||
1921 | void | ||
1922 | megasas_release_fusion(struct megasas_instance *instance) | ||
1923 | { | ||
1924 | megasas_free_cmds(instance); | ||
1925 | megasas_free_cmds_fusion(instance); | ||
1926 | |||
1927 | iounmap(instance->reg_set); | ||
1928 | |||
1929 | pci_release_selected_regions(instance->pdev, instance->bar); | ||
1930 | } | ||
1931 | |||
1932 | /** | ||
1933 | * megasas_read_fw_status_reg_fusion - returns the current FW status value | ||
1934 | * @regs: MFI register set | ||
1935 | */ | ||
1936 | static u32 | ||
1937 | megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs) | ||
1938 | { | ||
1939 | return readl(&(regs)->outbound_scratch_pad); | ||
1940 | } | ||
1941 | |||
1942 | /** | ||
1943 | * megasas_adp_reset_fusion - For controller reset | ||
1944 | * @regs: MFI register set | ||
1945 | */ | ||
1946 | static int | ||
1947 | megasas_adp_reset_fusion(struct megasas_instance *instance, | ||
1948 | struct megasas_register_set __iomem *regs) | ||
1949 | { | ||
1950 | return 0; | ||
1951 | } | ||
1952 | |||
1953 | /** | ||
1954 | * megasas_check_reset_fusion - For controller reset check | ||
1955 | * @regs: MFI register set | ||
1956 | */ | ||
1957 | static int | ||
1958 | megasas_check_reset_fusion(struct megasas_instance *instance, | ||
1959 | struct megasas_register_set __iomem *regs) | ||
1960 | { | ||
1961 | return 0; | ||
1962 | } | ||
1963 | |||
1964 | /* This function waits for outstanding commands on fusion to complete */ | ||
1965 | int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance) | ||
1966 | { | ||
1967 | int i, outstanding, retval = 0; | ||
1968 | u32 fw_state, wait_time = MEGASAS_RESET_WAIT_TIME; | ||
1969 | |||
1970 | for (i = 0; i < wait_time; i++) { | ||
1971 | /* Check if firmware is in fault state */ | ||
1972 | fw_state = instance->instancet->read_fw_status_reg( | ||
1973 | instance->reg_set) & MFI_STATE_MASK; | ||
1974 | if (fw_state == MFI_STATE_FAULT) { | ||
1975 | printk(KERN_WARNING "megasas: Found FW in FAULT state," | ||
1976 | " will reset adapter.\n"); | ||
1977 | retval = 1; | ||
1978 | goto out; | ||
1979 | } | ||
1980 | |||
1981 | outstanding = atomic_read(&instance->fw_outstanding); | ||
1982 | if (!outstanding) | ||
1983 | goto out; | ||
1984 | |||
1985 | if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { | ||
1986 | printk(KERN_NOTICE "megasas: [%2d]waiting for %d " | ||
1987 | "commands to complete\n", i, outstanding); | ||
1988 | megasas_complete_cmd_dpc_fusion( | ||
1989 | (unsigned long)instance); | ||
1990 | } | ||
1991 | msleep(1000); | ||
1992 | } | ||
1993 | |||
1994 | if (atomic_read(&instance->fw_outstanding)) { | ||
1995 | printk("megaraid_sas: pending commands remain after waiting, " | ||
1996 | "will reset adapter.\n"); | ||
1997 | retval = 1; | ||
1998 | } | ||
1999 | out: | ||
2000 | return retval; | ||
2001 | } | ||
2002 | |||
2003 | void megasas_reset_reply_desc(struct megasas_instance *instance) | ||
2004 | { | ||
2005 | int i; | ||
2006 | struct fusion_context *fusion; | ||
2007 | union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; | ||
2008 | |||
2009 | fusion = instance->ctrl_context; | ||
2010 | fusion->last_reply_idx = 0; | ||
2011 | reply_desc = fusion->reply_frames_desc; | ||
2012 | for (i = 0 ; i < fusion->reply_q_depth; i++, reply_desc++) | ||
2013 | reply_desc->Words = ULLONG_MAX; | ||
2014 | } | ||
2015 | |||
2016 | /* Core fusion reset function */ | ||
2017 | int megasas_reset_fusion(struct Scsi_Host *shost) | ||
2018 | { | ||
2019 | int retval = SUCCESS, i, j, retry = 0; | ||
2020 | struct megasas_instance *instance; | ||
2021 | struct megasas_cmd_fusion *cmd_fusion; | ||
2022 | struct fusion_context *fusion; | ||
2023 | struct megasas_cmd *cmd_mfi; | ||
2024 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; | ||
2025 | u32 host_diag, abs_state; | ||
2026 | |||
2027 | instance = (struct megasas_instance *)shost->hostdata; | ||
2028 | fusion = instance->ctrl_context; | ||
2029 | |||
2030 | mutex_lock(&instance->reset_mutex); | ||
2031 | set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); | ||
2032 | instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; | ||
2033 | instance->instancet->disable_intr(instance->reg_set); | ||
2034 | msleep(1000); | ||
2035 | |||
2036 | if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { | ||
2037 | printk(KERN_WARNING "megaraid_sas: Hardware critical error, " | ||
2038 | "returning FAILED.\n"); | ||
2039 | retval = FAILED; | ||
2040 | goto out; | ||
2041 | } | ||
2042 | |||
2043 | /* First try waiting for commands to complete */ | ||
2044 | if (megasas_wait_for_outstanding_fusion(instance)) { | ||
2045 | printk(KERN_WARNING "megaraid_sas: resetting fusion " | ||
2046 | "adapter.\n"); | ||
2047 | /* Now return commands back to the OS */ | ||
2048 | for (i = 0 ; i < instance->max_fw_cmds; i++) { | ||
2049 | cmd_fusion = fusion->cmd_list[i]; | ||
2050 | if (cmd_fusion->scmd) { | ||
2051 | scsi_dma_unmap(cmd_fusion->scmd); | ||
2052 | cmd_fusion->scmd->result = (DID_RESET << 16); | ||
2053 | cmd_fusion->scmd->scsi_done(cmd_fusion->scmd); | ||
2054 | megasas_return_cmd_fusion(instance, cmd_fusion); | ||
2055 | atomic_dec(&instance->fw_outstanding); | ||
2056 | } | ||
2057 | } | ||
2058 | |||
2059 | if (instance->disableOnlineCtrlReset == 1) { | ||
2060 | /* Reset not supported, kill adapter */ | ||
2061 | printk(KERN_WARNING "megaraid_sas: Reset not supported" | ||
2062 | ", killing adapter.\n"); | ||
2063 | megaraid_sas_kill_hba(instance); | ||
2064 | instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR; | ||
2065 | retval = FAILED; | ||
2066 | goto out; | ||
2067 | } | ||
2068 | |||
2069 | /* Now try to reset the chip */ | ||
2070 | for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) { | ||
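/*
 * Write the magic key sequence to unlock the host diagnostic
 * register before requesting the chip reset.
 */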
2071 | writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, | ||
2072 | &instance->reg_set->fusion_seq_offset); | ||
2073 | writel(MPI2_WRSEQ_1ST_KEY_VALUE, | ||
2074 | &instance->reg_set->fusion_seq_offset); | ||
2075 | writel(MPI2_WRSEQ_2ND_KEY_VALUE, | ||
2076 | &instance->reg_set->fusion_seq_offset); | ||
2077 | writel(MPI2_WRSEQ_3RD_KEY_VALUE, | ||
2078 | &instance->reg_set->fusion_seq_offset); | ||
2079 | writel(MPI2_WRSEQ_4TH_KEY_VALUE, | ||
2080 | &instance->reg_set->fusion_seq_offset); | ||
2081 | writel(MPI2_WRSEQ_5TH_KEY_VALUE, | ||
2082 | &instance->reg_set->fusion_seq_offset); | ||
2083 | writel(MPI2_WRSEQ_6TH_KEY_VALUE, | ||
2084 | &instance->reg_set->fusion_seq_offset); | ||
2085 | |||
2086 | /* Check that the diag write enable (DRWE) bit is on */ | ||
2087 | host_diag = readl(&instance->reg_set->fusion_host_diag); | ||
2088 | while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { | ||
2089 | msleep(100); | ||
2090 | host_diag = | ||
2091 | readl(&instance->reg_set->fusion_host_diag); | ||
2092 | if (retry++ == 100) { | ||
2093 | printk(KERN_WARNING "megaraid_sas: " | ||
2094 | "Host diag unlock failed!\n"); | ||
2095 | break; | ||
2096 | } | ||
2097 | } | ||
2098 | if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) | ||
2099 | continue; | ||
2100 | |||
2101 | /* Send chip reset command */ | ||
2102 | writel(host_diag | HOST_DIAG_RESET_ADAPTER, | ||
2103 | &instance->reg_set->fusion_host_diag); | ||
2104 | msleep(3000); | ||
2105 | |||
2106 | /* Make sure reset adapter bit is cleared */ | ||
2107 | host_diag = readl(&instance->reg_set->fusion_host_diag); | ||
2108 | retry = 0; | ||
2109 | while (host_diag & HOST_DIAG_RESET_ADAPTER) { | ||
2110 | msleep(100); | ||
2111 | host_diag = | ||
2112 | readl(&instance->reg_set->fusion_host_diag); | ||
2113 | if (retry++ == 1000) { | ||
2114 | printk(KERN_WARNING "megaraid_sas: " | ||
2115 | "Diag reset adapter never " | ||
2116 | "cleared!\n"); | ||
2117 | break; | ||
2118 | } | ||
2119 | } | ||
2120 | if (host_diag & HOST_DIAG_RESET_ADAPTER) | ||
2121 | continue; | ||
2122 | |||
2123 | abs_state = | ||
2124 | instance->instancet->read_fw_status_reg( | ||
2125 | instance->reg_set); | ||
2126 | retry = 0; | ||
2127 | |||
2128 | while ((abs_state <= MFI_STATE_FW_INIT) && | ||
2129 | (retry++ < 1000)) { | ||
2130 | msleep(100); | ||
2131 | abs_state = | ||
2132 | instance->instancet->read_fw_status_reg( | ||
2133 | instance->reg_set); | ||
2134 | } | ||
2135 | if (abs_state <= MFI_STATE_FW_INIT) { | ||
2136 | printk(KERN_WARNING "megaraid_sas: firmware " | ||
2137 | "state < MFI_STATE_FW_INIT, state = " | ||
2138 | "0x%x\n", abs_state); | ||
2139 | continue; | ||
2140 | } | ||
2141 | |||
2142 | /* Wait for FW to become ready */ | ||
2143 | if (megasas_transition_to_ready(instance)) { | ||
2144 | printk(KERN_WARNING "megaraid_sas: Failed to " | ||
2145 | "transition controller to ready.\n"); | ||
2146 | continue; | ||
2147 | } | ||
2148 | |||
2149 | megasas_reset_reply_desc(instance); | ||
2150 | if (megasas_ioc_init_fusion(instance)) { | ||
2151 | printk(KERN_WARNING "megaraid_sas: " | ||
2152 | "megasas_ioc_init_fusion() failed!\n"); | ||
2153 | continue; | ||
2154 | } | ||
2155 | |||
2156 | instance->instancet->enable_intr(instance->reg_set); | ||
2157 | instance->adprecovery = MEGASAS_HBA_OPERATIONAL; | ||
2158 | |||
2159 | /* Re-fire management commands */ | ||
2160 | for (j = 0 ; j < instance->max_fw_cmds; j++) { | ||
2161 | cmd_fusion = fusion->cmd_list[j]; | ||
2162 | if (cmd_fusion->sync_cmd_idx != | ||
2163 | (u32)ULONG_MAX) { | ||
2164 | cmd_mfi = | ||
2165 | instance-> | ||
2166 | cmd_list[cmd_fusion->sync_cmd_idx]; | ||
2167 | if (cmd_mfi->frame->dcmd.opcode == | ||
2168 | MR_DCMD_LD_MAP_GET_INFO) { | ||
2169 | megasas_return_cmd(instance, | ||
2170 | cmd_mfi); | ||
2171 | megasas_return_cmd_fusion( | ||
2172 | instance, cmd_fusion); | ||
2173 | } else { | ||
2174 | req_desc = | ||
2175 | megasas_get_request_descriptor( | ||
2176 | instance, | ||
2177 | cmd_mfi->context.smid | ||
2178 | -1); | ||
2179 | if (!req_desc) | ||
2180 | printk(KERN_WARNING | ||
2181 | "req_desc NULL" | ||
2182 | "\n"); | ||
2183 | else { | ||
2184 | instance->instancet-> | ||
2185 | fire_cmd(instance, | ||
2186 | req_desc-> | ||
2187 | u.low, | ||
2188 | req_desc-> | ||
2189 | u.high, | ||
2190 | instance-> | ||
2191 | reg_set); | ||
2192 | } | ||
2193 | } | ||
2194 | } | ||
2195 | } | ||
2196 | |||
2197 | /* Reset load balance info */ | ||
2198 | memset(fusion->load_balance_info, 0, | ||
2199 | sizeof(struct LD_LOAD_BALANCE_INFO) | ||
2200 | *MAX_LOGICAL_DRIVES); | ||
2201 | |||
2202 | if (!megasas_get_map_info(instance)) | ||
2203 | megasas_sync_map_info(instance); | ||
2204 | |||
2205 | /* Adapter reset completed successfully */ | ||
2206 | printk(KERN_WARNING "megaraid_sas: Reset " | ||
2207 | "successful.\n"); | ||
2208 | retval = SUCCESS; | ||
2209 | goto out; | ||
2210 | } | ||
2211 | /* Reset failed, kill the adapter */ | ||
2212 | printk(KERN_WARNING "megaraid_sas: Reset failed, killing " | ||
2213 | "adapter.\n"); | ||
2214 | megaraid_sas_kill_hba(instance); | ||
2215 | retval = FAILED; | ||
2216 | } else { | ||
2217 | instance->instancet->enable_intr(instance->reg_set); | ||
2218 | instance->adprecovery = MEGASAS_HBA_OPERATIONAL; | ||
2219 | } | ||
2220 | out: | ||
2221 | clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); | ||
2222 | mutex_unlock(&instance->reset_mutex); | ||
2223 | return retval; | ||
2224 | } | ||
2225 | |||
2226 | /* Fusion OCR work queue */ | ||
2227 | void megasas_fusion_ocr_wq(struct work_struct *work) | ||
2228 | { | ||
2229 | struct megasas_instance *instance = | ||
2230 | container_of(work, struct megasas_instance, work_init); | ||
2231 | |||
2232 | megasas_reset_fusion(instance->host); | ||
2233 | } | ||
2234 | |||
2235 | struct megasas_instance_template megasas_instance_template_fusion = { | ||
2236 | .fire_cmd = megasas_fire_cmd_fusion, | ||
2237 | .enable_intr = megasas_enable_intr_fusion, | ||
2238 | .disable_intr = megasas_disable_intr_fusion, | ||
2239 | .clear_intr = megasas_clear_intr_fusion, | ||
2240 | .read_fw_status_reg = megasas_read_fw_status_reg_fusion, | ||
2241 | .adp_reset = megasas_adp_reset_fusion, | ||
2242 | .check_reset = megasas_check_reset_fusion, | ||
2243 | .service_isr = megasas_isr_fusion, | ||
2244 | .tasklet = megasas_complete_cmd_dpc_fusion, | ||
2245 | .init_adapter = megasas_init_adapter_fusion, | ||
2246 | .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion, | ||
2247 | .issue_dcmd = megasas_issue_dcmd_fusion, | ||
2248 | }; | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h new file mode 100644 index 000000000000..82b577a72c8b --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h | |||
@@ -0,0 +1,695 @@ | |||
1 | /* | ||
2 | * Linux MegaRAID driver for SAS based RAID controllers | ||
3 | * | ||
4 | * Copyright (c) 2009-2011 LSI Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version 2 | ||
9 | * of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | * FILE: megaraid_sas_fusion.h | ||
21 | * | ||
22 | * Authors: LSI Corporation | ||
23 | * Manoj Jose | ||
24 | * Sumant Patro | ||
25 | * | ||
26 | * Send feedback to: <megaraidlinux@lsi.com> | ||
27 | * | ||
28 | * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 | ||
29 | * ATTN: Linuxraid | ||
30 | */ | ||
31 | |||
32 | #ifndef _MEGARAID_SAS_FUSION_H_ | ||
33 | #define _MEGARAID_SAS_FUSION_H_ | ||
34 | |||
35 | /* Fusion defines */ | ||
36 | #define MEGASAS_MAX_SZ_CHAIN_FRAME 1024 | ||
37 | #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009) | ||
38 | #define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256 | ||
39 | #define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0 | ||
40 | #define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1 | ||
41 | #define MEGASAS_LOAD_BALANCE_FLAG 0x1 | ||
42 | #define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1 | ||
43 | #define HOST_DIAG_WRITE_ENABLE 0x80 | ||
44 | #define HOST_DIAG_RESET_ADAPTER 0x4 | ||
45 | #define MEGASAS_FUSION_MAX_RESET_TRIES 3 | ||
46 | |||
47 | /* T10 PI defines */ | ||
48 | #define MR_PROT_INFO_TYPE_CONTROLLER 0x8 | ||
49 | #define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f | ||
50 | #define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9 | ||
51 | #define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB | ||
52 | #define MEGASAS_SCSI_ADDL_CDB_LEN 0x18 | ||
53 | #define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20 | ||
54 | #define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60 | ||
55 | #define MEGASAS_EEDPBLOCKSIZE 512 | ||
56 | |||
57 | /* | ||
58 | * Raid context flags | ||
59 | */ | ||
60 | |||
61 | #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4 | ||
62 | #define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30 | ||
63 | enum MR_RAID_FLAGS_IO_SUB_TYPE { | ||
64 | MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0, | ||
65 | MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1, | ||
66 | }; | ||
67 | |||
68 | /* | ||
69 | * Request descriptor types | ||
70 | */ | ||
71 | #define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7 | ||
72 | #define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1 | ||
73 | |||
74 | #define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1 | ||
75 | |||
76 | #define MEGASAS_FP_CMD_LEN 16 | ||
77 | #define MEGASAS_FUSION_IN_RESET 0 | ||
78 | |||
79 | /* | ||
80 | * Raid Context structure which describes MegaRAID specific IO Parameters | ||
81 | * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames | ||
82 | */ | ||
83 | |||
84 | struct RAID_CONTEXT { | ||
85 | u16 resvd0; | ||
86 | u16 timeoutValue; | ||
87 | u8 regLockFlags; | ||
88 | u8 resvd1; | ||
89 | u16 VirtualDiskTgtId; | ||
90 | u64 regLockRowLBA; | ||
91 | u32 regLockLength; | ||
92 | u16 nextLMId; | ||
93 | u8 exStatus; | ||
94 | u8 status; | ||
95 | u8 RAIDFlags; | ||
96 | u8 numSGE; | ||
97 | u16 configSeqNum; | ||
98 | u8 spanArm; | ||
99 | u8 resvd2[3]; | ||
100 | }; | ||
101 | |||
102 | #define RAID_CTX_SPANARM_ARM_SHIFT (0) | ||
103 | #define RAID_CTX_SPANARM_ARM_MASK (0x1f) | ||
104 | |||
105 | #define RAID_CTX_SPANARM_SPAN_SHIFT (5) | ||
106 | #define RAID_CTX_SPANARM_SPAN_MASK (0xE0) | ||
107 | |||
108 | /* | ||
109 | * define region lock types | ||
110 | */ | ||
111 | enum REGION_TYPE { | ||
112 | REGION_TYPE_UNUSED = 0, | ||
113 | REGION_TYPE_SHARED_READ = 1, | ||
114 | REGION_TYPE_SHARED_WRITE = 2, | ||
115 | REGION_TYPE_EXCLUSIVE = 3, | ||
116 | }; | ||
117 | |||
118 | /* MPI2 defines */ | ||
119 | #define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */ | ||
120 | #define MPI2_WHOINIT_HOST_DRIVER (0x04) | ||
121 | #define MPI2_VERSION_MAJOR (0x02) | ||
122 | #define MPI2_VERSION_MINOR (0x00) | ||
123 | #define MPI2_VERSION_MAJOR_MASK (0xFF00) | ||
124 | #define MPI2_VERSION_MAJOR_SHIFT (8) | ||
125 | #define MPI2_VERSION_MINOR_MASK (0x00FF) | ||
126 | #define MPI2_VERSION_MINOR_SHIFT (0) | ||
127 | #define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ | ||
128 | MPI2_VERSION_MINOR) | ||
129 | #define MPI2_HEADER_VERSION_UNIT (0x10) | ||
130 | #define MPI2_HEADER_VERSION_DEV (0x00) | ||
131 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | ||
132 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | ||
133 | #define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) | ||
134 | #define MPI2_HEADER_VERSION_DEV_SHIFT (0) | ||
135 | #define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ | ||
136 | MPI2_HEADER_VERSION_DEV) | ||
137 | #define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) | ||
138 | #define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) | ||
139 | #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) | ||
140 | #define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) | ||
141 | #define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) | ||
142 | #define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) | ||
143 | #define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) | ||
144 | #define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */ | ||
145 | #define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) | ||
146 | #define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) | ||
147 | #define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) | ||
148 | #define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) | ||
149 | #define MPI2_SCSIIO_CONTROL_READ (0x02000000) | ||
150 | #define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) | ||
151 | #define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) | ||
152 | #define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) | ||
153 | #define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) | ||
154 | #define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) | ||
155 | #define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) | ||
156 | #define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) | ||
157 | #define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) | ||
158 | #define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) | ||
159 | #define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) | ||
160 | #define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) | ||
161 | #define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) | ||
162 | |||
163 | struct MPI25_IEEE_SGE_CHAIN64 { | ||
164 | u64 Address; | ||
165 | u32 Length; | ||
166 | u16 Reserved1; | ||
167 | u8 NextChainOffset; | ||
168 | u8 Flags; | ||
169 | }; | ||
170 | |||
171 | struct MPI2_SGE_SIMPLE_UNION { | ||
172 | u32 FlagsLength; | ||
173 | union { | ||
174 | u32 Address32; | ||
175 | u64 Address64; | ||
176 | } u; | ||
177 | }; | ||
178 | |||
179 | struct MPI2_SCSI_IO_CDB_EEDP32 { | ||
180 | u8 CDB[20]; /* 0x00 */ | ||
181 | u32 PrimaryReferenceTag; /* 0x14 */ | ||
182 | u16 PrimaryApplicationTag; /* 0x18 */ | ||
183 | u16 PrimaryApplicationTagMask; /* 0x1A */ | ||
184 | u32 TransferLength; /* 0x1C */ | ||
185 | }; | ||
186 | |||
187 | struct MPI2_SGE_CHAIN_UNION { | ||
188 | u16 Length; | ||
189 | u8 NextChainOffset; | ||
190 | u8 Flags; | ||
191 | union { | ||
192 | u32 Address32; | ||
193 | u64 Address64; | ||
194 | } u; | ||
195 | }; | ||
196 | |||
197 | struct MPI2_IEEE_SGE_SIMPLE32 { | ||
198 | u32 Address; | ||
199 | u32 FlagsLength; | ||
200 | }; | ||
201 | |||
202 | struct MPI2_IEEE_SGE_CHAIN32 { | ||
203 | u32 Address; | ||
204 | u32 FlagsLength; | ||
205 | }; | ||
206 | |||
207 | struct MPI2_IEEE_SGE_SIMPLE64 { | ||
208 | u64 Address; | ||
209 | u32 Length; | ||
210 | u16 Reserved1; | ||
211 | u8 Reserved2; | ||
212 | u8 Flags; | ||
213 | }; | ||
214 | |||
215 | struct MPI2_IEEE_SGE_CHAIN64 { | ||
216 | u64 Address; | ||
217 | u32 Length; | ||
218 | u16 Reserved1; | ||
219 | u8 Reserved2; | ||
220 | u8 Flags; | ||
221 | }; | ||
222 | |||
223 | union MPI2_IEEE_SGE_SIMPLE_UNION { | ||
224 | struct MPI2_IEEE_SGE_SIMPLE32 Simple32; | ||
225 | struct MPI2_IEEE_SGE_SIMPLE64 Simple64; | ||
226 | }; | ||
227 | |||
228 | union MPI2_IEEE_SGE_CHAIN_UNION { | ||
229 | struct MPI2_IEEE_SGE_CHAIN32 Chain32; | ||
230 | struct MPI2_IEEE_SGE_CHAIN64 Chain64; | ||
231 | }; | ||
232 | |||
233 | union MPI2_SGE_IO_UNION { | ||
234 | struct MPI2_SGE_SIMPLE_UNION MpiSimple; | ||
235 | struct MPI2_SGE_CHAIN_UNION MpiChain; | ||
236 | union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; | ||
237 | union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; | ||
238 | }; | ||
239 | |||
240 | union MPI2_SCSI_IO_CDB_UNION { | ||
241 | u8 CDB32[32]; | ||
242 | struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32; | ||
243 | struct MPI2_SGE_SIMPLE_UNION SGE; | ||
244 | }; | ||
245 | |||
246 | /* | ||
247 | * RAID SCSI IO Request Message | ||
248 | * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST | ||
249 | */ | ||
250 | struct MPI2_RAID_SCSI_IO_REQUEST { | ||
251 | u16 DevHandle; /* 0x00 */ | ||
252 | u8 ChainOffset; /* 0x02 */ | ||
253 | u8 Function; /* 0x03 */ | ||
254 | u16 Reserved1; /* 0x04 */ | ||
255 | u8 Reserved2; /* 0x06 */ | ||
256 | u8 MsgFlags; /* 0x07 */ | ||
257 | u8 VP_ID; /* 0x08 */ | ||
258 | u8 VF_ID; /* 0x09 */ | ||
259 | u16 Reserved3; /* 0x0A */ | ||
260 | u32 SenseBufferLowAddress; /* 0x0C */ | ||
261 | u16 SGLFlags; /* 0x10 */ | ||
262 | u8 SenseBufferLength; /* 0x12 */ | ||
263 | u8 Reserved4; /* 0x13 */ | ||
264 | u8 SGLOffset0; /* 0x14 */ | ||
265 | u8 SGLOffset1; /* 0x15 */ | ||
266 | u8 SGLOffset2; /* 0x16 */ | ||
267 | u8 SGLOffset3; /* 0x17 */ | ||
268 | u32 SkipCount; /* 0x18 */ | ||
269 | u32 DataLength; /* 0x1C */ | ||
270 | u32 BidirectionalDataLength; /* 0x20 */ | ||
271 | u16 IoFlags; /* 0x24 */ | ||
272 | u16 EEDPFlags; /* 0x26 */ | ||
273 | u32 EEDPBlockSize; /* 0x28 */ | ||
274 | u32 SecondaryReferenceTag; /* 0x2C */ | ||
275 | u16 SecondaryApplicationTag; /* 0x30 */ | ||
276 | u16 ApplicationTagTranslationMask; /* 0x32 */ | ||
277 | u8 LUN[8]; /* 0x34 */ | ||
278 | u32 Control; /* 0x3C */ | ||
279 | union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ | ||
280 | struct RAID_CONTEXT RaidContext; /* 0x60 */ | ||
281 | union MPI2_SGE_IO_UNION SGL; /* 0x80 */ | ||
282 | }; | ||
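The Control field above carries the data direction via the MPI2_SCSIIO_CONTROL_READ/WRITE bits defined earlier in this header. A minimal sketch of filling the direction and byte count for a read, assuming io_request and num_bytes come from the caller (endianness conversion is omitted for brevity):

    /* sketch only: mark a fusion RAID SCSI IO request as a read */
    static void fill_read_control(struct MPI2_RAID_SCSI_IO_REQUEST *io_request,
                                  u32 num_bytes)
    {
            io_request->DataLength = num_bytes;
            io_request->Control = MPI2_SCSIIO_CONTROL_READ; /* IOC-to-host data */
    }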
283 | |||
284 | /* | ||
285 | * MPT RAID MFA IO Descriptor. | ||
286 | */ | ||
287 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { | ||
288 | u32 RequestFlags:8; | ||
289 | u32 MessageAddress1:24; /* bits 31:8*/ | ||
290 | u32 MessageAddress2; /* bits 61:32 */ | ||
291 | }; | ||
292 | |||
293 | /* Default Request Descriptor */ | ||
294 | struct MPI2_DEFAULT_REQUEST_DESCRIPTOR { | ||
295 | u8 RequestFlags; /* 0x00 */ | ||
296 | u8 MSIxIndex; /* 0x01 */ | ||
297 | u16 SMID; /* 0x02 */ | ||
298 | u16 LMID; /* 0x04 */ | ||
299 | u16 DescriptorTypeDependent; /* 0x06 */ | ||
300 | }; | ||
301 | |||
302 | /* High Priority Request Descriptor */ | ||
303 | struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { | ||
304 | u8 RequestFlags; /* 0x00 */ | ||
305 | u8 MSIxIndex; /* 0x01 */ | ||
306 | u16 SMID; /* 0x02 */ | ||
307 | u16 LMID; /* 0x04 */ | ||
308 | u16 Reserved1; /* 0x06 */ | ||
309 | }; | ||
310 | |||
311 | /* SCSI IO Request Descriptor */ | ||
312 | struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR { | ||
313 | u8 RequestFlags; /* 0x00 */ | ||
314 | u8 MSIxIndex; /* 0x01 */ | ||
315 | u16 SMID; /* 0x02 */ | ||
316 | u16 LMID; /* 0x04 */ | ||
317 | u16 DevHandle; /* 0x06 */ | ||
318 | }; | ||
319 | |||
320 | /* SCSI Target Request Descriptor */ | ||
321 | struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { | ||
322 | u8 RequestFlags; /* 0x00 */ | ||
323 | u8 MSIxIndex; /* 0x01 */ | ||
324 | u16 SMID; /* 0x02 */ | ||
325 | u16 LMID; /* 0x04 */ | ||
326 | u16 IoIndex; /* 0x06 */ | ||
327 | }; | ||
328 | |||
329 | /* RAID Accelerator Request Descriptor */ | ||
330 | struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { | ||
331 | u8 RequestFlags; /* 0x00 */ | ||
332 | u8 MSIxIndex; /* 0x01 */ | ||
333 | u16 SMID; /* 0x02 */ | ||
334 | u16 LMID; /* 0x04 */ | ||
335 | u16 Reserved; /* 0x06 */ | ||
336 | }; | ||
337 | |||
338 | /* union of Request Descriptors */ | ||
339 | union MEGASAS_REQUEST_DESCRIPTOR_UNION { | ||
340 | struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; | ||
341 | struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; | ||
342 | struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; | ||
343 | struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; | ||
344 | struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; | ||
345 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo; | ||
346 | union { | ||
347 | struct { | ||
348 | u32 low; | ||
349 | u32 high; | ||
350 | } u; | ||
351 | u64 Words; | ||
352 | }; | ||
353 | }; | ||
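The trailing anonymous low/high pair mirrors the 64-bit Words member so a descriptor can be posted as two 32-bit MMIO writes. A hedged sketch of building and posting a SCSI IO descriptor; smid, devhandle and the inbound queue port register names are assumptions, not taken from this header:

    union MEGASAS_REQUEST_DESCRIPTOR_UNION desc;

    desc.Words = 0;
    desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
    desc.SCSIIO.SMID = smid;            /* assumed: SMID of the prepared frame */
    desc.SCSIIO.DevHandle = devhandle;  /* assumed: target device handle */

    writel(desc.u.low, &regs->inbound_low_queue_port);   /* register names assumed */
    writel(desc.u.high, &regs->inbound_high_queue_port);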
354 | |||
355 | /* Default Reply Descriptor */ | ||
356 | struct MPI2_DEFAULT_REPLY_DESCRIPTOR { | ||
357 | u8 ReplyFlags; /* 0x00 */ | ||
358 | u8 MSIxIndex; /* 0x01 */ | ||
359 | u16 DescriptorTypeDependent1; /* 0x02 */ | ||
360 | u32 DescriptorTypeDependent2; /* 0x04 */ | ||
361 | }; | ||
362 | |||
363 | /* Address Reply Descriptor */ | ||
364 | struct MPI2_ADDRESS_REPLY_DESCRIPTOR { | ||
365 | u8 ReplyFlags; /* 0x00 */ | ||
366 | u8 MSIxIndex; /* 0x01 */ | ||
367 | u16 SMID; /* 0x02 */ | ||
368 | u32 ReplyFrameAddress; /* 0x04 */ | ||
369 | }; | ||
370 | |||
371 | /* SCSI IO Success Reply Descriptor */ | ||
372 | struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { | ||
373 | u8 ReplyFlags; /* 0x00 */ | ||
374 | u8 MSIxIndex; /* 0x01 */ | ||
375 | u16 SMID; /* 0x02 */ | ||
376 | u16 TaskTag; /* 0x04 */ | ||
377 | u16 Reserved1; /* 0x06 */ | ||
378 | }; | ||
379 | |||
380 | /* TargetAssist Success Reply Descriptor */ | ||
381 | struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { | ||
382 | u8 ReplyFlags; /* 0x00 */ | ||
383 | u8 MSIxIndex; /* 0x01 */ | ||
384 | u16 SMID; /* 0x02 */ | ||
385 | u8 SequenceNumber; /* 0x04 */ | ||
386 | u8 Reserved1; /* 0x05 */ | ||
387 | u16 IoIndex; /* 0x06 */ | ||
388 | }; | ||
389 | |||
390 | /* Target Command Buffer Reply Descriptor */ | ||
391 | struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { | ||
392 | u8 ReplyFlags; /* 0x00 */ | ||
393 | u8 MSIxIndex; /* 0x01 */ | ||
394 | u8 VP_ID; /* 0x02 */ | ||
395 | u8 Flags; /* 0x03 */ | ||
396 | u16 InitiatorDevHandle; /* 0x04 */ | ||
397 | u16 IoIndex; /* 0x06 */ | ||
398 | }; | ||
399 | |||
400 | /* RAID Accelerator Success Reply Descriptor */ | ||
401 | struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { | ||
402 | u8 ReplyFlags; /* 0x00 */ | ||
403 | u8 MSIxIndex; /* 0x01 */ | ||
404 | u16 SMID; /* 0x02 */ | ||
405 | u32 Reserved; /* 0x04 */ | ||
406 | }; | ||
407 | |||
408 | /* union of Reply Descriptors */ | ||
409 | union MPI2_REPLY_DESCRIPTORS_UNION { | ||
410 | struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default; | ||
411 | struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; | ||
412 | struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; | ||
413 | struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; | ||
414 | struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; | ||
415 | struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR | ||
416 | RAIDAcceleratorSuccess; | ||
417 | u64 Words; | ||
418 | }; | ||
419 | |||
420 | /* IOCInit Request message */ | ||
421 | struct MPI2_IOC_INIT_REQUEST { | ||
422 | u8 WhoInit; /* 0x00 */ | ||
423 | u8 Reserved1; /* 0x01 */ | ||
424 | u8 ChainOffset; /* 0x02 */ | ||
425 | u8 Function; /* 0x03 */ | ||
426 | u16 Reserved2; /* 0x04 */ | ||
427 | u8 Reserved3; /* 0x06 */ | ||
428 | u8 MsgFlags; /* 0x07 */ | ||
429 | u8 VP_ID; /* 0x08 */ | ||
430 | u8 VF_ID; /* 0x09 */ | ||
431 | u16 Reserved4; /* 0x0A */ | ||
432 | u16 MsgVersion; /* 0x0C */ | ||
433 | u16 HeaderVersion; /* 0x0E */ | ||
434 | u32 Reserved5; /* 0x10 */ | ||
435 | u16 Reserved6; /* 0x14 */ | ||
436 | u8 Reserved7; /* 0x16 */ | ||
437 | u8 HostMSIxVectors; /* 0x17 */ | ||
438 | u16 Reserved8; /* 0x18 */ | ||
439 | u16 SystemRequestFrameSize; /* 0x1A */ | ||
440 | u16 ReplyDescriptorPostQueueDepth; /* 0x1C */ | ||
441 | u16 ReplyFreeQueueDepth; /* 0x1E */ | ||
442 | u32 SenseBufferAddressHigh; /* 0x20 */ | ||
443 | u32 SystemReplyAddressHigh; /* 0x24 */ | ||
444 | u64 SystemRequestFrameBaseAddress; /* 0x28 */ | ||
445 | u64 ReplyDescriptorPostQueueAddress;/* 0x30 */ | ||
446 | u64 ReplyFreeQueueAddress; /* 0x38 */ | ||
447 | u64 TimeStamp; /* 0x40 */ | ||
448 | }; | ||
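The driver fills this frame once at start of day with the sizes and DMA addresses of the queues it has allocated. A hedged sketch using the fusion_context fields defined later in this header; the WhoInit/Function constants and the helper returning the frame are assumptions:

    struct MPI2_IOC_INIT_REQUEST *init = get_ioc_init_frame(instance); /* assumed helper */
    struct fusion_context *fusion = instance->ctrl_context;            /* assumed field */

    init->Function = MPI2_FUNCTION_IOC_INIT;                  /* assumed define */
    init->WhoInit  = MPI2_WHOINIT_HOST_DRIVER;                /* assumed define */
    init->ReplyDescriptorPostQueueDepth   = fusion->reply_q_depth;
    init->ReplyDescriptorPostQueueAddress = fusion->reply_frames_desc_phys;
    init->SystemRequestFrameBaseAddress   = fusion->io_request_frames_phys;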
449 | |||
450 | /* mrpriv defines */ | ||
451 | #define MR_PD_INVALID 0xFFFF | ||
452 | #define MAX_SPAN_DEPTH 8 | ||
453 | #define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH) | ||
454 | #define MAX_ROW_SIZE 32 | ||
455 | #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) | ||
456 | #define MAX_LOGICAL_DRIVES 64 | ||
457 | #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) | ||
458 | #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) | ||
459 | #define MAX_ARRAYS 128 | ||
460 | #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) | ||
461 | #define MAX_PHYSICAL_DEVICES 256 | ||
462 | #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) | ||
463 | #define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 | ||
464 | |||
465 | struct MR_DEV_HANDLE_INFO { | ||
466 | u16 curDevHdl; | ||
467 | u8 validHandles; | ||
468 | u8 reserved; | ||
469 | u16 devHandle[2]; | ||
470 | }; | ||
471 | |||
472 | struct MR_ARRAY_INFO { | ||
473 | u16 pd[MAX_RAIDMAP_ROW_SIZE]; | ||
474 | }; | ||
475 | |||
476 | struct MR_QUAD_ELEMENT { | ||
477 | u64 logStart; | ||
478 | u64 logEnd; | ||
479 | u64 offsetInSpan; | ||
480 | u32 diff; | ||
481 | u32 reserved1; | ||
482 | }; | ||
483 | |||
484 | struct MR_SPAN_INFO { | ||
485 | u32 noElements; | ||
486 | u32 reserved1; | ||
487 | struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH]; | ||
488 | }; | ||
489 | |||
490 | struct MR_LD_SPAN { | ||
491 | u64 startBlk; | ||
492 | u64 numBlks; | ||
493 | u16 arrayRef; | ||
494 | u8 reserved[6]; | ||
495 | }; | ||
496 | |||
497 | struct MR_SPAN_BLOCK_INFO { | ||
498 | u64 num_rows; | ||
499 | struct MR_LD_SPAN span; | ||
500 | struct MR_SPAN_INFO block_span_info; | ||
501 | }; | ||
502 | |||
503 | struct MR_LD_RAID { | ||
504 | struct { | ||
505 | u32 fpCapable:1; | ||
506 | u32 reserved5:3; | ||
507 | u32 ldPiMode:4; | ||
508 | u32 pdPiMode:4; | ||
509 | u32 encryptionType:8; | ||
510 | u32 fpWriteCapable:1; | ||
511 | u32 fpReadCapable:1; | ||
512 | u32 fpWriteAcrossStripe:1; | ||
513 | u32 fpReadAcrossStripe:1; | ||
514 | u32 reserved4:8; | ||
515 | } capability; | ||
516 | u32 reserved6; | ||
517 | u64 size; | ||
518 | u8 spanDepth; | ||
519 | u8 level; | ||
520 | u8 stripeShift; | ||
521 | u8 rowSize; | ||
522 | u8 rowDataSize; | ||
523 | u8 writeMode; | ||
524 | u8 PRL; | ||
525 | u8 SRL; | ||
526 | u16 targetId; | ||
527 | u8 ldState; | ||
528 | u8 regTypeReqOnWrite; | ||
529 | u8 modFactor; | ||
530 | u8 reserved2[1]; | ||
531 | u16 seqNum; | ||
532 | |||
533 | struct { | ||
534 | u32 ldSyncRequired:1; | ||
535 | u32 reserved:31; | ||
536 | } flags; | ||
537 | |||
538 | u8 reserved3[0x5C]; | ||
539 | }; | ||
540 | |||
541 | struct MR_LD_SPAN_MAP { | ||
542 | struct MR_LD_RAID ldRaid; | ||
543 | u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE]; | ||
544 | struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH]; | ||
545 | }; | ||
546 | |||
547 | struct MR_FW_RAID_MAP { | ||
548 | u32 totalSize; | ||
549 | union { | ||
550 | struct { | ||
551 | u32 maxLd; | ||
552 | u32 maxSpanDepth; | ||
553 | u32 maxRowSize; | ||
554 | u32 maxPdCount; | ||
555 | u32 maxArrays; | ||
556 | } validationInfo; | ||
557 | u32 version[5]; | ||
558 | u32 reserved1[5]; | ||
559 | }; | ||
560 | |||
561 | u32 ldCount; | ||
562 | u32 Reserved1; | ||
563 | u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES+ | ||
564 | MAX_RAIDMAP_VIEWS]; | ||
565 | u8 fpPdIoTimeoutSec; | ||
566 | u8 reserved2[7]; | ||
567 | struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS]; | ||
568 | struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES]; | ||
569 | struct MR_LD_SPAN_MAP ldSpanMap[1]; | ||
570 | }; | ||
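ldSpanMap[1] is a variable-length trailing array: the firmware reports totalSize, and the worst-case buffer is sized via MR_FW_RAID_MAP_ALL further down. A sketch of the size validation a driver might perform on a freshly uploaded map (pure arithmetic on the structures above):

    u32 expected = sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
                   map->ldCount * sizeof(struct MR_LD_SPAN_MAP);

    if (map->totalSize != expected)
            return -EINVAL;     /* stale or truncated RAID map */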
571 | |||
572 | struct IO_REQUEST_INFO { | ||
573 | u64 ldStartBlock; | ||
574 | u32 numBlocks; | ||
575 | u16 ldTgtId; | ||
576 | u8 isRead; | ||
577 | u16 devHandle; | ||
578 | u64 pdBlock; | ||
579 | u8 fpOkForIo; | ||
580 | }; | ||
581 | |||
582 | struct MR_LD_TARGET_SYNC { | ||
583 | u8 targetId; | ||
584 | u8 reserved; | ||
585 | u16 seqNum; | ||
586 | }; | ||
587 | |||
588 | #define IEEE_SGE_FLAGS_ADDR_MASK (0x03) | ||
589 | #define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) | ||
590 | #define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) | ||
591 | #define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) | ||
592 | #define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) | ||
593 | #define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) | ||
594 | #define IEEE_SGE_FLAGS_END_OF_LIST (0x40) | ||
595 | |||
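The last element of an IEEE SGL carries both the end-of-list marker and the address-type bits. A minimal sketch of terminating a list, assuming sge points at the final MPI25_IEEE_SGE_CHAIN64 element and last_dma/last_len describe the last data segment:

    sge->Address = last_dma;
    sge->Length  = last_len;
    sge->Flags   = IEEE_SGE_FLAGS_END_OF_LIST | IEEE_SGE_FLAGS_SYSTEM_ADDR;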
596 | struct megasas_register_set; | ||
597 | struct megasas_instance; | ||
598 | |||
599 | union desc_word { | ||
600 | u64 word; | ||
601 | struct { | ||
602 | u32 low; | ||
603 | u32 high; | ||
604 | } u; | ||
605 | }; | ||
606 | |||
607 | struct megasas_cmd_fusion { | ||
608 | struct MPI2_RAID_SCSI_IO_REQUEST *io_request; | ||
609 | dma_addr_t io_request_phys_addr; | ||
610 | |||
611 | union MPI2_SGE_IO_UNION *sg_frame; | ||
612 | dma_addr_t sg_frame_phys_addr; | ||
613 | |||
614 | u8 *sense; | ||
615 | dma_addr_t sense_phys_addr; | ||
616 | |||
617 | struct list_head list; | ||
618 | struct scsi_cmnd *scmd; | ||
619 | struct megasas_instance *instance; | ||
620 | |||
621 | u8 retry_for_fw_reset; | ||
622 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc; | ||
623 | |||
624 | /* | ||
625 | * Context for an MFI frame. | ||
626 | * Used to get the MFI cmd from the list when an MFI cmd is completed. | ||
627 | */ | ||
628 | u32 sync_cmd_idx; | ||
629 | u32 index; | ||
630 | u8 flags; | ||
631 | }; | ||
632 | |||
633 | struct LD_LOAD_BALANCE_INFO { | ||
634 | u8 loadBalanceFlag; | ||
635 | u8 reserved1; | ||
636 | u16 raid1DevHandle[2]; | ||
637 | atomic_t scsi_pending_cmds[2]; | ||
638 | u64 last_accessed_block[2]; | ||
639 | }; | ||
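For RAID1 fast-path reads either arm can satisfy the IO, so the two device handles and pending-command counters allow per-IO steering. A hedged sketch of one possible arm-selection test; the real driver's heuristic (which could also weigh last_accessed_block) is not shown in this header:

    /* sketch: pick the RAID1 arm with fewer outstanding commands */
    static u8 pick_arm(struct LD_LOAD_BALANCE_INFO *lbinfo)
    {
            return (atomic_read(&lbinfo->scsi_pending_cmds[0]) <=
                    atomic_read(&lbinfo->scsi_pending_cmds[1])) ? 0 : 1;
    }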
640 | |||
641 | struct MR_FW_RAID_MAP_ALL { | ||
642 | struct MR_FW_RAID_MAP raidMap; | ||
643 | struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1]; | ||
644 | } __attribute__ ((packed)); | ||
645 | |||
646 | struct fusion_context { | ||
647 | struct megasas_cmd_fusion **cmd_list; | ||
648 | struct list_head cmd_pool; | ||
649 | |||
650 | spinlock_t cmd_pool_lock; | ||
651 | |||
652 | dma_addr_t req_frames_desc_phys; | ||
653 | u8 *req_frames_desc; | ||
654 | |||
655 | struct dma_pool *io_request_frames_pool; | ||
656 | dma_addr_t io_request_frames_phys; | ||
657 | u8 *io_request_frames; | ||
658 | |||
659 | struct dma_pool *sg_dma_pool; | ||
660 | struct dma_pool *sense_dma_pool; | ||
661 | |||
662 | dma_addr_t reply_frames_desc_phys; | ||
663 | union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc; | ||
664 | struct dma_pool *reply_frames_desc_pool; | ||
665 | |||
666 | u16 last_reply_idx; | ||
667 | |||
668 | u32 reply_q_depth; | ||
669 | u32 request_alloc_sz; | ||
670 | u32 reply_alloc_sz; | ||
671 | u32 io_frames_alloc_sz; | ||
672 | |||
673 | u16 max_sge_in_main_msg; | ||
674 | u16 max_sge_in_chain; | ||
675 | |||
676 | u8 chain_offset_io_request; | ||
677 | u8 chain_offset_mfi_pthru; | ||
678 | |||
679 | struct MR_FW_RAID_MAP_ALL *ld_map[2]; | ||
680 | dma_addr_t ld_map_phys[2]; | ||
681 | |||
682 | u32 map_sz; | ||
683 | u8 fast_path_io; | ||
684 | struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES]; | ||
685 | }; | ||
686 | |||
687 | union desc_value { | ||
688 | u64 word; | ||
689 | struct { | ||
690 | u32 low; | ||
691 | u32 high; | ||
692 | } u; | ||
693 | }; | ||
694 | |||
695 | #endif /* _MEGARAID_SAS_FUSION_H_ */ | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 4b1c2f0350f9..8be75e65f763 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h | |||
@@ -8,7 +8,7 @@ | |||
8 | * scatter/gather formats. | 8 | * scatter/gather formats. |
9 | * Creation Date: June 21, 2006 | 9 | * Creation Date: June 21, 2006 |
10 | * | 10 | * |
11 | * mpi2.h Version: 02.00.15 | 11 | * mpi2.h Version: 02.00.16 |
12 | * | 12 | * |
13 | * Version History | 13 | * Version History |
14 | * --------------- | 14 | * --------------- |
@@ -61,6 +61,8 @@ | |||
61 | * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. | 61 | * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. |
62 | * Added defines for product-specific range of message | 62 | * Added defines for product-specific range of message |
63 | * function codes, 0xF0 to 0xFF. | 63 | * function codes, 0xF0 to 0xFF. |
64 | * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. | ||
65 | * Added alternative defines for the SGE Direction bit. | ||
64 | * -------------------------------------------------------------------------- | 66 | * -------------------------------------------------------------------------- |
65 | */ | 67 | */ |
66 | 68 | ||
@@ -86,7 +88,7 @@ | |||
86 | #define MPI2_VERSION_02_00 (0x0200) | 88 | #define MPI2_VERSION_02_00 (0x0200) |
87 | 89 | ||
88 | /* versioning for this MPI header set */ | 90 | /* versioning for this MPI header set */ |
89 | #define MPI2_HEADER_VERSION_UNIT (0x0F) | 91 | #define MPI2_HEADER_VERSION_UNIT (0x10) |
90 | #define MPI2_HEADER_VERSION_DEV (0x00) | 92 | #define MPI2_HEADER_VERSION_DEV (0x00) |
91 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) | 93 | #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) |
92 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) | 94 | #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) |
@@ -929,6 +931,9 @@ typedef struct _MPI2_MPI_SGE_UNION | |||
929 | #define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) | 931 | #define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) |
930 | #define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) | 932 | #define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) |
931 | 933 | ||
934 | #define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) | ||
935 | #define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) | ||
936 | |||
932 | /* Address Size */ | 937 | /* Address Size */ |
933 | 938 | ||
934 | #define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) | 939 | #define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index e3728d736d85..d76a65847603 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI Configuration messages and pages | 6 | * Title: MPI Configuration messages and pages |
7 | * Creation Date: November 10, 2006 | 7 | * Creation Date: November 10, 2006 |
8 | * | 8 | * |
9 | * mpi2_cnfg.h Version: 02.00.14 | 9 | * mpi2_cnfg.h Version: 02.00.15 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -121,6 +121,10 @@ | |||
121 | * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. | 121 | * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. |
122 | * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. | 122 | * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. |
123 | * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. | 123 | * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. |
124 | * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT | ||
125 | * define. | ||
126 | * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. | ||
127 | * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. | ||
124 | * -------------------------------------------------------------------------- | 128 | * -------------------------------------------------------------------------- |
125 | */ | 129 | */ |
126 | 130 | ||
@@ -333,7 +337,7 @@ typedef struct _MPI2_CONFIG_REQUEST | |||
333 | #define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) | 337 | #define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) |
334 | #define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) | 338 | #define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) |
335 | 339 | ||
336 | /* values for SGLFlags field are in the SGL section of mpi2.h */ | 340 | /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ |
337 | 341 | ||
338 | 342 | ||
339 | /* Config Reply Message */ | 343 | /* Config Reply Message */ |
@@ -379,6 +383,8 @@ typedef struct _MPI2_CONFIG_REPLY | |||
379 | #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) | 383 | #define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) |
380 | #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) | 384 | #define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) |
381 | 385 | ||
386 | #define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) | ||
387 | |||
382 | #define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) | 388 | #define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) |
383 | #define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) | 389 | #define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) |
384 | #define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) | 390 | #define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) |
@@ -390,6 +396,8 @@ typedef struct _MPI2_CONFIG_REPLY | |||
390 | #define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) | 396 | #define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) |
391 | 397 | ||
392 | 398 | ||
399 | |||
400 | |||
393 | /* Manufacturing Page 0 */ | 401 | /* Manufacturing Page 0 */ |
394 | 402 | ||
395 | typedef struct _MPI2_CONFIG_PAGE_MAN_0 | 403 | typedef struct _MPI2_CONFIG_PAGE_MAN_0 |
@@ -729,6 +737,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 | |||
729 | /* IO Unit Page 1 Flags defines */ | 737 | /* IO Unit Page 1 Flags defines */ |
730 | #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) | 738 | #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) |
731 | #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) | 739 | #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) |
740 | #define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) | ||
732 | #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) | 741 | #define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) |
733 | #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) | 742 | #define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) |
734 | #define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) | 743 | #define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) |
@@ -1347,6 +1356,7 @@ typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 | |||
1347 | #define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) | 1356 | #define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) |
1348 | #define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) | 1357 | #define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) |
1349 | #define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) | 1358 | #define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) |
1359 | #define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) | ||
1350 | #define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) | 1360 | #define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) |
1351 | #define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) | 1361 | #define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) |
1352 | #define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) | 1362 | #define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) |
@@ -1469,11 +1479,15 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 | |||
1469 | #define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) | 1479 | #define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) |
1470 | #define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) | 1480 | #define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) |
1471 | #define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) | 1481 | #define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) |
1482 | #define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) | ||
1472 | #define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) | 1483 | #define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) |
1473 | 1484 | ||
1474 | /* PhysDiskAttributes defines */ | 1485 | /* PhysDiskAttributes defines */ |
1486 | #define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) | ||
1475 | #define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) | 1487 | #define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) |
1476 | #define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) | 1488 | #define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) |
1489 | |||
1490 | #define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) | ||
1477 | #define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) | 1491 | #define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) |
1478 | #define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) | 1492 | #define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) |
1479 | 1493 | ||
@@ -1545,6 +1559,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 | |||
1545 | #define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) | 1559 | #define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) |
1546 | #define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) | 1560 | #define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) |
1547 | #define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) | 1561 | #define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) |
1562 | #define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) | ||
1548 | #define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) | 1563 | #define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) |
1549 | #define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) | 1564 | #define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) |
1550 | #define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) | 1565 | #define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) |
@@ -1571,6 +1586,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 | |||
1571 | #define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) | 1586 | #define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) |
1572 | 1587 | ||
1573 | #define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) | 1588 | #define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) |
1589 | #define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) | ||
1574 | #define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) | 1590 | #define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) |
1575 | #define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) | 1591 | #define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) |
1576 | #define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) | 1592 | #define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt index bd6c92b5fae5..b1e88f26b748 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt +++ b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt | |||
@@ -291,6 +291,7 @@ mpi2_raid.h | |||
291 | * can be sized by the build environment. | 291 | * can be sized by the build environment. |
292 | * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of | 292 | * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of |
293 | * VolumeCreationFlags and marked the old one as obsolete. | 293 | * VolumeCreationFlags and marked the old one as obsolete. |
294 | * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. | ||
294 | * -------------------------------------------------------------------------- | 295 | * -------------------------------------------------------------------------- |
295 | 296 | ||
296 | mpi2_sas.h | 297 | mpi2_sas.h |
@@ -301,6 +302,7 @@ mpi2_sas.h | |||
301 | * Request. | 302 | * Request. |
302 | * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST | 303 | * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST |
303 | * to MPI2_SGE_IO_UNION since it supports chained SGLs. | 304 | * to MPI2_SGE_IO_UNION since it supports chained SGLs. |
305 | * 05-12-10 02.00.04 Modified some comments. | ||
304 | * -------------------------------------------------------------------------- | 306 | * -------------------------------------------------------------------------- |
305 | 307 | ||
306 | mpi2_targ.h | 308 | mpi2_targ.h |
@@ -324,6 +326,7 @@ mpi2_tool.h | |||
324 | * and reply messages. | 326 | * and reply messages. |
325 | * Added MPI2_DIAG_BUF_TYPE_EXTENDED. | 327 | * Added MPI2_DIAG_BUF_TYPE_EXTENDED. |
326 | * Incremented MPI2_DIAG_BUF_TYPE_COUNT. | 328 | * Incremented MPI2_DIAG_BUF_TYPE_COUNT. |
329 | * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. | ||
327 | * -------------------------------------------------------------------------- | 330 | * -------------------------------------------------------------------------- |
328 | 331 | ||
329 | mpi2_type.h | 332 | mpi2_type.h |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h index c4c99dfcb820..20e6b8869341 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI SCSI initiator mode messages and structures | 6 | * Title: MPI SCSI initiator mode messages and structures |
7 | * Creation Date: June 23, 2006 | 7 | * Creation Date: June 23, 2006 |
8 | * | 8 | * |
9 | * mpi2_init.h Version: 02.00.09 | 9 | * mpi2_init.h Version: 02.00.10 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -32,6 +32,7 @@ | |||
32 | * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. | 32 | * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. |
33 | * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. | 33 | * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. |
34 | * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. | 34 | * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. |
35 | * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. | ||
35 | * -------------------------------------------------------------------------- | 36 | * -------------------------------------------------------------------------- |
36 | */ | 37 | */ |
37 | 38 | ||
@@ -98,7 +99,13 @@ typedef struct _MPI2_SCSI_IO_REQUEST | |||
98 | U8 LUN[8]; /* 0x34 */ | 99 | U8 LUN[8]; /* 0x34 */ |
99 | U32 Control; /* 0x3C */ | 100 | U32 Control; /* 0x3C */ |
100 | MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ | 101 | MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ |
102 | |||
103 | #ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /* typically this is left undefined */ | ||
104 | MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; | ||
105 | #endif | ||
106 | |||
101 | MPI2_SGE_IO_UNION SGL; /* 0x60 */ | 107 | MPI2_SGE_IO_UNION SGL; /* 0x60 */ |
108 | |||
102 | } MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST, | 109 | } MPI2_SCSI_IO_REQUEST, MPI2_POINTER PTR_MPI2_SCSI_IO_REQUEST, |
103 | Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t; | 110 | Mpi2SCSIIORequest_t, MPI2_POINTER pMpi2SCSIIORequest_t; |
104 | 111 | ||
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h index 495bedc4d1f7..761cbdb8a033 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages | 6 | * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages |
7 | * Creation Date: October 11, 2006 | 7 | * Creation Date: October 11, 2006 |
8 | * | 8 | * |
9 | * mpi2_ioc.h Version: 02.00.14 | 9 | * mpi2_ioc.h Version: 02.00.15 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -101,6 +101,8 @@ | |||
101 | * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. | 101 | * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. |
102 | * Added PowerManagementControl Request structures and | 102 | * Added PowerManagementControl Request structures and |
103 | * defines. | 103 | * defines. |
104 | * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. | ||
105 | * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. | ||
104 | * -------------------------------------------------------------------------- | 106 | * -------------------------------------------------------------------------- |
105 | */ | 107 | */ |
106 | 108 | ||
@@ -456,7 +458,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY | |||
456 | #define MPI2_EVENT_STATE_CHANGE (0x0002) | 458 | #define MPI2_EVENT_STATE_CHANGE (0x0002) |
457 | #define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) | 459 | #define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) |
458 | #define MPI2_EVENT_EVENT_CHANGE (0x000A) | 460 | #define MPI2_EVENT_EVENT_CHANGE (0x000A) |
459 | #define MPI2_EVENT_TASK_SET_FULL (0x000E) | 461 | #define MPI2_EVENT_TASK_SET_FULL (0x000E) /* obsolete */ |
460 | #define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) | 462 | #define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) |
461 | #define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) | 463 | #define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) |
462 | #define MPI2_EVENT_SAS_DISCOVERY (0x0016) | 464 | #define MPI2_EVENT_SAS_DISCOVERY (0x0016) |
@@ -517,6 +519,7 @@ typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED | |||
517 | MPI2_POINTER pMpi2EventDataHardResetReceived_t; | 519 | MPI2_POINTER pMpi2EventDataHardResetReceived_t; |
518 | 520 | ||
519 | /* Task Set Full Event data */ | 521 | /* Task Set Full Event data */ |
522 | /* this event is obsolete */ | ||
520 | 523 | ||
521 | typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL | 524 | typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL |
522 | { | 525 | { |
@@ -831,6 +834,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST | |||
831 | #define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) | 834 | #define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) |
832 | #define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) | 835 | #define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) |
833 | #define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) | 836 | #define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) |
837 | #define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) | ||
834 | #define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) | 838 | #define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) |
835 | #define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) | 839 | #define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) |
836 | #define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) | 840 | #define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h index 5160c33d2a00..bd61a7b60a2b 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h | |||
@@ -1,12 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2008 LSI Corporation. | 2 | * Copyright (c) 2000-2010 LSI Corporation. |
3 | * | 3 | * |
4 | * | 4 | * |
5 | * Name: mpi2_raid.h | 5 | * Name: mpi2_raid.h |
6 | * Title: MPI Integrated RAID messages and structures | 6 | * Title: MPI Integrated RAID messages and structures |
7 | * Creation Date: April 26, 2007 | 7 | * Creation Date: April 26, 2007 |
8 | * | 8 | * |
9 | * mpi2_raid.h Version: 02.00.04 | 9 | * mpi2_raid.h Version: 02.00.05 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -22,6 +22,7 @@ | |||
22 | * can be sized by the build environment. | 22 | * can be sized by the build environment. |
23 | * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of | 23 | * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of |
24 | * VolumeCreationFlags and marked the old one as obsolete. | 24 | * VolumeCreationFlags and marked the old one as obsolete. |
25 | * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. | ||
25 | * -------------------------------------------------------------------------- | 26 | * -------------------------------------------------------------------------- |
26 | */ | 27 | */ |
27 | 28 | ||
@@ -260,6 +261,7 @@ typedef struct _MPI2_RAID_VOL_INDICATOR | |||
260 | #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) | 261 | #define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) |
261 | #define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) | 262 | #define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) |
262 | #define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) | 263 | #define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) |
264 | #define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) | ||
263 | 265 | ||
264 | 266 | ||
265 | /* RAID Action Reply ActionData union */ | 267 | /* RAID Action Reply ActionData union */ |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index 2d8aeed51392..608f6d6e6fca 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h | |||
@@ -1,12 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2000-2007 LSI Corporation. | 2 | * Copyright (c) 2000-2010 LSI Corporation. |
3 | * | 3 | * |
4 | * | 4 | * |
5 | * Name: mpi2_sas.h | 5 | * Name: mpi2_sas.h |
6 | * Title: MPI Serial Attached SCSI structures and definitions | 6 | * Title: MPI Serial Attached SCSI structures and definitions |
7 | * Creation Date: February 9, 2007 | 7 | * Creation Date: February 9, 2007 |
8 | * | 8 | * |
9 | * mpi2.h Version: 02.00.03 | 9 | * mpi2_sas.h Version: 02.00.04 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -20,6 +20,7 @@ | |||
20 | * Request. | 20 | * Request. |
21 | * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST | 21 | * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST |
22 | * to MPI2_SGE_IO_UNION since it supports chained SGLs. | 22 | * to MPI2_SGE_IO_UNION since it supports chained SGLs. |
23 | * 05-12-10 02.00.04 Modified some comments. | ||
23 | * -------------------------------------------------------------------------- | 24 | * -------------------------------------------------------------------------- |
24 | */ | 25 | */ |
25 | 26 | ||
@@ -110,7 +111,7 @@ typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST | |||
110 | /* values for PassthroughFlags field */ | 111 | /* values for PassthroughFlags field */ |
111 | #define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) | 112 | #define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) |
112 | 113 | ||
113 | /* values for SGLFlags field are in the SGL section of mpi2.h */ | 114 | /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ |
114 | 115 | ||
115 | 116 | ||
116 | /* SMP Passthrough Reply Message */ | 117 | /* SMP Passthrough Reply Message */ |
@@ -174,7 +175,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST | |||
174 | #define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) | 175 | #define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) |
175 | #define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) | 176 | #define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) |
176 | 177 | ||
177 | /* values for SGLFlags field are in the SGL section of mpi2.h */ | 178 | /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ |
178 | 179 | ||
179 | 180 | ||
180 | /* SATA Passthrough Reply Message */ | 181 | /* SATA Passthrough Reply Message */ |
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 686b09b81219..5c6e3a67bb94 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * Title: MPI diagnostic tool structures and definitions | 6 | * Title: MPI diagnostic tool structures and definitions |
7 | * Creation Date: March 26, 2007 | 7 | * Creation Date: March 26, 2007 |
8 | * | 8 | * |
9 | * mpi2_tool.h Version: 02.00.04 | 9 | * mpi2_tool.h Version: 02.00.05 |
10 | * | 10 | * |
11 | * Version History | 11 | * Version History |
12 | * --------------- | 12 | * --------------- |
@@ -22,6 +22,7 @@ | |||
22 | * and reply messages. | 22 | * and reply messages. |
23 | * Added MPI2_DIAG_BUF_TYPE_EXTENDED. | 23 | * Added MPI2_DIAG_BUF_TYPE_EXTENDED. |
24 | * Incremented MPI2_DIAG_BUF_TYPE_COUNT. | 24 | * Incremented MPI2_DIAG_BUF_TYPE_COUNT. |
25 | * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. | ||
25 | * -------------------------------------------------------------------------- | 26 | * -------------------------------------------------------------------------- |
26 | */ | 27 | */ |
27 | 28 | ||
@@ -37,6 +38,7 @@ | |||
37 | /* defines for the Tools */ | 38 | /* defines for the Tools */ |
38 | #define MPI2_TOOLBOX_CLEAN_TOOL (0x00) | 39 | #define MPI2_TOOLBOX_CLEAN_TOOL (0x00) |
39 | #define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) | 40 | #define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) |
41 | #define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) | ||
40 | #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) | 42 | #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) |
41 | #define MPI2_TOOLBOX_BEACON_TOOL (0x05) | 43 | #define MPI2_TOOLBOX_BEACON_TOOL (0x05) |
42 | #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) | 44 | #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) |
@@ -102,8 +104,7 @@ typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST | |||
102 | * Toolbox Memory Move request | 104 | * Toolbox Memory Move request |
103 | ****************************************************************************/ | 105 | ****************************************************************************/ |
104 | 106 | ||
105 | typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST | 107 | typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST { |
106 | { | ||
107 | U8 Tool; /* 0x00 */ | 108 | U8 Tool; /* 0x00 */ |
108 | U8 Reserved1; /* 0x01 */ | 109 | U8 Reserved1; /* 0x01 */ |
109 | U8 ChainOffset; /* 0x02 */ | 110 | U8 ChainOffset; /* 0x02 */ |
@@ -120,6 +121,44 @@ typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST | |||
120 | 121 | ||
121 | 122 | ||
122 | /**************************************************************************** | 123 | /**************************************************************************** |
124 | * Toolbox Diagnostic Data Upload request | ||
125 | ****************************************************************************/ | ||
126 | |||
127 | typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST { | ||
128 | U8 Tool; /* 0x00 */ | ||
129 | U8 Reserved1; /* 0x01 */ | ||
130 | U8 ChainOffset; /* 0x02 */ | ||
131 | U8 Function; /* 0x03 */ | ||
132 | U16 Reserved2; /* 0x04 */ | ||
133 | U8 Reserved3; /* 0x06 */ | ||
134 | U8 MsgFlags; /* 0x07 */ | ||
135 | U8 VP_ID; /* 0x08 */ | ||
136 | U8 VF_ID; /* 0x09 */ | ||
137 | U16 Reserved4; /* 0x0A */ | ||
138 | U8 SGLFlags; /* 0x0C */ | ||
139 | U8 Reserved5; /* 0x0D */ | ||
140 | U16 Reserved6; /* 0x0E */ | ||
141 | U32 Flags; /* 0x10 */ | ||
142 | U32 DataLength; /* 0x14 */ | ||
143 | MPI2_SGE_SIMPLE_UNION SGL; /* 0x18 */ | ||
144 | } MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, | ||
145 | MPI2_POINTER PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, | ||
146 | Mpi2ToolboxDiagDataUploadRequest_t, | ||
147 | MPI2_POINTER pMpi2ToolboxDiagDataUploadRequest_t; | ||
148 | |||
149 | /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ | ||
150 | |||
151 | |||
152 | typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER { | ||
153 | U32 DiagDataLength; /* 00h */ | ||
154 | U8 FormatCode; /* 04h */ | ||
155 | U8 Reserved1; /* 05h */ | ||
156 | U16 Reserved2; /* 06h */ | ||
157 | } MPI2_DIAG_DATA_UPLOAD_HEADER, MPI2_POINTER PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, | ||
158 | Mpi2DiagDataUploadHeader_t, MPI2_POINTER pMpi2DiagDataUploadHeader_t; | ||
159 | |||
160 | |||
161 | /**************************************************************************** | ||
123 | * Toolbox ISTWI Read Write Tool | 162 | * Toolbox ISTWI Read Write Tool |
124 | ****************************************************************************/ | 163 | ****************************************************************************/ |
125 | 164 | ||
@@ -162,7 +201,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { | |||
162 | #define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) | 201 | #define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) |
163 | #define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) | 202 | #define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) |
164 | 203 | ||
165 | /* values for SGLFlags field are in the SGL section of mpi2.h */ | 204 | /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ |
166 | 205 | ||
167 | 206 | ||
168 | /* Toolbox ISTWI Read Write Tool reply message */ | 207 | /* Toolbox ISTWI Read Write Tool reply message */ |
@@ -248,7 +287,7 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { | |||
248 | Mpi2ToolboxDiagnosticCliRequest_t, | 287 | Mpi2ToolboxDiagnosticCliRequest_t, |
249 | MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t; | 288 | MPI2_POINTER pMpi2ToolboxDiagnosticCliRequest_t; |
250 | 289 | ||
251 | /* values for SGLFlags field are in the SGL section of mpi2.h */ | 290 | /* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ |
252 | 291 | ||
253 | 292 | ||
254 | /* Toolbox Diagnostic CLI Tool reply message */ | 293 | /* Toolbox Diagnostic CLI Tool reply message */ |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 12faf64f91b0..b2a817055b8b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c | |||
@@ -65,7 +65,6 @@ | |||
65 | static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; | 65 | static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; |
66 | 66 | ||
67 | #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ | 67 | #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ |
68 | #define MPT2SAS_MAX_REQUEST_QUEUE 600 /* maximum controller queue depth */ | ||
69 | 68 | ||
70 | static int max_queue_depth = -1; | 69 | static int max_queue_depth = -1; |
71 | module_param(max_queue_depth, int, 0); | 70 | module_param(max_queue_depth, int, 0); |
@@ -79,6 +78,10 @@ static int msix_disable = -1; | |||
79 | module_param(msix_disable, int, 0); | 78 | module_param(msix_disable, int, 0); |
80 | MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); | 79 | MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); |
81 | 80 | ||
81 | static int missing_delay[2] = {-1, -1}; | ||
82 | module_param_array(missing_delay, int, NULL, 0); | ||
83 | MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay"); | ||
84 | |||
82 | /* diag_buffer_enable is bitwise | 85 | /* diag_buffer_enable is bitwise |
83 | * bit 0 set = TRACE | 86 | * bit 0 set = TRACE |
84 | * bit 1 set = SNAPSHOT | 87 | * bit 1 set = SNAPSHOT |
@@ -515,9 +518,6 @@ _base_display_event_data(struct MPT2SAS_ADAPTER *ioc, | |||
515 | case MPI2_EVENT_EVENT_CHANGE: | 518 | case MPI2_EVENT_EVENT_CHANGE: |
516 | desc = "Event Change"; | 519 | desc = "Event Change"; |
517 | break; | 520 | break; |
518 | case MPI2_EVENT_TASK_SET_FULL: | ||
519 | desc = "Task Set Full"; | ||
520 | break; | ||
521 | case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: | 521 | case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: |
522 | desc = "Device Status Change"; | 522 | desc = "Device Status Change"; |
523 | break; | 523 | break; |
@@ -758,7 +758,7 @@ _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
758 | if (smid < ioc->internal_smid) { | 758 | if (smid < ioc->internal_smid) { |
759 | i = smid - ioc->hi_priority_smid; | 759 | i = smid - ioc->hi_priority_smid; |
760 | cb_idx = ioc->hpr_lookup[i].cb_idx; | 760 | cb_idx = ioc->hpr_lookup[i].cb_idx; |
761 | } else { | 761 | } else if (smid <= ioc->hba_queue_depth) { |
762 | i = smid - ioc->internal_smid; | 762 | i = smid - ioc->internal_smid; |
763 | cb_idx = ioc->internal_lookup[i].cb_idx; | 763 | cb_idx = ioc->internal_lookup[i].cb_idx; |
764 | } | 764 | } |
@@ -848,6 +848,7 @@ _base_interrupt(int irq, void *bus_id) | |||
848 | return IRQ_NONE; | 848 | return IRQ_NONE; |
849 | 849 | ||
850 | completed_cmds = 0; | 850 | completed_cmds = 0; |
851 | cb_idx = 0xFF; | ||
851 | do { | 852 | do { |
852 | rd.word = rpf->Words; | 853 | rd.word = rpf->Words; |
853 | if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) | 854 | if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) |
@@ -860,6 +861,9 @@ _base_interrupt(int irq, void *bus_id) | |||
860 | MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { | 861 | MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { |
861 | reply = le32_to_cpu | 862 | reply = le32_to_cpu |
862 | (rpf->AddressReply.ReplyFrameAddress); | 863 | (rpf->AddressReply.ReplyFrameAddress); |
864 | if (reply > ioc->reply_dma_max_address || | ||
865 | reply < ioc->reply_dma_min_address) | ||
866 | reply = 0; | ||
863 | } else if (request_desript_type == | 867 | } else if (request_desript_type == |
864 | MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER) | 868 | MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER) |
865 | goto next; | 869 | goto next; |
@@ -1489,6 +1493,7 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
1489 | { | 1493 | { |
1490 | unsigned long flags; | 1494 | unsigned long flags; |
1491 | int i; | 1495 | int i; |
1496 | struct chain_tracker *chain_req, *next; | ||
1492 | 1497 | ||
1493 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); | 1498 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
1494 | if (smid >= ioc->hi_priority_smid) { | 1499 | if (smid >= ioc->hi_priority_smid) { |
@@ -1511,6 +1516,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) | |||
1511 | 1516 | ||
1512 | /* scsiio queue */ | 1517 | /* scsiio queue */ |
1513 | i = smid - 1; | 1518 | i = smid - 1; |
1519 | if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { | ||
1520 | list_for_each_entry_safe(chain_req, next, | ||
1521 | &ioc->scsi_lookup[i].chain_list, tracker_list) { | ||
1522 | list_del_init(&chain_req->tracker_list); | ||
1523 | list_add_tail(&chain_req->tracker_list, | ||
1524 | &ioc->free_chain_list); | ||
1525 | } | ||
1526 | } | ||
1514 | ioc->scsi_lookup[i].cb_idx = 0xFF; | 1527 | ioc->scsi_lookup[i].cb_idx = 0xFF; |
1515 | ioc->scsi_lookup[i].scmd = NULL; | 1528 | ioc->scsi_lookup[i].scmd = NULL; |
1516 | list_add_tail(&ioc->scsi_lookup[i].tracker_list, | 1529 | list_add_tail(&ioc->scsi_lookup[i].tracker_list, |
@@ -1819,6 +1832,97 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) | |||
1819 | } | 1832 | } |
1820 | 1833 | ||
1821 | /** | 1834 | /** |
1835 | * _base_update_missing_delay - change the missing delay timers | ||
1836 | * @ioc: per adapter object | ||
1837 | * @device_missing_delay: amount of time until a device is reported missing | ||
1838 | * @io_missing_delay: interval after which IO is returned for a missing device | ||
1839 | * | ||
1840 | * Return nothing. | ||
1841 | * | ||
1842 | * Using the values passed on the command line, this function modifies the | ||
1843 | * device missing delay as well as the IO missing delay. It should be called | ||
1844 | * at driver load time. | ||
1845 | */ | ||
1846 | static void | ||
1847 | _base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc, | ||
1848 | u16 device_missing_delay, u8 io_missing_delay) | ||
1849 | { | ||
1850 | u16 dmd, dmd_new, dmd_original; | ||
1851 | u8 io_missing_delay_original; | ||
1852 | u16 sz; | ||
1853 | Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; | ||
1854 | Mpi2ConfigReply_t mpi_reply; | ||
1855 | u8 num_phys = 0; | ||
1856 | u16 ioc_status; | ||
1857 | |||
1858 | mpt2sas_config_get_number_hba_phys(ioc, &num_phys); | ||
1859 | if (!num_phys) | ||
1860 | return; | ||
1861 | |||
1862 | sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * | ||
1863 | sizeof(Mpi2SasIOUnit1PhyData_t)); | ||
1864 | sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); | ||
1865 | if (!sas_iounit_pg1) { | ||
1866 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1867 | ioc->name, __FILE__, __LINE__, __func__); | ||
1868 | goto out; | ||
1869 | } | ||
1870 | if ((mpt2sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, | ||
1871 | sas_iounit_pg1, sz))) { | ||
1872 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1873 | ioc->name, __FILE__, __LINE__, __func__); | ||
1874 | goto out; | ||
1875 | } | ||
1876 | ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & | ||
1877 | MPI2_IOCSTATUS_MASK; | ||
1878 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { | ||
1879 | printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", | ||
1880 | ioc->name, __FILE__, __LINE__, __func__); | ||
1881 | goto out; | ||
1882 | } | ||
1883 | |||
1884 | /* device missing delay */ | ||
1885 | dmd = sas_iounit_pg1->ReportDeviceMissingDelay; | ||
1886 | if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) | ||
1887 | dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; | ||
1888 | else | ||
1889 | dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; | ||
1890 | dmd_original = dmd; | ||
1891 | if (device_missing_delay > 0x7F) { | ||
1892 | dmd = (device_missing_delay > 0x7F0) ? 0x7F0 : | ||
1893 | device_missing_delay; | ||
1894 | dmd = dmd / 16; | ||
1895 | dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; | ||
1896 | } else | ||
1897 | dmd = device_missing_delay; | ||
1898 | sas_iounit_pg1->ReportDeviceMissingDelay = dmd; | ||
1899 | |||
1900 | /* io missing delay */ | ||
1901 | io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; | ||
1902 | sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; | ||
1903 | |||
1904 | if (!mpt2sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, | ||
1905 | sz)) { | ||
1906 | if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) | ||
1907 | dmd_new = (dmd & | ||
1908 | MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; | ||
1909 | else | ||
1910 | dmd_new = | ||
1911 | dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; | ||
1912 | printk(MPT2SAS_INFO_FMT "device_missing_delay: old(%d), " | ||
1913 | "new(%d)\n", ioc->name, dmd_orignal, dmd_new); | ||
1914 | printk(MPT2SAS_INFO_FMT "ioc_missing_delay: old(%d), " | ||
1915 | "new(%d)\n", ioc->name, io_missing_delay_original, | ||
1916 | io_missing_delay); | ||
1917 | ioc->device_missing_delay = dmd_new; | ||
1918 | ioc->io_missing_delay = io_missing_delay; | ||
1919 | } | ||
1920 | |||
1921 | out: | ||
1922 | kfree(sas_iounit_pg1); | ||
1923 | } | ||
1924 | |||
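The new missing_delay module parameter added above is what feeds this routine. A hedged usage sketch; the exact call site in the init path is not part of this hunk:

    /* e.g. "modprobe mpt2sas missing_delay=60,5" sets the two-element array */
    if (missing_delay[0] != -1 && missing_delay[1] != -1)
            _base_update_missing_delay(ioc, missing_delay[0], missing_delay[1]);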
1925 | /** | ||
1822 | * _base_static_config_pages - static start of day config pages | 1926 | * _base_static_config_pages - static start of day config pages |
1823 | * @ioc: per adapter object | 1927 | * @ioc: per adapter object |
1824 | * | 1928 | * |
@@ -1855,6 +1959,7 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) | |||
1855 | MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; | 1959 | MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; |
1856 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); | 1960 | ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); |
1857 | mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); | 1961 | mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); |
1962 | |||
1858 | } | 1963 | } |
1859 | 1964 | ||
1860 | /** | 1965 | /** |
@@ -1868,6 +1973,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc) | |||
1868 | static void | 1973 | static void |
1869 | _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) | 1974 | _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) |
1870 | { | 1975 | { |
1976 | int i; | ||
1977 | |||
1871 | dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | 1978 | dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, |
1872 | __func__)); | 1979 | __func__)); |
1873 | 1980 | ||
@@ -1932,6 +2039,20 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) | |||
1932 | } | 2039 | } |
1933 | kfree(ioc->hpr_lookup); | 2040 | kfree(ioc->hpr_lookup); |
1934 | kfree(ioc->internal_lookup); | 2041 | kfree(ioc->internal_lookup); |
2042 | if (ioc->chain_lookup) { | ||
2043 | for (i = 0; i < ioc->chain_depth; i++) { | ||
2044 | if (ioc->chain_lookup[i].chain_buffer) | ||
2045 | pci_pool_free(ioc->chain_dma_pool, | ||
2046 | ioc->chain_lookup[i].chain_buffer, | ||
2047 | ioc->chain_lookup[i].chain_buffer_dma); | ||
2048 | } | ||
2049 | if (ioc->chain_dma_pool) | ||
2050 | pci_pool_destroy(ioc->chain_dma_pool); | ||
2051 | } | ||
2052 | if (ioc->chain_lookup) { | ||
2053 | free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); | ||
2054 | ioc->chain_lookup = NULL; | ||
2055 | } | ||
1935 | } | 2056 | } |
1936 | 2057 | ||
1937 | 2058 | ||
@@ -1953,6 +2074,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
1953 | u32 sz, total_sz; | 2074 | u32 sz, total_sz; |
1954 | u32 retry_sz; | 2075 | u32 retry_sz; |
1955 | u16 max_request_credit; | 2076 | u16 max_request_credit; |
2077 | int i; | ||
1956 | 2078 | ||
1957 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, | 2079 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, |
1958 | __func__)); | 2080 | __func__)); |
@@ -1970,14 +2092,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
1970 | } | 2092 | } |
1971 | 2093 | ||
1972 | /* command line tunables for max controller queue depth */ | 2094 | /* command line tunables for max controller queue depth */ |
1973 | if (max_queue_depth != -1) { | 2095 | if (max_queue_depth != -1) |
1974 | max_request_credit = (max_queue_depth < facts->RequestCredit) | 2096 | max_request_credit = (max_queue_depth < facts->RequestCredit) |
1975 | ? max_queue_depth : facts->RequestCredit; | 2097 | ? max_queue_depth : facts->RequestCredit; |
1976 | } else { | 2098 | else |
1977 | max_request_credit = (facts->RequestCredit > | 2099 | max_request_credit = facts->RequestCredit; |
1978 | MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE : | ||
1979 | facts->RequestCredit; | ||
1980 | } | ||
1981 | 2100 | ||
1982 | ioc->hba_queue_depth = max_request_credit; | 2101 | ioc->hba_queue_depth = max_request_credit; |
1983 | ioc->hi_priority_depth = facts->HighPriorityCredit; | 2102 | ioc->hi_priority_depth = facts->HighPriorityCredit; |
@@ -2083,7 +2202,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2083 | * "frame for smid=0 | 2202 | * "frame for smid=0 |
2084 | */ | 2203 | */ |
2085 | ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; | 2204 | ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; |
2086 | sz = ((ioc->scsiio_depth + 1 + ioc->chain_depth) * ioc->request_sz); | 2205 | sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); |
2087 | 2206 | ||
2088 | /* hi-priority queue */ | 2207 | /* hi-priority queue */ |
2089 | sz += (ioc->hi_priority_depth * ioc->request_sz); | 2208 | sz += (ioc->hi_priority_depth * ioc->request_sz); |
@@ -2124,19 +2243,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2124 | ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * | 2243 | ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * |
2125 | ioc->request_sz); | 2244 | ioc->request_sz); |
2126 | 2245 | ||
2127 | ioc->chain = ioc->internal + (ioc->internal_depth * | ||
2128 | ioc->request_sz); | ||
2129 | ioc->chain_dma = ioc->internal_dma + (ioc->internal_depth * | ||
2130 | ioc->request_sz); | ||
2131 | 2246 | ||
2132 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): " | 2247 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): " |
2133 | "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, | 2248 | "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, |
2134 | ioc->request, ioc->hba_queue_depth, ioc->request_sz, | 2249 | ioc->request, ioc->hba_queue_depth, ioc->request_sz, |
2135 | (ioc->hba_queue_depth * ioc->request_sz)/1024)); | 2250 | (ioc->hba_queue_depth * ioc->request_sz)/1024)); |
2136 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth" | ||
2137 | "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain, | ||
2138 | ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth * | ||
2139 | ioc->request_sz))/1024)); | ||
2140 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n", | 2251 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n", |
2141 | ioc->name, (unsigned long long) ioc->request_dma)); | 2252 | ioc->name, (unsigned long long) ioc->request_dma)); |
2142 | total_sz += sz; | 2253 | total_sz += sz; |
@@ -2155,6 +2266,38 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2155 | "depth(%d)\n", ioc->name, ioc->request, | 2266 | "depth(%d)\n", ioc->name, ioc->request, |
2156 | ioc->scsiio_depth)); | 2267 | ioc->scsiio_depth)); |
2157 | 2268 | ||
2269 | /* loop till the allocation succeeds */ | ||
2270 | do { | ||
2271 | sz = ioc->chain_depth * sizeof(struct chain_tracker); | ||
2272 | ioc->chain_pages = get_order(sz); | ||
2273 | ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( | ||
2274 | GFP_KERNEL, ioc->chain_pages); | ||
2275 | if (ioc->chain_lookup == NULL) | ||
2276 | ioc->chain_depth -= 100; | ||
2277 | } while (ioc->chain_lookup == NULL); | ||
2278 | ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, | ||
2279 | ioc->request_sz, 16, 0); | ||
2280 | if (!ioc->chain_dma_pool) { | ||
2281 | printk(MPT2SAS_ERR_FMT "chain_dma_pool: pci_pool_create " | ||
2282 | "failed\n", ioc->name); | ||
2283 | goto out; | ||
2284 | } | ||
2285 | for (i = 0; i < ioc->chain_depth; i++) { | ||
2286 | ioc->chain_lookup[i].chain_buffer = pci_pool_alloc( | ||
2287 | ioc->chain_dma_pool , GFP_KERNEL, | ||
2288 | &ioc->chain_lookup[i].chain_buffer_dma); | ||
2289 | if (!ioc->chain_lookup[i].chain_buffer) { | ||
2290 | ioc->chain_depth = i; | ||
2291 | goto chain_done; | ||
2292 | } | ||
2293 | total_sz += ioc->request_sz; | ||
2294 | } | ||
2295 | chain_done: | ||
2296 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool depth" | ||
2297 | "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, | ||
2298 | ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth * | ||
2299 | ioc->request_sz))/1024)); | ||
2300 | |||
2158 | /* initialize hi-priority queue smid's */ | 2301 | /* initialize hi-priority queue smid's */ |
2159 | ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, | 2302 | ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, |
2160 | sizeof(struct request_tracker), GFP_KERNEL); | 2303 | sizeof(struct request_tracker), GFP_KERNEL); |
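The allocation added above keeps retrying with a smaller chain_depth (100 less per pass) until the lookup array fits in available pages, and if the per-chain DMA-pool allocations later run dry it simply records how many succeeded (the chain_done label) instead of failing the whole bring-up. A hedged userspace sketch of that shrink-and-retry pattern, with malloc standing in for __get_free_pages/pci_pool_alloc and invented names throughout:

#include <stdlib.h>

struct chain_entry {
	void *buf;
};

/* Returns the number of chain buffers actually obtained; *lookup_out is
 * the (possibly shrunken) tracking array.  Depth shrinks by 100 per
 * failed attempt, echoing the driver's retry loop. */
static int alloc_chain_buffers(int depth, size_t buf_sz,
			       struct chain_entry **lookup_out)
{
	struct chain_entry *lookup = NULL;
	int i;

	/* loop until the tracking-array allocation succeeds */
	while (depth > 0) {
		lookup = calloc(depth, sizeof(*lookup));
		if (lookup)
			break;
		depth -= 100;
	}
	if (!lookup)
		return 0;

	for (i = 0; i < depth; i++) {
		lookup[i].buf = malloc(buf_sz);  /* pci_pool_alloc() analogue */
		if (!lookup[i].buf)
			break;                   /* keep what we got: "chain_done" */
	}
	*lookup_out = lookup;
	return i;                                /* effective chain depth */
}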
@@ -2221,6 +2364,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2221 | ioc->name); | 2364 | ioc->name); |
2222 | goto out; | 2365 | goto out; |
2223 | } | 2366 | } |
2367 | ioc->reply_dma_min_address = (u32)(ioc->reply_dma); | ||
2368 | ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; | ||
2224 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth" | 2369 | dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth" |
2225 | "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply, | 2370 | "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply, |
2226 | ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); | 2371 | ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); |
@@ -2302,7 +2447,6 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
2302 | return 0; | 2447 | return 0; |
2303 | 2448 | ||
2304 | out: | 2449 | out: |
2305 | _base_release_memory_pools(ioc); | ||
2306 | return -ENOMEM; | 2450 | return -ENOMEM; |
2307 | } | 2451 | } |
2308 | 2452 | ||
@@ -3485,6 +3629,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
3485 | INIT_LIST_HEAD(&ioc->free_list); | 3629 | INIT_LIST_HEAD(&ioc->free_list); |
3486 | smid = 1; | 3630 | smid = 1; |
3487 | for (i = 0; i < ioc->scsiio_depth; i++, smid++) { | 3631 | for (i = 0; i < ioc->scsiio_depth; i++, smid++) { |
3632 | INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list); | ||
3488 | ioc->scsi_lookup[i].cb_idx = 0xFF; | 3633 | ioc->scsi_lookup[i].cb_idx = 0xFF; |
3489 | ioc->scsi_lookup[i].smid = smid; | 3634 | ioc->scsi_lookup[i].smid = smid; |
3490 | ioc->scsi_lookup[i].scmd = NULL; | 3635 | ioc->scsi_lookup[i].scmd = NULL; |
@@ -3511,6 +3656,13 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) | |||
3511 | list_add_tail(&ioc->internal_lookup[i].tracker_list, | 3656 | list_add_tail(&ioc->internal_lookup[i].tracker_list, |
3512 | &ioc->internal_free_list); | 3657 | &ioc->internal_free_list); |
3513 | } | 3658 | } |
3659 | |||
3660 | /* chain pool */ | ||
3661 | INIT_LIST_HEAD(&ioc->free_chain_list); | ||
3662 | for (i = 0; i < ioc->chain_depth; i++) | ||
3663 | list_add_tail(&ioc->chain_lookup[i].tracker_list, | ||
3664 | &ioc->free_chain_list); | ||
3665 | |||
3514 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | 3666 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
3515 | 3667 | ||
3516 | /* initialize Reply Free Queue */ | 3668 | /* initialize Reply Free Queue */ |
@@ -3708,12 +3860,15 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) | |||
3708 | _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); | 3860 | _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); |
3709 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); | 3861 | _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); |
3710 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); | 3862 | _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); |
3711 | _base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL); | ||
3712 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); | 3863 | _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); |
3713 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); | 3864 | r = _base_make_ioc_operational(ioc, CAN_SLEEP); |
3714 | if (r) | 3865 | if (r) |
3715 | goto out_free_resources; | 3866 | goto out_free_resources; |
3716 | 3867 | ||
3868 | if (missing_delay[0] != -1 && missing_delay[1] != -1) | ||
3869 | _base_update_missing_delay(ioc, missing_delay[0], | ||
3870 | missing_delay[1]); | ||
3871 | |||
3717 | mpt2sas_base_start_watchdog(ioc); | 3872 | mpt2sas_base_start_watchdog(ioc); |
3718 | return 0; | 3873 | return 0; |
3719 | 3874 | ||
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 0b15a8bdebfc..283568c6fb04 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h | |||
@@ -69,8 +69,8 @@ | |||
69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" | 69 | #define MPT2SAS_DRIVER_NAME "mpt2sas" |
70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" | 70 | #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" |
71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" | 71 | #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" |
72 | #define MPT2SAS_DRIVER_VERSION "06.100.00.00" | 72 | #define MPT2SAS_DRIVER_VERSION "07.100.00.00" |
73 | #define MPT2SAS_MAJOR_VERSION 06 | 73 | #define MPT2SAS_MAJOR_VERSION 07 |
74 | #define MPT2SAS_MINOR_VERSION 100 | 74 | #define MPT2SAS_MINOR_VERSION 100 |
75 | #define MPT2SAS_BUILD_VERSION 00 | 75 | #define MPT2SAS_BUILD_VERSION 00 |
76 | #define MPT2SAS_RELEASE_VERSION 00 | 76 | #define MPT2SAS_RELEASE_VERSION 00 |
@@ -419,6 +419,18 @@ enum reset_type { | |||
419 | }; | 419 | }; |
420 | 420 | ||
421 | /** | 421 | /** |
422 | * struct chain_tracker - firmware chain tracker | ||
423 | * @chain_buffer: chain buffer | ||
424 | * @chain_buffer_dma: physical address | ||
425 | * @tracker_list: list of free chain buffers (ioc->free_chain_list) | ||

426 | */ | ||
427 | struct chain_tracker { | ||
428 | void *chain_buffer; | ||
429 | dma_addr_t chain_buffer_dma; | ||
430 | struct list_head tracker_list; | ||
431 | }; | ||
432 | |||
433 | /** | ||
422 | * struct request_tracker - firmware request tracker | 434 | * struct request_tracker - firmware request tracker |
423 | * @smid: system message id | 435 | * @smid: system message id |
424 | * @scmd: scsi request pointer | 436 | * @scmd: scsi request pointer |
@@ -430,6 +442,7 @@ struct request_tracker { | |||
430 | u16 smid; | 442 | u16 smid; |
431 | struct scsi_cmnd *scmd; | 443 | struct scsi_cmnd *scmd; |
432 | u8 cb_idx; | 444 | u8 cb_idx; |
445 | struct list_head chain_list; | ||
433 | struct list_head tracker_list; | 446 | struct list_head tracker_list; |
434 | }; | 447 | }; |
435 | 448 | ||
@@ -704,8 +717,10 @@ struct MPT2SAS_ADAPTER { | |||
704 | wait_queue_head_t reset_wq; | 717 | wait_queue_head_t reset_wq; |
705 | 718 | ||
706 | /* chain */ | 719 | /* chain */ |
707 | u8 *chain; | 720 | struct chain_tracker *chain_lookup; |
708 | dma_addr_t chain_dma; | 721 | struct list_head free_chain_list; |
722 | struct dma_pool *chain_dma_pool; | ||
723 | ulong chain_pages; | ||
709 | u16 max_sges_in_main_message; | 724 | u16 max_sges_in_main_message; |
710 | u16 max_sges_in_chain_message; | 725 | u16 max_sges_in_chain_message; |
711 | u16 chains_needed_per_io; | 726 | u16 chains_needed_per_io; |
@@ -737,6 +752,8 @@ struct MPT2SAS_ADAPTER { | |||
737 | u16 reply_sz; | 752 | u16 reply_sz; |
738 | u8 *reply; | 753 | u8 *reply; |
739 | dma_addr_t reply_dma; | 754 | dma_addr_t reply_dma; |
755 | u32 reply_dma_max_address; | ||
756 | u32 reply_dma_min_address; | ||
740 | struct dma_pool *reply_dma_pool; | 757 | struct dma_pool *reply_dma_pool; |
741 | 758 | ||
742 | /* reply free queue */ | 759 | /* reply free queue */ |
@@ -832,6 +849,8 @@ int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, | |||
832 | ulong timeout, struct scsi_cmnd *scmd); | 849 | ulong timeout, struct scsi_cmnd *scmd); |
833 | void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); | 850 | void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); |
834 | void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); | 851 | void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); |
852 | void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); | ||
853 | void mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); | ||
835 | struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, | 854 | struct _sas_node *mpt2sas_scsih_expander_find_by_handle(struct MPT2SAS_ADAPTER *ioc, |
836 | u16 handle); | 855 | u16 handle); |
837 | struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER | 856 | struct _sas_node *mpt2sas_scsih_expander_find_by_sas_address(struct MPT2SAS_ADAPTER |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 40cb8aeb21b1..e92b77af5484 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c | |||
@@ -81,6 +81,7 @@ enum block_state { | |||
81 | BLOCKING, | 81 | BLOCKING, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | ||
84 | /** | 85 | /** |
85 | * _ctl_sas_device_find_by_handle - sas device search | 86 | * _ctl_sas_device_find_by_handle - sas device search |
86 | * @ioc: per adapter object | 87 | * @ioc: per adapter object |
@@ -107,7 +108,6 @@ _ctl_sas_device_find_by_handle(struct MPT2SAS_ADAPTER *ioc, u16 handle) | |||
107 | return r; | 108 | return r; |
108 | } | 109 | } |
109 | 110 | ||
110 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | ||
111 | /** | 111 | /** |
112 | * _ctl_display_some_debug - debug routine | 112 | * _ctl_display_some_debug - debug routine |
113 | * @ioc: per adapter object | 113 | * @ioc: per adapter object |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 1a96a00418a4..eda347c57979 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c | |||
@@ -931,31 +931,32 @@ _scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id, | |||
931 | } | 931 | } |
932 | 932 | ||
933 | /** | 933 | /** |
934 | * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) | 934 | * _scsih_get_chain_buffer_tracker - obtain chain tracker |
935 | * @ioc: per adapter object | 935 | * @ioc: per adapter object |
936 | * @smid: system request message index | 936 | * @smid: smid associated to an IO request |
937 | * | 937 | * |
938 | * Returns phys pointer to chain buffer. | 938 | * Returns chain tracker(from ioc->free_chain_list) |
939 | */ | 939 | */ |
940 | static dma_addr_t | 940 | static struct chain_tracker * |
941 | _scsih_get_chain_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid) | 941 | _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid) |
942 | { | 942 | { |
943 | return ioc->chain_dma + ((smid - 1) * (ioc->request_sz * | 943 | struct chain_tracker *chain_req; |
944 | ioc->chains_needed_per_io)); | 944 | unsigned long flags; |
945 | } | ||
946 | 945 | ||
947 | /** | 946 | spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); |
948 | * _scsih_get_chain_buffer - obtain block of chains assigned to a mf request | 947 | if (list_empty(&ioc->free_chain_list)) { |
949 | * @ioc: per adapter object | 948 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); |
950 | * @smid: system request message index | 949 | printk(MPT2SAS_WARN_FMT "chain buffers not available\n", |
951 | * | 950 | ioc->name); |
952 | * Returns virt pointer to chain buffer. | 951 | return NULL; |
953 | */ | 952 | } |
954 | static void * | 953 | chain_req = list_entry(ioc->free_chain_list.next, |
955 | _scsih_get_chain_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid) | 954 | struct chain_tracker, tracker_list); |
956 | { | 955 | list_del_init(&chain_req->tracker_list); |
957 | return (void *)(ioc->chain + ((smid - 1) * (ioc->request_sz * | 956 | list_add_tail(&chain_req->tracker_list, |
958 | ioc->chains_needed_per_io))); | 957 | &ioc->scsi_lookup[smid - 1].chain_list); |
958 | spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); | ||
959 | return chain_req; | ||
959 | } | 960 | } |
960 | 961 | ||
961 | /** | 962 | /** |
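With the single contiguous chain region gone, each I/O now borrows chain buffers one tracker at a time: a tracker is popped from ioc->free_chain_list under scsi_lookup_lock and parked on the owning request's chain_list so it can be returned when the command completes. Below is a small userspace model of that handoff, using a pthread mutex in place of the spinlock; the struct layout and names are illustrative only.

#include <pthread.h>

struct chain_tracker {
	void *chain_buffer;
	struct chain_tracker *next;        /* link on either the free or per-request list */
};

struct request {
	struct chain_tracker *chain_list;  /* trackers borrowed by this command */
};

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chain_tracker *free_chain_list;

/* Pop one tracker from the global free list and park it on the request,
 * all under the lock; return NULL when the pool is exhausted (the driver
 * then fails the SG build and lets the midlayer retry the command). */
static struct chain_tracker *get_chain_tracker(struct request *req)
{
	struct chain_tracker *t;

	pthread_mutex_lock(&lookup_lock);
	t = free_chain_list;
	if (t) {
		free_chain_list = t->next;
		t->next = req->chain_list;
		req->chain_list = t;
	}
	pthread_mutex_unlock(&lookup_lock);
	return t;
}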
@@ -986,6 +987,7 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, | |||
986 | u32 sgl_flags; | 987 | u32 sgl_flags; |
987 | u32 sgl_flags_last_element; | 988 | u32 sgl_flags_last_element; |
988 | u32 sgl_flags_end_buffer; | 989 | u32 sgl_flags_end_buffer; |
990 | struct chain_tracker *chain_req; | ||
989 | 991 | ||
990 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); | 992 | mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); |
991 | 993 | ||
@@ -1033,8 +1035,11 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, | |||
1033 | 1035 | ||
1034 | /* initializing the chain flags and pointers */ | 1036 | /* initializing the chain flags and pointers */ |
1035 | chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; | 1037 | chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; |
1036 | chain = _scsih_get_chain_buffer(ioc, smid); | 1038 | chain_req = _scsih_get_chain_buffer_tracker(ioc, smid); |
1037 | chain_dma = _scsih_get_chain_buffer_dma(ioc, smid); | 1039 | if (!chain_req) |
1040 | return -1; | ||
1041 | chain = chain_req->chain_buffer; | ||
1042 | chain_dma = chain_req->chain_buffer_dma; | ||
1038 | do { | 1043 | do { |
1039 | sges_in_segment = (sges_left <= | 1044 | sges_in_segment = (sges_left <= |
1040 | ioc->max_sges_in_chain_message) ? sges_left : | 1045 | ioc->max_sges_in_chain_message) ? sges_left : |
@@ -1070,8 +1075,11 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, | |||
1070 | sges_in_segment--; | 1075 | sges_in_segment--; |
1071 | } | 1076 | } |
1072 | 1077 | ||
1073 | chain_dma += ioc->request_sz; | 1078 | chain_req = _scsih_get_chain_buffer_tracker(ioc, smid); |
1074 | chain += ioc->request_sz; | 1079 | if (!chain_req) |
1080 | return -1; | ||
1081 | chain = chain_req->chain_buffer; | ||
1082 | chain_dma = chain_req->chain_buffer_dma; | ||
1075 | } while (1); | 1083 | } while (1); |
1076 | 1084 | ||
1077 | 1085 | ||
@@ -1094,28 +1102,24 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, | |||
1094 | } | 1102 | } |
1095 | 1103 | ||
1096 | /** | 1104 | /** |
1097 | * _scsih_change_queue_depth - setting device queue depth | 1105 | * _scsih_adjust_queue_depth - setting device queue depth |
1098 | * @sdev: scsi device struct | 1106 | * @sdev: scsi device struct |
1099 | * @qdepth: requested queue depth | 1107 | * @qdepth: requested queue depth |
1100 | * @reason: calling context | ||
1101 | * | 1108 | * |
1102 | * Returns queue depth. | 1109 | * |
1110 | * Returns nothing | ||
1103 | */ | 1111 | */ |
1104 | static int | 1112 | static void |
1105 | _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | 1113 | _scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) |
1106 | { | 1114 | { |
1107 | struct Scsi_Host *shost = sdev->host; | 1115 | struct Scsi_Host *shost = sdev->host; |
1108 | int max_depth; | 1116 | int max_depth; |
1109 | int tag_type; | ||
1110 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | 1117 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); |
1111 | struct MPT2SAS_DEVICE *sas_device_priv_data; | 1118 | struct MPT2SAS_DEVICE *sas_device_priv_data; |
1112 | struct MPT2SAS_TARGET *sas_target_priv_data; | 1119 | struct MPT2SAS_TARGET *sas_target_priv_data; |
1113 | struct _sas_device *sas_device; | 1120 | struct _sas_device *sas_device; |
1114 | unsigned long flags; | 1121 | unsigned long flags; |
1115 | 1122 | ||
1116 | if (reason != SCSI_QDEPTH_DEFAULT) | ||
1117 | return -EOPNOTSUPP; | ||
1118 | |||
1119 | max_depth = shost->can_queue; | 1123 | max_depth = shost->can_queue; |
1120 | 1124 | ||
1121 | /* limit max device queue for SATA to 32 */ | 1125 | /* limit max device queue for SATA to 32 */ |
@@ -1141,8 +1145,27 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | |||
1141 | max_depth = 1; | 1145 | max_depth = 1; |
1142 | if (qdepth > max_depth) | 1146 | if (qdepth > max_depth) |
1143 | qdepth = max_depth; | 1147 | qdepth = max_depth; |
1144 | tag_type = (qdepth == 1) ? 0 : MSG_SIMPLE_TAG; | 1148 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); |
1145 | scsi_adjust_queue_depth(sdev, tag_type, qdepth); | 1149 | } |
1150 | |||
1151 | /** | ||
1152 | * _scsih_change_queue_depth - setting device queue depth | ||
1153 | * @sdev: scsi device struct | ||
1154 | * @qdepth: requested queue depth | ||
1155 | * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP | ||
1156 | * (see include/scsi/scsi_host.h for definition) | ||
1157 | * | ||
1158 | * Returns queue depth. | ||
1159 | */ | ||
1160 | static int | ||
1161 | _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) | ||
1162 | { | ||
1163 | if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) | ||
1164 | _scsih_adjust_queue_depth(sdev, qdepth); | ||
1165 | else if (reason == SCSI_QDEPTH_QFULL) | ||
1166 | scsi_track_queue_full(sdev, qdepth); | ||
1167 | else | ||
1168 | return -EOPNOTSUPP; | ||
1146 | 1169 | ||
1147 | if (sdev->inquiry_len > 7) | 1170 | if (sdev->inquiry_len > 7) |
1148 | sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " | 1171 | sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " |
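The queue-depth change is now split in two: the clamp itself lives in _scsih_adjust_queue_depth(), while _scsih_change_queue_depth() only dispatches on the reason code, leaving QUEUE FULL handling to the midlayer's scsi_track_queue_full(). A plain-C sketch of the same dispatch shape; the enum values and helper names are placeholders, not the SCSI midlayer's API.

enum qdepth_reason { QD_DEFAULT, QD_QFULL, QD_RAMP_UP, QD_OTHER };

static int clamp_depth(int requested, int max_depth)
{
	return requested > max_depth ? max_depth : requested;
}

/* Returns the resulting depth, or a negative value for an unsupported
 * reason, mirroring the -EOPNOTSUPP case in the driver. */
static int change_queue_depth(int requested, int max_depth,
			      enum qdepth_reason reason)
{
	switch (reason) {
	case QD_DEFAULT:
	case QD_RAMP_UP:
		return clamp_depth(requested, max_depth);  /* adjust path */
	case QD_QFULL:
		return requested;  /* scsi_track_queue_full() analogue */
	default:
		return -1;
	}
}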
@@ -2251,13 +2274,13 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) | |||
2251 | 2274 | ||
2252 | struct scsi_target *starget = scmd->device->sdev_target; | 2275 | struct scsi_target *starget = scmd->device->sdev_target; |
2253 | 2276 | ||
2254 | starget_printk(KERN_INFO, starget, "attempting target reset! " | 2277 | starget_printk(KERN_INFO, starget, "attempting device reset! " |
2255 | "scmd(%p)\n", scmd); | 2278 | "scmd(%p)\n", scmd); |
2256 | _scsih_tm_display_info(ioc, scmd); | 2279 | _scsih_tm_display_info(ioc, scmd); |
2257 | 2280 | ||
2258 | sas_device_priv_data = scmd->device->hostdata; | 2281 | sas_device_priv_data = scmd->device->hostdata; |
2259 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 2282 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { |
2260 | starget_printk(KERN_INFO, starget, "target been deleted! " | 2283 | starget_printk(KERN_INFO, starget, "device been deleted! " |
2261 | "scmd(%p)\n", scmd); | 2284 | "scmd(%p)\n", scmd); |
2262 | scmd->result = DID_NO_CONNECT << 16; | 2285 | scmd->result = DID_NO_CONNECT << 16; |
2263 | scmd->scsi_done(scmd); | 2286 | scmd->scsi_done(scmd); |
@@ -2576,9 +2599,9 @@ _scsih_block_io_to_children_attached_to_ex(struct MPT2SAS_ADAPTER *ioc, | |||
2576 | &sas_expander->sas_port_list, port_list) { | 2599 | &sas_expander->sas_port_list, port_list) { |
2577 | 2600 | ||
2578 | if (mpt2sas_port->remote_identify.device_type == | 2601 | if (mpt2sas_port->remote_identify.device_type == |
2579 | MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || | 2602 | SAS_EDGE_EXPANDER_DEVICE || |
2580 | mpt2sas_port->remote_identify.device_type == | 2603 | mpt2sas_port->remote_identify.device_type == |
2581 | MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) { | 2604 | SAS_FANOUT_EXPANDER_DEVICE) { |
2582 | 2605 | ||
2583 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 2606 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
2584 | expander_sibling = | 2607 | expander_sibling = |
@@ -2715,9 +2738,10 @@ static u8 | |||
2715 | _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, | 2738 | _scsih_sas_control_complete(struct MPT2SAS_ADAPTER *ioc, u16 smid, |
2716 | u8 msix_index, u32 reply) | 2739 | u8 msix_index, u32 reply) |
2717 | { | 2740 | { |
2741 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | ||
2718 | Mpi2SasIoUnitControlReply_t *mpi_reply = | 2742 | Mpi2SasIoUnitControlReply_t *mpi_reply = |
2719 | mpt2sas_base_get_reply_virt_addr(ioc, reply); | 2743 | mpt2sas_base_get_reply_virt_addr(ioc, reply); |
2720 | 2744 | #endif | |
2721 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT | 2745 | dewtprintk(ioc, printk(MPT2SAS_INFO_FMT |
2722 | "sc_complete:handle(0x%04x), (open) " | 2746 | "sc_complete:handle(0x%04x), (open) " |
2723 | "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", | 2747 | "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", |
@@ -3963,6 +3987,7 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc) | |||
3963 | Mpi2ConfigReply_t mpi_reply; | 3987 | Mpi2ConfigReply_t mpi_reply; |
3964 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; | 3988 | Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; |
3965 | u16 attached_handle; | 3989 | u16 attached_handle; |
3990 | u8 link_rate; | ||
3966 | 3991 | ||
3967 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT | 3992 | dtmprintk(ioc, printk(MPT2SAS_INFO_FMT |
3968 | "updating handles for sas_host(0x%016llx)\n", | 3993 | "updating handles for sas_host(0x%016llx)\n", |
@@ -3984,15 +4009,17 @@ _scsih_sas_host_refresh(struct MPT2SAS_ADAPTER *ioc) | |||
3984 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) | 4009 | if (ioc_status != MPI2_IOCSTATUS_SUCCESS) |
3985 | goto out; | 4010 | goto out; |
3986 | for (i = 0; i < ioc->sas_hba.num_phys ; i++) { | 4011 | for (i = 0; i < ioc->sas_hba.num_phys ; i++) { |
4012 | link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; | ||
3987 | if (i == 0) | 4013 | if (i == 0) |
3988 | ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> | 4014 | ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> |
3989 | PhyData[0].ControllerDevHandle); | 4015 | PhyData[0].ControllerDevHandle); |
3990 | ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; | 4016 | ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; |
3991 | attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. | 4017 | attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. |
3992 | AttachedDevHandle); | 4018 | AttachedDevHandle); |
4019 | if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) | ||
4020 | link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; | ||
3993 | mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address, | 4021 | mpt2sas_transport_update_links(ioc, ioc->sas_hba.sas_address, |
3994 | attached_handle, i, sas_iounit_pg0->PhyData[i]. | 4022 | attached_handle, i, link_rate); |
3995 | NegotiatedLinkRate >> 4); | ||
3996 | } | 4023 | } |
3997 | out: | 4024 | out: |
3998 | kfree(sas_iounit_pg0); | 4025 | kfree(sas_iounit_pg0); |
@@ -4336,14 +4363,14 @@ _scsih_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4336 | } | 4363 | } |
4337 | 4364 | ||
4338 | /** | 4365 | /** |
4339 | * _scsih_expander_remove - removing expander object | 4366 | * mpt2sas_expander_remove - removing expander object |
4340 | * @ioc: per adapter object | 4367 | * @ioc: per adapter object |
4341 | * @sas_address: expander sas_address | 4368 | * @sas_address: expander sas_address |
4342 | * | 4369 | * |
4343 | * Return nothing. | 4370 | * Return nothing. |
4344 | */ | 4371 | */ |
4345 | static void | 4372 | void |
4346 | _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) | 4373 | mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) |
4347 | { | 4374 | { |
4348 | struct _sas_node *sas_expander; | 4375 | struct _sas_node *sas_expander; |
4349 | unsigned long flags; | 4376 | unsigned long flags; |
@@ -4354,6 +4381,11 @@ _scsih_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) | |||
4354 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 4381 | spin_lock_irqsave(&ioc->sas_node_lock, flags); |
4355 | sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, | 4382 | sas_expander = mpt2sas_scsih_expander_find_by_sas_address(ioc, |
4356 | sas_address); | 4383 | sas_address); |
4384 | if (!sas_expander) { | ||
4385 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | ||
4386 | return; | ||
4387 | } | ||
4388 | list_del(&sas_expander->list); | ||
4357 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 4389 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
4358 | _scsih_expander_node_remove(ioc, sas_expander); | 4390 | _scsih_expander_node_remove(ioc, sas_expander); |
4359 | } | 4391 | } |
@@ -4643,6 +4675,33 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc, | |||
4643 | sas_device_backup.sas_address)); | 4675 | sas_device_backup.sas_address)); |
4644 | } | 4676 | } |
4645 | 4677 | ||
4678 | /** | ||
4679 | * mpt2sas_device_remove - removing device object | ||
4680 | * @ioc: per adapter object | ||
4681 | * @sas_address: device sas_address | ||
4682 | * | ||
4683 | * Return nothing. | ||
4684 | */ | ||
4685 | void | ||
4686 | mpt2sas_device_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address) | ||
4687 | { | ||
4688 | struct _sas_device *sas_device; | ||
4689 | unsigned long flags; | ||
4690 | |||
4691 | if (ioc->shost_recovery) | ||
4692 | return; | ||
4693 | |||
4694 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
4695 | sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | ||
4696 | sas_address); | ||
4697 | if (!sas_device) { | ||
4698 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
4699 | return; | ||
4700 | } | ||
4701 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
4702 | _scsih_remove_device(ioc, sas_device); | ||
4703 | } | ||
4704 | |||
4646 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING | 4705 | #ifdef CONFIG_SCSI_MPT2SAS_LOGGING |
4647 | /** | 4706 | /** |
4648 | * _scsih_sas_topology_change_event_debug - debug for topology event | 4707 | * _scsih_sas_topology_change_event_debug - debug for topology event |
@@ -4737,7 +4796,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
4737 | int i; | 4796 | int i; |
4738 | u16 parent_handle, handle; | 4797 | u16 parent_handle, handle; |
4739 | u16 reason_code; | 4798 | u16 reason_code; |
4740 | u8 phy_number; | 4799 | u8 phy_number, max_phys; |
4741 | struct _sas_node *sas_expander; | 4800 | struct _sas_node *sas_expander; |
4742 | struct _sas_device *sas_device; | 4801 | struct _sas_device *sas_device; |
4743 | u64 sas_address; | 4802 | u64 sas_address; |
@@ -4775,11 +4834,13 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
4775 | sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, | 4834 | sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, |
4776 | parent_handle); | 4835 | parent_handle); |
4777 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | 4836 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); |
4778 | if (sas_expander) | 4837 | if (sas_expander) { |
4779 | sas_address = sas_expander->sas_address; | 4838 | sas_address = sas_expander->sas_address; |
4780 | else if (parent_handle < ioc->sas_hba.num_phys) | 4839 | max_phys = sas_expander->num_phys; |
4840 | } else if (parent_handle < ioc->sas_hba.num_phys) { | ||
4781 | sas_address = ioc->sas_hba.sas_address; | 4841 | sas_address = ioc->sas_hba.sas_address; |
4782 | else | 4842 | max_phys = ioc->sas_hba.num_phys; |
4843 | } else | ||
4783 | return; | 4844 | return; |
4784 | 4845 | ||
4785 | /* handle siblings events */ | 4846 | /* handle siblings events */ |
@@ -4793,6 +4854,8 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
4793 | ioc->pci_error_recovery) | 4854 | ioc->pci_error_recovery) |
4794 | return; | 4855 | return; |
4795 | phy_number = event_data->StartPhyNum + i; | 4856 | phy_number = event_data->StartPhyNum + i; |
4857 | if (phy_number >= max_phys) | ||
4858 | continue; | ||
4796 | reason_code = event_data->PHY[i].PhyStatus & | 4859 | reason_code = event_data->PHY[i].PhyStatus & |
4797 | MPI2_EVENT_SAS_TOPO_RC_MASK; | 4860 | MPI2_EVENT_SAS_TOPO_RC_MASK; |
4798 | if ((event_data->PHY[i].PhyStatus & | 4861 | if ((event_data->PHY[i].PhyStatus & |
@@ -4844,7 +4907,7 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc, | |||
4844 | /* handle expander removal */ | 4907 | /* handle expander removal */ |
4845 | if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && | 4908 | if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && |
4846 | sas_expander) | 4909 | sas_expander) |
4847 | _scsih_expander_remove(ioc, sas_address); | 4910 | mpt2sas_expander_remove(ioc, sas_address); |
4848 | 4911 | ||
4849 | } | 4912 | } |
4850 | 4913 | ||
@@ -5773,90 +5836,6 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc, | |||
5773 | } | 5836 | } |
5774 | 5837 | ||
5775 | /** | 5838 | /** |
5776 | * _scsih_task_set_full - handle task set full | ||
5777 | * @ioc: per adapter object | ||
5778 | * @fw_event: The fw_event_work object | ||
5779 | * Context: user. | ||
5780 | * | ||
5781 | * Throttle back qdepth. | ||
5782 | */ | ||
5783 | static void | ||
5784 | _scsih_task_set_full(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work | ||
5785 | *fw_event) | ||
5786 | { | ||
5787 | unsigned long flags; | ||
5788 | struct _sas_device *sas_device; | ||
5789 | static struct _raid_device *raid_device; | ||
5790 | struct scsi_device *sdev; | ||
5791 | int depth; | ||
5792 | u16 current_depth; | ||
5793 | u16 handle; | ||
5794 | int id, channel; | ||
5795 | u64 sas_address; | ||
5796 | Mpi2EventDataTaskSetFull_t *event_data = fw_event->event_data; | ||
5797 | |||
5798 | current_depth = le16_to_cpu(event_data->CurrentDepth); | ||
5799 | handle = le16_to_cpu(event_data->DevHandle); | ||
5800 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | ||
5801 | sas_device = _scsih_sas_device_find_by_handle(ioc, handle); | ||
5802 | if (!sas_device) { | ||
5803 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
5804 | return; | ||
5805 | } | ||
5806 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
5807 | id = sas_device->id; | ||
5808 | channel = sas_device->channel; | ||
5809 | sas_address = sas_device->sas_address; | ||
5810 | |||
5811 | /* if hidden raid component, then change to volume characteristics */ | ||
5812 | if (test_bit(handle, ioc->pd_handles) && sas_device->volume_handle) { | ||
5813 | spin_lock_irqsave(&ioc->raid_device_lock, flags); | ||
5814 | raid_device = _scsih_raid_device_find_by_handle( | ||
5815 | ioc, sas_device->volume_handle); | ||
5816 | spin_unlock_irqrestore(&ioc->raid_device_lock, flags); | ||
5817 | if (raid_device) { | ||
5818 | id = raid_device->id; | ||
5819 | channel = raid_device->channel; | ||
5820 | handle = raid_device->handle; | ||
5821 | sas_address = raid_device->wwid; | ||
5822 | } | ||
5823 | } | ||
5824 | |||
5825 | if (ioc->logging_level & MPT_DEBUG_TASK_SET_FULL) | ||
5826 | starget_printk(KERN_INFO, sas_device->starget, "task set " | ||
5827 | "full: handle(0x%04x), sas_addr(0x%016llx), depth(%d)\n", | ||
5828 | handle, (unsigned long long)sas_address, current_depth); | ||
5829 | |||
5830 | shost_for_each_device(sdev, ioc->shost) { | ||
5831 | if (sdev->id == id && sdev->channel == channel) { | ||
5832 | if (current_depth > sdev->queue_depth) { | ||
5833 | if (ioc->logging_level & | ||
5834 | MPT_DEBUG_TASK_SET_FULL) | ||
5835 | sdev_printk(KERN_INFO, sdev, "strange " | ||
5836 | "observation, the queue depth is" | ||
5837 | " (%d) meanwhile fw queue depth " | ||
5838 | "is (%d)\n", sdev->queue_depth, | ||
5839 | current_depth); | ||
5840 | continue; | ||
5841 | } | ||
5842 | depth = scsi_track_queue_full(sdev, | ||
5843 | current_depth - 1); | ||
5844 | if (depth > 0) | ||
5845 | sdev_printk(KERN_INFO, sdev, "Queue depth " | ||
5846 | "reduced to (%d)\n", depth); | ||
5847 | else if (depth < 0) | ||
5848 | sdev_printk(KERN_INFO, sdev, "Tagged Command " | ||
5849 | "Queueing is being disabled\n"); | ||
5850 | else if (depth == 0) | ||
5851 | if (ioc->logging_level & | ||
5852 | MPT_DEBUG_TASK_SET_FULL) | ||
5853 | sdev_printk(KERN_INFO, sdev, | ||
5854 | "Queue depth not changed yet\n"); | ||
5855 | } | ||
5856 | } | ||
5857 | } | ||
5858 | |||
5859 | /** | ||
5860 | * _scsih_prep_device_scan - initialize parameters prior to device scan | 5839 | * _scsih_prep_device_scan - initialize parameters prior to device scan |
5861 | * @ioc: per adapter object | 5840 | * @ioc: per adapter object |
5862 | * | 5841 | * |
@@ -6219,7 +6198,7 @@ _scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc) | |||
6219 | sas_expander->responding = 0; | 6198 | sas_expander->responding = 0; |
6220 | continue; | 6199 | continue; |
6221 | } | 6200 | } |
6222 | _scsih_expander_remove(ioc, sas_expander->sas_address); | 6201 | mpt2sas_expander_remove(ioc, sas_expander->sas_address); |
6223 | goto retry_expander_search; | 6202 | goto retry_expander_search; |
6224 | } | 6203 | } |
6225 | } | 6204 | } |
@@ -6343,9 +6322,6 @@ _firmware_event_work(struct work_struct *work) | |||
6343 | case MPI2_EVENT_IR_OPERATION_STATUS: | 6322 | case MPI2_EVENT_IR_OPERATION_STATUS: |
6344 | _scsih_sas_ir_operation_status_event(ioc, fw_event); | 6323 | _scsih_sas_ir_operation_status_event(ioc, fw_event); |
6345 | break; | 6324 | break; |
6346 | case MPI2_EVENT_TASK_SET_FULL: | ||
6347 | _scsih_task_set_full(ioc, fw_event); | ||
6348 | break; | ||
6349 | } | 6325 | } |
6350 | _scsih_fw_event_free(ioc, fw_event); | 6326 | _scsih_fw_event_free(ioc, fw_event); |
6351 | } | 6327 | } |
@@ -6415,7 +6391,6 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, | |||
6415 | case MPI2_EVENT_SAS_DISCOVERY: | 6391 | case MPI2_EVENT_SAS_DISCOVERY: |
6416 | case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: | 6392 | case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: |
6417 | case MPI2_EVENT_IR_PHYSICAL_DISK: | 6393 | case MPI2_EVENT_IR_PHYSICAL_DISK: |
6418 | case MPI2_EVENT_TASK_SET_FULL: | ||
6419 | break; | 6394 | break; |
6420 | 6395 | ||
6421 | default: /* ignore the rest */ | 6396 | default: /* ignore the rest */ |
@@ -6490,56 +6465,23 @@ static void | |||
6490 | _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, | 6465 | _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, |
6491 | struct _sas_node *sas_expander) | 6466 | struct _sas_node *sas_expander) |
6492 | { | 6467 | { |
6493 | struct _sas_port *mpt2sas_port; | 6468 | struct _sas_port *mpt2sas_port, *next; |
6494 | struct _sas_device *sas_device; | ||
6495 | struct _sas_node *expander_sibling; | ||
6496 | unsigned long flags; | ||
6497 | |||
6498 | if (!sas_expander) | ||
6499 | return; | ||
6500 | 6469 | ||
6501 | /* remove sibling ports attached to this expander */ | 6470 | /* remove sibling ports attached to this expander */ |
6502 | retry_device_search: | 6471 | list_for_each_entry_safe(mpt2sas_port, next, |
6503 | list_for_each_entry(mpt2sas_port, | ||
6504 | &sas_expander->sas_port_list, port_list) { | 6472 | &sas_expander->sas_port_list, port_list) { |
6473 | if (ioc->shost_recovery) | ||
6474 | return; | ||
6505 | if (mpt2sas_port->remote_identify.device_type == | 6475 | if (mpt2sas_port->remote_identify.device_type == |
6506 | SAS_END_DEVICE) { | 6476 | SAS_END_DEVICE) |
6507 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 6477 | mpt2sas_device_remove(ioc, |
6508 | sas_device = | 6478 | mpt2sas_port->remote_identify.sas_address); |
6509 | mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | 6479 | else if (mpt2sas_port->remote_identify.device_type == |
6510 | mpt2sas_port->remote_identify.sas_address); | 6480 | SAS_EDGE_EXPANDER_DEVICE || |
6511 | spin_unlock_irqrestore(&ioc->sas_device_lock, flags); | ||
6512 | if (!sas_device) | ||
6513 | continue; | ||
6514 | _scsih_remove_device(ioc, sas_device); | ||
6515 | if (ioc->shost_recovery) | ||
6516 | return; | ||
6517 | goto retry_device_search; | ||
6518 | } | ||
6519 | } | ||
6520 | |||
6521 | retry_expander_search: | ||
6522 | list_for_each_entry(mpt2sas_port, | ||
6523 | &sas_expander->sas_port_list, port_list) { | ||
6524 | |||
6525 | if (mpt2sas_port->remote_identify.device_type == | ||
6526 | MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || | ||
6527 | mpt2sas_port->remote_identify.device_type == | 6481 | mpt2sas_port->remote_identify.device_type == |
6528 | MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) { | 6482 | SAS_FANOUT_EXPANDER_DEVICE) |
6529 | 6483 | mpt2sas_expander_remove(ioc, | |
6530 | spin_lock_irqsave(&ioc->sas_node_lock, flags); | 6484 | mpt2sas_port->remote_identify.sas_address); |
6531 | expander_sibling = | ||
6532 | mpt2sas_scsih_expander_find_by_sas_address( | ||
6533 | ioc, mpt2sas_port->remote_identify.sas_address); | ||
6534 | spin_unlock_irqrestore(&ioc->sas_node_lock, flags); | ||
6535 | if (!expander_sibling) | ||
6536 | continue; | ||
6537 | _scsih_expander_remove(ioc, | ||
6538 | expander_sibling->sas_address); | ||
6539 | if (ioc->shost_recovery) | ||
6540 | return; | ||
6541 | goto retry_expander_search; | ||
6542 | } | ||
6543 | } | 6485 | } |
6544 | 6486 | ||
6545 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, | 6487 | mpt2sas_transport_port_remove(ioc, sas_expander->sas_address, |
@@ -6550,7 +6492,6 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, | |||
6550 | sas_expander->handle, (unsigned long long) | 6492 | sas_expander->handle, (unsigned long long) |
6551 | sas_expander->sas_address); | 6493 | sas_expander->sas_address); |
6552 | 6494 | ||
6553 | list_del(&sas_expander->list); | ||
6554 | kfree(sas_expander->phy); | 6495 | kfree(sas_expander->phy); |
6555 | kfree(sas_expander); | 6496 | kfree(sas_expander); |
6556 | } | 6497 | } |
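The expander teardown no longer restarts its scan with goto after every removal; list_for_each_entry_safe caches the next element up front, so entries can be unlinked while the port list is walked in a single pass. A generic userspace illustration of why the _safe form matters when deleting during traversal (hypothetical node type, not the driver's struct _sas_port):

#include <stdlib.h>

struct node {
	int remove_me;
	struct node *next;
};

/* Walk the list and free matching nodes.  'next' is saved before the
 * current node may be freed -- the same idea as list_for_each_entry_safe(),
 * which is what lets the driver drop its retry-goto loops. */
static void prune(struct node **head)
{
	struct node **pp = head;
	struct node *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;            /* cache before possible free */
		if (cur->remove_me) {
			*pp = next;
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}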
@@ -6668,9 +6609,7 @@ _scsih_remove(struct pci_dev *pdev) | |||
6668 | { | 6609 | { |
6669 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | 6610 | struct Scsi_Host *shost = pci_get_drvdata(pdev); |
6670 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); | 6611 | struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); |
6671 | struct _sas_port *mpt2sas_port; | 6612 | struct _sas_port *mpt2sas_port, *next_port; |
6672 | struct _sas_device *sas_device; | ||
6673 | struct _sas_node *expander_sibling; | ||
6674 | struct _raid_device *raid_device, *next; | 6613 | struct _raid_device *raid_device, *next; |
6675 | struct MPT2SAS_TARGET *sas_target_priv_data; | 6614 | struct MPT2SAS_TARGET *sas_target_priv_data; |
6676 | struct workqueue_struct *wq; | 6615 | struct workqueue_struct *wq; |
@@ -6702,28 +6641,18 @@ _scsih_remove(struct pci_dev *pdev) | |||
6702 | } | 6641 | } |
6703 | 6642 | ||
6704 | /* free ports attached to the sas_host */ | 6643 | /* free ports attached to the sas_host */ |
6705 | retry_again: | 6644 | list_for_each_entry_safe(mpt2sas_port, next_port, |
6706 | list_for_each_entry(mpt2sas_port, | ||
6707 | &ioc->sas_hba.sas_port_list, port_list) { | 6645 | &ioc->sas_hba.sas_port_list, port_list) { |
6708 | if (mpt2sas_port->remote_identify.device_type == | 6646 | if (mpt2sas_port->remote_identify.device_type == |
6709 | SAS_END_DEVICE) { | 6647 | SAS_END_DEVICE) |
6710 | sas_device = | 6648 | mpt2sas_device_remove(ioc, |
6711 | mpt2sas_scsih_sas_device_find_by_sas_address(ioc, | 6649 | mpt2sas_port->remote_identify.sas_address); |
6712 | mpt2sas_port->remote_identify.sas_address); | 6650 | else if (mpt2sas_port->remote_identify.device_type == |
6713 | if (sas_device) { | 6651 | SAS_EDGE_EXPANDER_DEVICE || |
6714 | _scsih_remove_device(ioc, sas_device); | 6652 | mpt2sas_port->remote_identify.device_type == |
6715 | goto retry_again; | 6653 | SAS_FANOUT_EXPANDER_DEVICE) |
6716 | } | 6654 | mpt2sas_expander_remove(ioc, |
6717 | } else { | ||
6718 | expander_sibling = | ||
6719 | mpt2sas_scsih_expander_find_by_sas_address(ioc, | ||
6720 | mpt2sas_port->remote_identify.sas_address); | 6655 | mpt2sas_port->remote_identify.sas_address); |
6721 | if (expander_sibling) { | ||
6722 | _scsih_expander_remove(ioc, | ||
6723 | expander_sibling->sas_address); | ||
6724 | goto retry_again; | ||
6725 | } | ||
6726 | } | ||
6727 | } | 6656 | } |
6728 | 6657 | ||
6729 | /* free phys attached to the sas_host */ | 6658 | /* free phys attached to the sas_host */ |
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index b55c6dc07470..cb1cdecbe0f8 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c | |||
@@ -465,62 +465,149 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, | |||
465 | return rc; | 465 | return rc; |
466 | } | 466 | } |
467 | 467 | ||
468 | /** | ||
469 | * _transport_delete_port - helper function to removing a port | ||
470 | * @ioc: per adapter object | ||
471 | * @mpt2sas_port: mpt2sas per port object | ||
472 | * | ||
473 | * Returns nothing. | ||
474 | */ | ||
475 | static void | ||
476 | _transport_delete_port(struct MPT2SAS_ADAPTER *ioc, | ||
477 | struct _sas_port *mpt2sas_port) | ||
478 | { | ||
479 | u64 sas_address = mpt2sas_port->remote_identify.sas_address; | ||
480 | enum sas_device_type device_type = | ||
481 | mpt2sas_port->remote_identify.device_type; | ||
482 | |||
483 | dev_printk(KERN_INFO, &mpt2sas_port->port->dev, | ||
484 | "remove: sas_addr(0x%016llx)\n", | ||
485 | (unsigned long long) sas_address); | ||
486 | |||
487 | ioc->logging_level |= MPT_DEBUG_TRANSPORT; | ||
488 | if (device_type == SAS_END_DEVICE) | ||
489 | mpt2sas_device_remove(ioc, sas_address); | ||
490 | else if (device_type == SAS_EDGE_EXPANDER_DEVICE || | ||
491 | device_type == SAS_FANOUT_EXPANDER_DEVICE) | ||
492 | mpt2sas_expander_remove(ioc, sas_address); | ||
493 | ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; | ||
494 | } | ||
468 | 495 | ||
469 | /** | 496 | /** |
470 | * _transport_delete_duplicate_port - (see below description) | 497 | * _transport_delete_phy - helper function for removing a single phy from a port |
471 | * @ioc: per adapter object | 498 | * @ioc: per adapter object |
472 | * @sas_node: sas node object (either expander or sas host) | 499 | * @mpt2sas_port: mpt2sas per port object |
473 | * @sas_address: sas address of device being added | 500 | * @mpt2sas_phy: mpt2sas per phy object |
474 | * @phy_num: phy number | ||
475 | * | 501 | * |
476 | * This function is called when attempting to add a new port that is claiming | 502 | * Returns nothing. |
477 | * the same phy resources already in use by another port. If we don't release | 503 | */ |
478 | * the claimed phy resources, the sas transport layer will hang from the BUG | 504 | static void |
479 | * in sas_port_add_phy. | 505 | _transport_delete_phy(struct MPT2SAS_ADAPTER *ioc, |
506 | struct _sas_port *mpt2sas_port, struct _sas_phy *mpt2sas_phy) | ||
507 | { | ||
508 | u64 sas_address = mpt2sas_port->remote_identify.sas_address; | ||
509 | |||
510 | dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev, | ||
511 | "remove: sas_addr(0x%016llx), phy(%d)\n", | ||
512 | (unsigned long long) sas_address, mpt2sas_phy->phy_id); | ||
513 | |||
514 | list_del(&mpt2sas_phy->port_siblings); | ||
515 | mpt2sas_port->num_phys--; | ||
516 | sas_port_delete_phy(mpt2sas_port->port, mpt2sas_phy->phy); | ||
517 | mpt2sas_phy->phy_belongs_to_port = 0; | ||
518 | } | ||
519 | |||
520 | /** | ||
521 | * _transport_add_phy - helper function for adding a single phy to a port | ||
522 | * @ioc: per adapter object | ||
523 | * @mpt2sas_port: mpt2sas per port object | ||
524 | * @mpt2sas_phy: mpt2sas per phy object | ||
480 | * | 525 | * |
481 | * The reason we would hit this issue is becuase someone is changing the | 526 | * Returns nothing. |
482 | * sas address of a device on the fly, meanwhile controller firmware sends | ||
483 | * EVENTs out of order when removing the previous instance of the device. | ||
484 | */ | 527 | */ |
485 | static void | 528 | static void |
486 | _transport_delete_duplicate_port(struct MPT2SAS_ADAPTER *ioc, | 529 | _transport_add_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_port *mpt2sas_port, |
487 | struct _sas_node *sas_node, u64 sas_address, int phy_num) | 530 | struct _sas_phy *mpt2sas_phy) |
488 | { | 531 | { |
489 | struct _sas_port *mpt2sas_port, *mpt2sas_port_duplicate; | 532 | u64 sas_address = mpt2sas_port->remote_identify.sas_address; |
490 | struct _sas_phy *mpt2sas_phy; | ||
491 | 533 | ||
492 | printk(MPT2SAS_ERR_FMT "new device located at sas_addr(0x%016llx), " | 534 | dev_printk(KERN_INFO, &mpt2sas_phy->phy->dev, |
493 | "phy_id(%d)\n", ioc->name, (unsigned long long)sas_address, | 535 | "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) |
494 | phy_num); | 536 | sas_address, mpt2sas_phy->phy_id); |
495 | 537 | ||
496 | mpt2sas_port_duplicate = NULL; | 538 | list_add_tail(&mpt2sas_phy->port_siblings, &mpt2sas_port->phy_list); |
497 | list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, port_list) { | 539 | mpt2sas_port->num_phys++; |
498 | dev_printk(KERN_ERR, &mpt2sas_port->port->dev, | 540 | sas_port_add_phy(mpt2sas_port->port, mpt2sas_phy->phy); |
499 | "existing device at sas_addr(0x%016llx), num_phys(%d)\n", | 541 | mpt2sas_phy->phy_belongs_to_port = 1; |
500 | (unsigned long long) | 542 | } |
501 | mpt2sas_port->remote_identify.sas_address, | 543 | |
502 | mpt2sas_port->num_phys); | 544 | /** |
503 | list_for_each_entry(mpt2sas_phy, &mpt2sas_port->phy_list, | 545 | * _transport_add_phy_to_an_existing_port - adding new phy to existing port |
546 | * @ioc: per adapter object | ||
547 | * @sas_node: sas node object (either expander or sas host) | ||
548 | * @mpt2sas_phy: mpt2sas per phy object | ||
549 | * @sas_address: sas address of device/expander where the phy needs to be added | ||
550 | * | ||
551 | * Returns nothing. | ||
552 | */ | ||
553 | static void | ||
554 | _transport_add_phy_to_an_existing_port(struct MPT2SAS_ADAPTER *ioc, | ||
555 | struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy, u64 sas_address) | ||
556 | { | ||
557 | struct _sas_port *mpt2sas_port; | ||
558 | struct _sas_phy *phy_srch; | ||
559 | |||
560 | if (mpt2sas_phy->phy_belongs_to_port == 1) | ||
561 | return; | ||
562 | |||
563 | list_for_each_entry(mpt2sas_port, &sas_node->sas_port_list, | ||
564 | port_list) { | ||
565 | if (mpt2sas_port->remote_identify.sas_address != | ||
566 | sas_address) | ||
567 | continue; | ||
568 | list_for_each_entry(phy_srch, &mpt2sas_port->phy_list, | ||
504 | port_siblings) { | 569 | port_siblings) { |
505 | dev_printk(KERN_ERR, &mpt2sas_phy->phy->dev, | 570 | if (phy_srch == mpt2sas_phy) |
506 | "phy_number(%d)\n", mpt2sas_phy->phy_id); | 571 | return; |
507 | if (mpt2sas_phy->phy_id == phy_num) | ||
508 | mpt2sas_port_duplicate = mpt2sas_port; | ||
509 | } | 572 | } |
573 | _transport_add_phy(ioc, mpt2sas_port, mpt2sas_phy); | ||
574 | return; | ||
510 | } | 575 | } |
511 | 576 | ||
512 | if (!mpt2sas_port_duplicate) | 577 | } |
578 | |||
579 | /** | ||
580 | * _transport_del_phy_from_an_existing_port - delete phy from existing port | ||
581 | * @ioc: per adapter object | ||
582 | * @sas_node: sas node object (either expander or sas host) | ||
583 | * @mpt2sas_phy: mpt2sas per phy object | ||
584 | * | ||
585 | * Returns nothing. | ||
586 | */ | ||
587 | static void | ||
588 | _transport_del_phy_from_an_existing_port(struct MPT2SAS_ADAPTER *ioc, | ||
589 | struct _sas_node *sas_node, struct _sas_phy *mpt2sas_phy) | ||
590 | { | ||
591 | struct _sas_port *mpt2sas_port, *next; | ||
592 | struct _sas_phy *phy_srch; | ||
593 | |||
594 | if (mpt2sas_phy->phy_belongs_to_port == 0) | ||
513 | return; | 595 | return; |
514 | 596 | ||
515 | dev_printk(KERN_ERR, &mpt2sas_port_duplicate->port->dev, | 597 | list_for_each_entry_safe(mpt2sas_port, next, &sas_node->sas_port_list, |
516 | "deleting duplicate device at sas_addr(0x%016llx), phy(%d)!!!!\n", | 598 | port_list) { |
517 | (unsigned long long) | 599 | list_for_each_entry(phy_srch, &mpt2sas_port->phy_list, |
518 | mpt2sas_port_duplicate->remote_identify.sas_address, phy_num); | 600 | port_siblings) { |
519 | ioc->logging_level |= MPT_DEBUG_TRANSPORT; | 601 | if (phy_srch != mpt2sas_phy) |
520 | mpt2sas_transport_port_remove(ioc, | 602 | continue; |
521 | mpt2sas_port_duplicate->remote_identify.sas_address, | 603 | if (mpt2sas_port->num_phys == 1) |
522 | sas_node->sas_address); | 604 | _transport_delete_port(ioc, mpt2sas_port); |
523 | ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; | 605 | else |
606 | _transport_delete_phy(ioc, mpt2sas_port, | ||
607 | mpt2sas_phy); | ||
608 | return; | ||
609 | } | ||
610 | } | ||
524 | } | 611 | } |
525 | 612 | ||
526 | /** | 613 | /** |
@@ -537,11 +624,13 @@ _transport_sanity_check(struct MPT2SAS_ADAPTER *ioc, struct _sas_node *sas_node, | |||
537 | { | 624 | { |
538 | int i; | 625 | int i; |
539 | 626 | ||
540 | for (i = 0; i < sas_node->num_phys; i++) | 627 | for (i = 0; i < sas_node->num_phys; i++) { |
541 | if (sas_node->phy[i].remote_identify.sas_address == sas_address) | 628 | if (sas_node->phy[i].remote_identify.sas_address != sas_address) |
542 | if (sas_node->phy[i].phy_belongs_to_port) | 629 | continue; |
543 | _transport_delete_duplicate_port(ioc, sas_node, | 630 | if (sas_node->phy[i].phy_belongs_to_port == 1) |
544 | sas_address, i); | 631 | _transport_del_phy_from_an_existing_port(ioc, sas_node, |
632 | &sas_node->phy[i]); | ||
633 | } | ||
545 | } | 634 | } |
546 | 635 | ||
547 | /** | 636 | /** |
@@ -905,10 +994,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, | |||
905 | 994 | ||
906 | mpt2sas_phy = &sas_node->phy[phy_number]; | 995 | mpt2sas_phy = &sas_node->phy[phy_number]; |
907 | mpt2sas_phy->attached_handle = handle; | 996 | mpt2sas_phy->attached_handle = handle; |
908 | if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) | 997 | if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { |
909 | _transport_set_identify(ioc, handle, | 998 | _transport_set_identify(ioc, handle, |
910 | &mpt2sas_phy->remote_identify); | 999 | &mpt2sas_phy->remote_identify); |
911 | else | 1000 | _transport_add_phy_to_an_existing_port(ioc, sas_node, |
1001 | mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address); | ||
1002 | } else | ||
912 | memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct | 1003 | memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct |
913 | sas_identify)); | 1004 | sas_identify)); |
914 | 1005 | ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index bc8194f74625..44578b56ad0a 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1309,6 +1309,31 @@ qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, | |||
1309 | } | 1309 | } |
1310 | 1310 | ||
1311 | static ssize_t | 1311 | static ssize_t |
1312 | qla2x00_thermal_temp_show(struct device *dev, | ||
1313 | struct device_attribute *attr, char *buf) | ||
1314 | { | ||
1315 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | ||
1316 | int rval = QLA_FUNCTION_FAILED; | ||
1317 | uint16_t temp, frac; | ||
1318 | |||
1319 | if (!vha->hw->flags.thermal_supported) | ||
1320 | return snprintf(buf, PAGE_SIZE, "\n"); | ||
1321 | |||
1322 | temp = frac = 0; | ||
1323 | if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || | ||
1324 | test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) | ||
1325 | DEBUG2_3_11(printk(KERN_WARNING | ||
1326 | "%s(%ld): isp reset in progress.\n", | ||
1327 | __func__, vha->host_no)); | ||
1328 | else if (!vha->hw->flags.eeh_busy) | ||
1329 | rval = qla2x00_get_thermal_temp(vha, &temp, &frac); | ||
1330 | if (rval != QLA_SUCCESS) | ||
1331 | temp = frac = 0; | ||
1332 | |||
1333 | return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac); | ||
1334 | } | ||
1335 | |||
1336 | static ssize_t | ||
1312 | qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, | 1337 | qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, |
1313 | char *buf) | 1338 | char *buf) |
1314 | { | 1339 | { |
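The new thermal_temp attribute only queries the firmware when it is safe to do so: unsupported hardware reports an empty line, an ISP reset or EEH recovery in progress skips the mailbox call, and any failure falls back to 0.00. Below is a hedged userspace sketch of just the guard and formatting logic; qla2x00_get_thermal_temp itself is a firmware mailbox call and is stubbed out, and the struct and function names here are made up.

#include <stdio.h>

struct hba_flags {
	unsigned thermal_supported:1;
	unsigned reset_active:1;
	unsigned eeh_busy:1;
};

/* Stand-in for the mailbox query; returns 0 on success. */
static int get_thermal_temp(unsigned short *temp, unsigned short *frac)
{
	*temp = 38;
	*frac = 50;
	return 0;
}

static int thermal_temp_show(const struct hba_flags *f, char *buf, size_t len)
{
	unsigned short temp = 0, frac = 0;

	if (!f->thermal_supported)
		return snprintf(buf, len, "\n");
	if (!f->reset_active && !f->eeh_busy)
		if (get_thermal_temp(&temp, &frac))
			temp = frac = 0;        /* query failed: report 0.00 */
	return snprintf(buf, len, "%d.%02d\n", temp, frac);
}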
@@ -1366,6 +1391,7 @@ static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, | |||
1366 | qla2x00_vn_port_mac_address_show, NULL); | 1391 | qla2x00_vn_port_mac_address_show, NULL); |
1367 | static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); | 1392 | static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); |
1368 | static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); | 1393 | static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); |
1394 | static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL); | ||
1369 | 1395 | ||
1370 | struct device_attribute *qla2x00_host_attrs[] = { | 1396 | struct device_attribute *qla2x00_host_attrs[] = { |
1371 | &dev_attr_driver_version, | 1397 | &dev_attr_driver_version, |
@@ -1394,6 +1420,7 @@ struct device_attribute *qla2x00_host_attrs[] = { | |||
1394 | &dev_attr_fabric_param, | 1420 | &dev_attr_fabric_param, |
1395 | &dev_attr_fw_state, | 1421 | &dev_attr_fw_state, |
1396 | &dev_attr_optrom_gold_fw_version, | 1422 | &dev_attr_optrom_gold_fw_version, |
1423 | &dev_attr_thermal_temp, | ||
1397 | NULL, | 1424 | NULL, |
1398 | }; | 1425 | }; |
1399 | 1426 | ||
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 31a4121a2be1..903b0586ded3 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c | |||
@@ -103,7 +103,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job) | |||
103 | 103 | ||
104 | bsg_job->reply->reply_payload_rcv_len = 0; | 104 | bsg_job->reply->reply_payload_rcv_len = 0; |
105 | 105 | ||
106 | if (!IS_QLA24XX_TYPE(ha) || !IS_QLA25XX(ha)) { | 106 | if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) { |
107 | ret = -EINVAL; | 107 | ret = -EINVAL; |
108 | goto exit_fcp_prio_cfg; | 108 | goto exit_fcp_prio_cfg; |
109 | } | 109 | } |
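The small-looking change above is a real logic fix: since no adapter is both a 24xx and a 25xx, the old test "!A || !B" was true for every adapter and the ioctl always returned -EINVAL. The command should be rejected only when the adapter is neither type, which is what the regrouped condition expresses. In plain C terms:

/* Reject only when the HBA is neither type (De Morgan's law):
 *   !(is_24xx || is_25xx)  ==  (!is_24xx && !is_25xx)
 * whereas the old "!is_24xx || !is_25xx" is true whenever the HBA is
 * not *both* types at once, i.e. for every adapter. */
static int should_reject(int is_24xx, int is_25xx)
{
	return !(is_24xx || is_25xx);
}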
@@ -753,7 +753,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) | |||
753 | command_sent = INT_DEF_LB_LOOPBACK_CMD; | 753 | command_sent = INT_DEF_LB_LOOPBACK_CMD; |
754 | rval = qla2x00_loopback_test(vha, &elreq, response); | 754 | rval = qla2x00_loopback_test(vha, &elreq, response); |
755 | 755 | ||
756 | if (new_config[1]) { | 756 | if (new_config[0]) { |
757 | /* Revert back to original port config | 757 | /* Revert back to original port config |
758 | * Also clear internal loopback | 758 | * Also clear internal loopback |
759 | */ | 759 | */ |
@@ -1512,6 +1512,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
1512 | if (((sp_bsg->type == SRB_CT_CMD) || | 1512 | if (((sp_bsg->type == SRB_CT_CMD) || |
1513 | (sp_bsg->type == SRB_ELS_CMD_HST)) | 1513 | (sp_bsg->type == SRB_ELS_CMD_HST)) |
1514 | && (sp_bsg->u.bsg_job == bsg_job)) { | 1514 | && (sp_bsg->u.bsg_job == bsg_job)) { |
1515 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1515 | if (ha->isp_ops->abort_command(sp)) { | 1516 | if (ha->isp_ops->abort_command(sp)) { |
1516 | DEBUG2(qla_printk(KERN_INFO, ha, | 1517 | DEBUG2(qla_printk(KERN_INFO, ha, |
1517 | "scsi(%ld): mbx " | 1518 | "scsi(%ld): mbx " |
@@ -1527,6 +1528,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job) | |||
1527 | bsg_job->req->errors = | 1528 | bsg_job->req->errors = |
1528 | bsg_job->reply->result = 0; | 1529 | bsg_job->reply->result = 0; |
1529 | } | 1530 | } |
1531 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1530 | goto done; | 1532 | goto done; |
1531 | } | 1533 | } |
1532 | } | 1534 | } |
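The qla_bsg.c timeout hunk above releases ha->hardware_lock around isp_ops->abort_command() and retakes it afterwards; the abort path issues its own mailbox traffic and can end up wanting the same lock, so calling it with the lock held risks a self-deadlock. A small userspace analogue of that unlock/call/relock shape, using a pthread spinlock in place of the kernel primitives (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t hw_lock;

/* Stand-in for an abort routine that grabs the same lock internally. */
static int abort_command(void)
{
        pthread_spin_lock(&hw_lock);
        pthread_spin_unlock(&hw_lock);
        return 0;
}

int main(void)
{
        pthread_spin_init(&hw_lock, PTHREAD_PROCESS_PRIVATE);

        pthread_spin_lock(&hw_lock);
        /* Matching job found while holding the lock: release it before
         * calling into the abort path, then retake it afterwards. */
        pthread_spin_unlock(&hw_lock);
        abort_command();
        pthread_spin_lock(&hw_lock);
        pthread_spin_unlock(&hw_lock);

        printf("abort issued without self-deadlock\n");
        return 0;
}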
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 9ce539d4557e..ccfc8e78be21 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2425,6 +2425,9 @@ struct qla_hw_data { | |||
2425 | uint32_t disable_msix_handshake :1; | 2425 | uint32_t disable_msix_handshake :1; |
2426 | uint32_t fcp_prio_enabled :1; | 2426 | uint32_t fcp_prio_enabled :1; |
2427 | uint32_t fw_hung :1; | 2427 | uint32_t fw_hung :1; |
2428 | uint32_t quiesce_owner:1; | ||
2429 | uint32_t thermal_supported:1; | ||
2430 | /* 26 bits */ | ||
2428 | } flags; | 2431 | } flags; |
2429 | 2432 | ||
2430 | /* This spinlock is used to protect "io transactions", you must | 2433 | /* This spinlock is used to protect "io transactions", you must |
@@ -2863,6 +2866,7 @@ typedef struct scsi_qla_host { | |||
2863 | #define ISP_UNRECOVERABLE 17 | 2866 | #define ISP_UNRECOVERABLE 17 |
2864 | #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ | 2867 | #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ |
2865 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ | 2868 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ |
2869 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ | ||
2866 | 2870 | ||
2867 | uint32_t device_flags; | 2871 | uint32_t device_flags; |
2868 | #define SWITCH_FOUND BIT_0 | 2872 | #define SWITCH_FOUND BIT_0 |
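The qla_def.h hunk grows the per-adapter flags word with quiesce_owner and thermal_supported; each flag is a one-bit bitfield packed into a single 32-bit word, and the trailing "26 bits" comment just tracks how many of the 32 are consumed. A toy model of that layout (field list abridged):

#include <stdio.h>
#include <stdint.h>

struct hw_flags {
        uint32_t fw_hung           : 1;
        uint32_t quiesce_owner     : 1;
        uint32_t thermal_supported : 1;
        /* ... further one-bit flags; 26 of the 32 bits are in use ... */
};

int main(void)
{
        struct hw_flags f = { .thermal_supported = 1 };

        printf("thermal_supported=%u, sizeof=%zu bytes\n",
               f.thermal_supported, sizeof(f));
        return 0;
}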
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 9382a816c133..89e900adb679 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -36,6 +36,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); | |||
36 | extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); | 36 | extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); |
37 | extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *); | 37 | extern int qla81xx_load_risc(scsi_qla_host_t *, uint32_t *); |
38 | 38 | ||
39 | extern int qla2x00_perform_loop_resync(scsi_qla_host_t *); | ||
39 | extern int qla2x00_loop_resync(scsi_qla_host_t *); | 40 | extern int qla2x00_loop_resync(scsi_qla_host_t *); |
40 | 41 | ||
41 | extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); | 42 | extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); |
@@ -45,12 +46,15 @@ extern void qla2x00_update_fcports(scsi_qla_host_t *); | |||
45 | 46 | ||
46 | extern int qla2x00_abort_isp(scsi_qla_host_t *); | 47 | extern int qla2x00_abort_isp(scsi_qla_host_t *); |
47 | extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); | 48 | extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *); |
49 | extern void qla82xx_quiescent_state_cleanup(scsi_qla_host_t *); | ||
48 | 50 | ||
49 | extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); | 51 | extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *); |
50 | 52 | ||
51 | extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); | 53 | extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *); |
52 | extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); | 54 | extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *); |
53 | 55 | ||
56 | extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *); | ||
57 | |||
54 | extern void qla84xx_put_chip(struct scsi_qla_host *); | 58 | extern void qla84xx_put_chip(struct scsi_qla_host *); |
55 | 59 | ||
56 | extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *, | 60 | extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *, |
@@ -68,6 +72,7 @@ extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, | |||
68 | extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *, | 72 | extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *, |
69 | struct srb_iocb *); | 73 | struct srb_iocb *); |
70 | extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); | 74 | extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); |
75 | extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); | ||
71 | 76 | ||
72 | extern fc_port_t * | 77 | extern fc_port_t * |
73 | qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); | 78 | qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t ); |
@@ -90,7 +95,6 @@ extern int ql2xfwloadbin; | |||
90 | extern int ql2xetsenable; | 95 | extern int ql2xetsenable; |
91 | extern int ql2xshiftctondsd; | 96 | extern int ql2xshiftctondsd; |
92 | extern int ql2xdbwr; | 97 | extern int ql2xdbwr; |
93 | extern int ql2xdontresethba; | ||
94 | extern int ql2xasynctmfenable; | 98 | extern int ql2xasynctmfenable; |
95 | extern int ql2xgffidenable; | 99 | extern int ql2xgffidenable; |
96 | extern int ql2xenabledif; | 100 | extern int ql2xenabledif; |
@@ -549,9 +553,11 @@ extern void qla82xx_rom_unlock(struct qla_hw_data *); | |||
549 | 553 | ||
550 | /* ISP 8021 IDC */ | 554 | /* ISP 8021 IDC */ |
551 | extern void qla82xx_clear_drv_active(struct qla_hw_data *); | 555 | extern void qla82xx_clear_drv_active(struct qla_hw_data *); |
556 | extern uint32_t qla82xx_wait_for_state_change(scsi_qla_host_t *, uint32_t); | ||
552 | extern int qla82xx_idc_lock(struct qla_hw_data *); | 557 | extern int qla82xx_idc_lock(struct qla_hw_data *); |
553 | extern void qla82xx_idc_unlock(struct qla_hw_data *); | 558 | extern void qla82xx_idc_unlock(struct qla_hw_data *); |
554 | extern int qla82xx_device_state_handler(scsi_qla_host_t *); | 559 | extern int qla82xx_device_state_handler(scsi_qla_host_t *); |
560 | extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); | ||
555 | 561 | ||
556 | extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, | 562 | extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, |
557 | size_t, char *); | 563 | size_t, char *); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 259f51137493..f948e1a73aec 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -498,6 +498,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
498 | vha->flags.reset_active = 0; | 498 | vha->flags.reset_active = 0; |
499 | ha->flags.pci_channel_io_perm_failure = 0; | 499 | ha->flags.pci_channel_io_perm_failure = 0; |
500 | ha->flags.eeh_busy = 0; | 500 | ha->flags.eeh_busy = 0; |
501 | ha->flags.thermal_supported = 1; | ||
501 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 502 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
502 | atomic_set(&vha->loop_state, LOOP_DOWN); | 503 | atomic_set(&vha->loop_state, LOOP_DOWN); |
503 | vha->device_flags = DFLG_NO_CABLE; | 504 | vha->device_flags = DFLG_NO_CABLE; |
@@ -2023,6 +2024,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
2023 | &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); | 2024 | &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); |
2024 | if (rval != QLA_SUCCESS) { | 2025 | if (rval != QLA_SUCCESS) { |
2025 | if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || | 2026 | if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || |
2027 | IS_QLA8XXX_TYPE(ha) || | ||
2026 | (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { | 2028 | (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { |
2027 | DEBUG2(printk("%s(%ld) Loop is in a transition state\n", | 2029 | DEBUG2(printk("%s(%ld) Loop is in a transition state\n", |
2028 | __func__, vha->host_no)); | 2030 | __func__, vha->host_no)); |
@@ -2928,6 +2930,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2928 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); | 2930 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); |
2929 | 2931 | ||
2930 | qla2x00_iidma_fcport(vha, fcport); | 2932 | qla2x00_iidma_fcport(vha, fcport); |
2933 | qla24xx_update_fcport_fcp_prio(vha, fcport); | ||
2931 | qla2x00_reg_remote_port(vha, fcport); | 2934 | qla2x00_reg_remote_port(vha, fcport); |
2932 | atomic_set(&fcport->state, FCS_ONLINE); | 2935 | atomic_set(&fcport->state, FCS_ONLINE); |
2933 | } | 2936 | } |
@@ -3844,6 +3847,37 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) | |||
3844 | return (rval); | 3847 | return (rval); |
3845 | } | 3848 | } |
3846 | 3849 | ||
3850 | /* | ||
3851 | * qla2x00_perform_loop_resync | ||
3852 | * Description: This function will set the appropriate flags and call | ||
3853 | * qla2x00_loop_resync. If successful, the loop will be resynced. | ||
3854 | * Arguments : scsi_qla_host_t pointer | ||
3855 | * return : Success or Failure | ||
3856 | */ | ||
3857 | |||
3858 | int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) | ||
3859 | { | ||
3860 | int32_t rval = 0; | ||
3861 | |||
3862 | if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) { | ||
3863 | /* Configure the flags so that resync happens properly */ | ||
3864 | atomic_set(&ha->loop_down_timer, 0); | ||
3865 | if (!(ha->device_flags & DFLG_NO_CABLE)) { | ||
3866 | atomic_set(&ha->loop_state, LOOP_UP); | ||
3867 | set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); | ||
3868 | set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); | ||
3869 | set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); | ||
3870 | |||
3871 | rval = qla2x00_loop_resync(ha); | ||
3872 | } else | ||
3873 | atomic_set(&ha->loop_state, LOOP_DEAD); | ||
3874 | |||
3875 | clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); | ||
3876 | } | ||
3877 | |||
3878 | return rval; | ||
3879 | } | ||
3880 | |||
3847 | void | 3881 | void |
3848 | qla2x00_update_fcports(scsi_qla_host_t *base_vha) | 3882 | qla2x00_update_fcports(scsi_qla_host_t *base_vha) |
3849 | { | 3883 | { |
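qla2x00_perform_loop_resync() above gates the whole operation on test_and_set_bit(LOOP_RESYNC_ACTIVE, ...), so only the caller that wins the bit actually runs the resync and concurrent callers fall through harmlessly. A userspace analogue of that guard using C11 atomics in place of the kernel bitops (a sketch, not the driver's code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resync_active = ATOMIC_FLAG_INIT;

static int perform_loop_resync(void)
{
        /* test_and_set returns the previous value: nonzero means another
         * caller already owns the resync, so just return. */
        if (atomic_flag_test_and_set(&resync_active))
                return 0;

        /* ... set loop-up / local-loop-update style flags and resync ... */

        atomic_flag_clear(&resync_active);
        return 1;
}

int main(void)
{
        printf("first call ran resync: %d\n", perform_loop_resync());
        printf("second call (after clear) ran: %d\n", perform_loop_resync());
        return 0;
}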
@@ -3857,7 +3891,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) | |||
3857 | list_for_each_entry(vha, &base_vha->hw->vp_list, list) { | 3891 | list_for_each_entry(vha, &base_vha->hw->vp_list, list) { |
3858 | atomic_inc(&vha->vref_count); | 3892 | atomic_inc(&vha->vref_count); |
3859 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 3893 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
3860 | if (fcport && fcport->drport && | 3894 | if (fcport->drport && |
3861 | atomic_read(&fcport->state) != FCS_UNCONFIGURED) { | 3895 | atomic_read(&fcport->state) != FCS_UNCONFIGURED) { |
3862 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 3896 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
3863 | 3897 | ||
@@ -3871,11 +3905,43 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) | |||
3871 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 3905 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
3872 | } | 3906 | } |
3873 | 3907 | ||
3908 | /* | ||
3909 | * qla82xx_quiescent_state_cleanup | ||
3910 | * Description: This function blocks new I/Os; | ||
3911 | * it does not abort any I/Os, as the context | ||
3912 | * is not destroyed during quiescence | ||
3913 | * Arguments: scsi_qla_host_t | ||
3914 | * return : void | ||
3915 | */ | ||
3916 | void | ||
3917 | qla82xx_quiescent_state_cleanup(scsi_qla_host_t *vha) | ||
3918 | { | ||
3919 | struct qla_hw_data *ha = vha->hw; | ||
3920 | struct scsi_qla_host *vp; | ||
3921 | |||
3922 | qla_printk(KERN_INFO, ha, | ||
3923 | "Performing ISP error recovery - ha= %p.\n", ha); | ||
3924 | |||
3925 | atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); | ||
3926 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | ||
3927 | atomic_set(&vha->loop_state, LOOP_DOWN); | ||
3928 | qla2x00_mark_all_devices_lost(vha, 0); | ||
3929 | list_for_each_entry(vp, &ha->vp_list, list) | ||
3930 | qla2x00_mark_all_devices_lost(vha, 0); | ||
3931 | } else { | ||
3932 | if (!atomic_read(&vha->loop_down_timer)) | ||
3933 | atomic_set(&vha->loop_down_timer, | ||
3934 | LOOP_DOWN_TIME); | ||
3935 | } | ||
3936 | /* Wait for pending cmds to complete */ | ||
3937 | qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST); | ||
3938 | } | ||
3939 | |||
3874 | void | 3940 | void |
3875 | qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | 3941 | qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) |
3876 | { | 3942 | { |
3877 | struct qla_hw_data *ha = vha->hw; | 3943 | struct qla_hw_data *ha = vha->hw; |
3878 | struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev); | 3944 | struct scsi_qla_host *vp; |
3879 | unsigned long flags; | 3945 | unsigned long flags; |
3880 | 3946 | ||
3881 | vha->flags.online = 0; | 3947 | vha->flags.online = 0; |
@@ -3896,7 +3962,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
3896 | qla2x00_mark_all_devices_lost(vha, 0); | 3962 | qla2x00_mark_all_devices_lost(vha, 0); |
3897 | 3963 | ||
3898 | spin_lock_irqsave(&ha->vport_slock, flags); | 3964 | spin_lock_irqsave(&ha->vport_slock, flags); |
3899 | list_for_each_entry(vp, &base_vha->hw->vp_list, list) { | 3965 | list_for_each_entry(vp, &ha->vp_list, list) { |
3900 | atomic_inc(&vp->vref_count); | 3966 | atomic_inc(&vp->vref_count); |
3901 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 3967 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
3902 | 3968 | ||
@@ -5410,7 +5476,7 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha) | |||
5410 | * the tag (priority) value is returned. | 5476 | * the tag (priority) value is returned. |
5411 | * | 5477 | * |
5412 | * Input: | 5478 | * Input: |
5413 | * ha = adapter block po | 5479 | * vha = scsi host structure pointer. |
5414 | * fcport = port structure pointer. | 5480 | * fcport = port structure pointer. |
5415 | * | 5481 | * |
5416 | * Return: | 5482 | * Return: |
@@ -5504,7 +5570,7 @@ qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
5504 | * Activates fcp priority for the logged in fc port | 5570 | * Activates fcp priority for the logged in fc port |
5505 | * | 5571 | * |
5506 | * Input: | 5572 | * Input: |
5507 | * ha = adapter block pointer. | 5573 | * vha = scsi host structure pointer. |
5508 | * fcp = port structure pointer. | 5574 | * fcp = port structure pointer. |
5509 | * | 5575 | * |
5510 | * Return: | 5576 | * Return: |
@@ -5514,25 +5580,24 @@ qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
5514 | * Kernel context. | 5580 | * Kernel context. |
5515 | */ | 5581 | */ |
5516 | int | 5582 | int |
5517 | qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *ha, fc_port_t *fcport) | 5583 | qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport) |
5518 | { | 5584 | { |
5519 | int ret; | 5585 | int ret; |
5520 | uint8_t priority; | 5586 | uint8_t priority; |
5521 | uint16_t mb[5]; | 5587 | uint16_t mb[5]; |
5522 | 5588 | ||
5523 | if (atomic_read(&fcport->state) == FCS_UNCONFIGURED || | 5589 | if (fcport->port_type != FCT_TARGET || |
5524 | fcport->port_type != FCT_TARGET || | 5590 | fcport->loop_id == FC_NO_LOOP_ID) |
5525 | fcport->loop_id == FC_NO_LOOP_ID) | ||
5526 | return QLA_FUNCTION_FAILED; | 5591 | return QLA_FUNCTION_FAILED; |
5527 | 5592 | ||
5528 | priority = qla24xx_get_fcp_prio(ha, fcport); | 5593 | priority = qla24xx_get_fcp_prio(vha, fcport); |
5529 | ret = qla24xx_set_fcp_prio(ha, fcport->loop_id, priority, mb); | 5594 | ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb); |
5530 | if (ret == QLA_SUCCESS) | 5595 | if (ret == QLA_SUCCESS) |
5531 | fcport->fcp_prio = priority; | 5596 | fcport->fcp_prio = priority; |
5532 | else | 5597 | else |
5533 | DEBUG2(printk(KERN_WARNING | 5598 | DEBUG2(printk(KERN_WARNING |
5534 | "scsi(%ld): Unable to activate fcp priority, " | 5599 | "scsi(%ld): Unable to activate fcp priority, " |
5535 | " ret=0x%x\n", ha->host_no, ret)); | 5600 | " ret=0x%x\n", vha->host_no, ret)); |
5536 | 5601 | ||
5537 | return ret; | 5602 | return ret; |
5538 | } | 5603 | } |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 7f77898486a9..d17ed9a94a0c 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -321,6 +321,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) | |||
321 | struct qla_hw_data *ha = vha->hw; | 321 | struct qla_hw_data *ha = vha->hw; |
322 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; | 322 | struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; |
323 | struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; | 323 | struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; |
324 | struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; | ||
324 | uint32_t rscn_entry, host_pid; | 325 | uint32_t rscn_entry, host_pid; |
325 | uint8_t rscn_queue_index; | 326 | uint8_t rscn_queue_index; |
326 | unsigned long flags; | 327 | unsigned long flags; |
@@ -498,6 +499,7 @@ skip_rio: | |||
498 | 499 | ||
499 | case MBA_LOOP_DOWN: /* Loop Down Event */ | 500 | case MBA_LOOP_DOWN: /* Loop Down Event */ |
500 | mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; | 501 | mbx = IS_QLA81XX(ha) ? RD_REG_WORD(®24->mailbox4) : 0; |
502 | mbx = IS_QLA82XX(ha) ? RD_REG_WORD(®82->mailbox_out[4]) : mbx; | ||
501 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " | 503 | DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " |
502 | "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], | 504 | "(%x %x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3], |
503 | mbx)); | 505 | mbx)); |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index effd8a1403d9..e473e9fb363c 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -4125,7 +4125,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, | |||
4125 | return QLA_FUNCTION_FAILED; | 4125 | return QLA_FUNCTION_FAILED; |
4126 | 4126 | ||
4127 | DEBUG11(printk(KERN_INFO | 4127 | DEBUG11(printk(KERN_INFO |
4128 | "%s(%ld): entered.\n", __func__, ha->host_no)); | 4128 | "%s(%ld): entered.\n", __func__, vha->host_no)); |
4129 | 4129 | ||
4130 | mcp->mb[0] = MBC_PORT_PARAMS; | 4130 | mcp->mb[0] = MBC_PORT_PARAMS; |
4131 | mcp->mb[1] = loop_id; | 4131 | mcp->mb[1] = loop_id; |
@@ -4160,6 +4160,71 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, | |||
4160 | } | 4160 | } |
4161 | 4161 | ||
4162 | int | 4162 | int |
4163 | qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac) | ||
4164 | { | ||
4165 | int rval; | ||
4166 | mbx_cmd_t mc; | ||
4167 | mbx_cmd_t *mcp = &mc; | ||
4168 | struct qla_hw_data *ha = vha->hw; | ||
4169 | |||
4170 | DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, ha->host_no)); | ||
4171 | |||
4172 | /* High bits. */ | ||
4173 | mcp->mb[0] = MBC_READ_SFP; | ||
4174 | mcp->mb[1] = 0x98; | ||
4175 | mcp->mb[2] = 0; | ||
4176 | mcp->mb[3] = 0; | ||
4177 | mcp->mb[6] = 0; | ||
4178 | mcp->mb[7] = 0; | ||
4179 | mcp->mb[8] = 1; | ||
4180 | mcp->mb[9] = 0x01; | ||
4181 | mcp->mb[10] = BIT_13|BIT_0; | ||
4182 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | ||
4183 | mcp->in_mb = MBX_1|MBX_0; | ||
4184 | mcp->tov = MBX_TOV_SECONDS; | ||
4185 | mcp->flags = 0; | ||
4186 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4187 | if (rval != QLA_SUCCESS) { | ||
4188 | DEBUG2_3_11(printk(KERN_WARNING | ||
4189 | "%s(%ld): failed=%x (%x).\n", __func__, | ||
4190 | vha->host_no, rval, mcp->mb[0])); | ||
4191 | ha->flags.thermal_supported = 0; | ||
4192 | goto fail; | ||
4193 | } | ||
4194 | *temp = mcp->mb[1] & 0xFF; | ||
4195 | |||
4196 | /* Low bits. */ | ||
4197 | mcp->mb[0] = MBC_READ_SFP; | ||
4198 | mcp->mb[1] = 0x98; | ||
4199 | mcp->mb[2] = 0; | ||
4200 | mcp->mb[3] = 0; | ||
4201 | mcp->mb[6] = 0; | ||
4202 | mcp->mb[7] = 0; | ||
4203 | mcp->mb[8] = 1; | ||
4204 | mcp->mb[9] = 0x10; | ||
4205 | mcp->mb[10] = BIT_13|BIT_0; | ||
4206 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | ||
4207 | mcp->in_mb = MBX_1|MBX_0; | ||
4208 | mcp->tov = MBX_TOV_SECONDS; | ||
4209 | mcp->flags = 0; | ||
4210 | rval = qla2x00_mailbox_command(vha, mcp); | ||
4211 | if (rval != QLA_SUCCESS) { | ||
4212 | DEBUG2_3_11(printk(KERN_WARNING | ||
4213 | "%s(%ld): failed=%x (%x).\n", __func__, | ||
4214 | vha->host_no, rval, mcp->mb[0])); | ||
4215 | ha->flags.thermal_supported = 0; | ||
4216 | goto fail; | ||
4217 | } | ||
4218 | *frac = ((mcp->mb[1] & 0xFF) >> 6) * 25; | ||
4219 | |||
4220 | if (rval == QLA_SUCCESS) | ||
4221 | DEBUG11(printk(KERN_INFO | ||
4222 | "%s(%ld): done.\n", __func__, ha->host_no)); | ||
4223 | fail: | ||
4224 | return rval; | ||
4225 | } | ||
4226 | |||
4227 | int | ||
4163 | qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) | 4228 | qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) |
4164 | { | 4229 | { |
4165 | int rval; | 4230 | int rval; |
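qla2x00_get_thermal_temp() above issues two READ_SFP mailbox commands: the first returns the whole degrees in the low byte of mb[1], the second returns a byte whose top two bits carry the fraction, so ((mb[1] & 0xFF) >> 6) * 25 yields 0, 25, 50 or 75 hundredths. A worked example with made-up mailbox values, matching the "%d.%02d" format used by the sysfs handler:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t mb1_first  = 0x002a;   /* sample: 42 whole degrees */
        uint16_t mb1_second = 0x00c0;   /* sample: top two bits = 3 */

        uint16_t temp = mb1_first & 0xFF;
        uint16_t frac = ((mb1_second & 0xFF) >> 6) * 25;

        printf("%d.%02d\n", temp, frac);        /* prints 42.75 */
        return 0;
}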
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index ae2acacc0003..fdb96a3584a5 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -1079,11 +1079,55 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) | |||
1079 | 1079 | ||
1080 | /* Halt all the individual PEGs and other blocks of the ISP */ | 1080 | /* Halt all the individual PEGs and other blocks of the ISP */ |
1081 | qla82xx_rom_lock(ha); | 1081 | qla82xx_rom_lock(ha); |
1082 | |||
1083 | /* mask all niu interrupts */ | ||
1084 | qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); | ||
1085 | /* disable xge rx/tx */ | ||
1086 | qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); | ||
1087 | /* disable xg1 rx/tx */ | ||
1088 | qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); | ||
1089 | |||
1090 | /* halt sre */ | ||
1091 | val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); | ||
1092 | qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); | ||
1093 | |||
1094 | /* halt epg */ | ||
1095 | qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); | ||
1096 | |||
1097 | /* halt timers */ | ||
1098 | qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); | ||
1099 | qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); | ||
1100 | qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); | ||
1101 | qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); | ||
1102 | qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); | ||
1103 | |||
1104 | /* halt pegs */ | ||
1105 | qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); | ||
1106 | qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); | ||
1107 | qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); | ||
1108 | qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); | ||
1109 | qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); | ||
1110 | |||
1111 | /* big hammer */ | ||
1112 | msleep(1000); | ||
1082 | if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) | 1113 | if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) |
1083 | /* don't reset CAM block on reset */ | 1114 | /* don't reset CAM block on reset */ |
1084 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); | 1115 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); |
1085 | else | 1116 | else |
1086 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); | 1117 | qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); |
1118 | |||
1119 | /* reset ms */ | ||
1120 | val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); | ||
1121 | val |= (1 << 1); | ||
1122 | qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); | ||
1123 | msleep(20); | ||
1124 | |||
1125 | /* unreset ms */ | ||
1126 | val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); | ||
1127 | val &= ~(1 << 1); | ||
1128 | qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); | ||
1129 | msleep(20); | ||
1130 | |||
1087 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); | 1131 | qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); |
1088 | 1132 | ||
1089 | /* Read the signature value from the flash. | 1133 | /* Read the signature value from the flash. |
@@ -1210,25 +1254,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) | |||
1210 | } | 1254 | } |
1211 | 1255 | ||
1212 | static int | 1256 | static int |
1213 | qla82xx_check_for_bad_spd(struct qla_hw_data *ha) | ||
1214 | { | ||
1215 | u32 val = 0; | ||
1216 | val = qla82xx_rd_32(ha, BOOT_LOADER_DIMM_STATUS); | ||
1217 | val &= QLA82XX_BOOT_LOADER_MN_ISSUE; | ||
1218 | if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) { | ||
1219 | qla_printk(KERN_INFO, ha, | ||
1220 | "Memory DIMM SPD not programmed. " | ||
1221 | " Assumed valid.\n"); | ||
1222 | return 1; | ||
1223 | } else if (val) { | ||
1224 | qla_printk(KERN_INFO, ha, | ||
1225 | "Memory DIMM type incorrect.Info:%08X.\n", val); | ||
1226 | return 2; | ||
1227 | } | ||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1231 | static int | ||
1232 | qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, | 1257 | qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, |
1233 | u64 off, void *data, int size) | 1258 | u64 off, void *data, int size) |
1234 | { | 1259 | { |
@@ -1293,11 +1318,6 @@ qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, | |||
1293 | word[startword+1] |= tmpw >> (sz[0] * 8); | 1318 | word[startword+1] |= tmpw >> (sz[0] * 8); |
1294 | } | 1319 | } |
1295 | 1320 | ||
1296 | /* | ||
1297 | * don't lock here - write_wx gets the lock if each time | ||
1298 | * write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1299 | * netxen_nic_pci_change_crbwindow_128M(adapter, 0); | ||
1300 | */ | ||
1301 | for (i = 0; i < loop; i++) { | 1321 | for (i = 0; i < loop; i++) { |
1302 | temp = off8 + (i << shift_amount); | 1322 | temp = off8 + (i << shift_amount); |
1303 | qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); | 1323 | qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); |
@@ -1399,12 +1419,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, | |||
1399 | off0[1] = 0; | 1419 | off0[1] = 0; |
1400 | sz[1] = size - sz[0]; | 1420 | sz[1] = size - sz[0]; |
1401 | 1421 | ||
1402 | /* | ||
1403 | * don't lock here - write_wx gets the lock if each time | ||
1404 | * write_lock_irqsave(&adapter->adapter_lock, flags); | ||
1405 | * netxen_nic_pci_change_crbwindow_128M(adapter, 0); | ||
1406 | */ | ||
1407 | |||
1408 | for (i = 0; i < loop; i++) { | 1422 | for (i = 0; i < loop; i++) { |
1409 | temp = off8 + (i << shift_amount); | 1423 | temp = off8 + (i << shift_amount); |
1410 | qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); | 1424 | qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); |
@@ -1437,11 +1451,6 @@ qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, | |||
1437 | } | 1451 | } |
1438 | } | 1452 | } |
1439 | 1453 | ||
1440 | /* | ||
1441 | * netxen_nic_pci_change_crbwindow_128M(adapter, 1); | ||
1442 | * write_unlock_irqrestore(&adapter->adapter_lock, flags); | ||
1443 | */ | ||
1444 | |||
1445 | if (j >= MAX_CTL_CHECK) | 1454 | if (j >= MAX_CTL_CHECK) |
1446 | return -1; | 1455 | return -1; |
1447 | 1456 | ||
@@ -1872,7 +1881,6 @@ qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) | |||
1872 | qla_printk(KERN_INFO, ha, | 1881 | qla_printk(KERN_INFO, ha, |
1873 | "Cmd Peg initialization failed: 0x%x.\n", val); | 1882 | "Cmd Peg initialization failed: 0x%x.\n", val); |
1874 | 1883 | ||
1875 | qla82xx_check_for_bad_spd(ha); | ||
1876 | val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); | 1884 | val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); |
1877 | read_lock(&ha->hw_lock); | 1885 | read_lock(&ha->hw_lock); |
1878 | qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); | 1886 | qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); |
@@ -2343,6 +2351,17 @@ qla82xx_set_qsnt_ready(struct qla_hw_data *ha) | |||
2343 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); | 2351 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); |
2344 | } | 2352 | } |
2345 | 2353 | ||
2354 | void | ||
2355 | qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha) | ||
2356 | { | ||
2357 | struct qla_hw_data *ha = vha->hw; | ||
2358 | uint32_t qsnt_state; | ||
2359 | |||
2360 | qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | ||
2361 | qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4)); | ||
2362 | qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state); | ||
2363 | } | ||
2364 | |||
2346 | static int | 2365 | static int |
2347 | qla82xx_load_fw(scsi_qla_host_t *vha) | 2366 | qla82xx_load_fw(scsi_qla_host_t *vha) |
2348 | { | 2367 | { |
@@ -2542,7 +2561,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, | |||
2542 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); | 2561 | *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); |
2543 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); | 2562 | *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); |
2544 | *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); | 2563 | *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg)); |
2545 | cur_seg++; | 2564 | cur_seg = sg_next(cur_seg); |
2546 | avail_dsds--; | 2565 | avail_dsds--; |
2547 | } | 2566 | } |
2548 | } | 2567 | } |
@@ -3261,6 +3280,104 @@ dev_ready: | |||
3261 | return QLA_SUCCESS; | 3280 | return QLA_SUCCESS; |
3262 | } | 3281 | } |
3263 | 3282 | ||
3283 | /* | ||
3284 | * qla82xx_need_qsnt_handler | ||
3285 | * Code to start quiescence sequence | ||
3286 | * | ||
3287 | * Note: | ||
3288 | * IDC lock must be held upon entry | ||
3289 | * | ||
3290 | * Return: void | ||
3291 | */ | ||
3292 | |||
3293 | static void | ||
3294 | qla82xx_need_qsnt_handler(scsi_qla_host_t *vha) | ||
3295 | { | ||
3296 | struct qla_hw_data *ha = vha->hw; | ||
3297 | uint32_t dev_state, drv_state, drv_active; | ||
3298 | unsigned long reset_timeout; | ||
3299 | |||
3300 | if (vha->flags.online) { | ||
3301 | /* Block any further I/O and wait for pending commands to complete */ | ||
3302 | qla82xx_quiescent_state_cleanup(vha); | ||
3303 | } | ||
3304 | |||
3305 | /* Set the quiescence ready bit */ | ||
3306 | qla82xx_set_qsnt_ready(ha); | ||
3307 | |||
3308 | /* Wait for 30 secs for other functions to ack */ | ||
3309 | reset_timeout = jiffies + (30 * HZ); | ||
3310 | |||
3311 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | ||
3312 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | ||
3313 | /* It's 2 that is written when qsnt is acked, so shift by one bit */ | ||
3314 | drv_active = drv_active << 0x01; | ||
3315 | |||
3316 | while (drv_state != drv_active) { | ||
3317 | |||
3318 | if (time_after_eq(jiffies, reset_timeout)) { | ||
3319 | /* quiescence timeout; other functions didn't ack, so | ||
3320 | * change the state back to DEV_READY | ||
3321 | */ | ||
3322 | qla_printk(KERN_INFO, ha, | ||
3323 | "%s: QUIESCENT TIMEOUT\n", QLA2XXX_DRIVER_NAME); | ||
3324 | qla_printk(KERN_INFO, ha, | ||
3325 | "DRV_ACTIVE:%d DRV_STATE:%d\n", drv_active, | ||
3326 | drv_state); | ||
3327 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | ||
3328 | QLA82XX_DEV_READY); | ||
3329 | qla_printk(KERN_INFO, ha, | ||
3330 | "HW State: DEV_READY\n"); | ||
3331 | qla82xx_idc_unlock(ha); | ||
3332 | qla2x00_perform_loop_resync(vha); | ||
3333 | qla82xx_idc_lock(ha); | ||
3334 | |||
3335 | qla82xx_clear_qsnt_ready(vha); | ||
3336 | return; | ||
3337 | } | ||
3338 | |||
3339 | qla82xx_idc_unlock(ha); | ||
3340 | msleep(1000); | ||
3341 | qla82xx_idc_lock(ha); | ||
3342 | |||
3343 | drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); | ||
3344 | drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); | ||
3345 | drv_active = drv_active << 0x01; | ||
3346 | } | ||
3347 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
3348 | /* everyone acked so set the state to DEV_QUIESCENCE */ | ||
3349 | if (dev_state == QLA82XX_DEV_NEED_QUIESCENT) { | ||
3350 | qla_printk(KERN_INFO, ha, "HW State: DEV_QUIESCENT\n"); | ||
3351 | qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_QUIESCENT); | ||
3352 | } | ||
3353 | } | ||
3354 | |||
3355 | /* | ||
3356 | * qla82xx_wait_for_state_change | ||
3357 | * Wait for device state to change from given current state | ||
3358 | * | ||
3359 | * Note: | ||
3360 | * IDC lock must not be held upon entry | ||
3361 | * | ||
3362 | * Return: | ||
3363 | * Changed device state. | ||
3364 | */ | ||
3365 | uint32_t | ||
3366 | qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state) | ||
3367 | { | ||
3368 | struct qla_hw_data *ha = vha->hw; | ||
3369 | uint32_t dev_state; | ||
3370 | |||
3371 | do { | ||
3372 | msleep(1000); | ||
3373 | qla82xx_idc_lock(ha); | ||
3374 | dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | ||
3375 | qla82xx_idc_unlock(ha); | ||
3376 | } while (dev_state == curr_state); | ||
3377 | |||
3378 | return dev_state; | ||
3379 | } | ||
3380 | |||
3264 | static void | 3381 | static void |
3265 | qla82xx_dev_failed_handler(scsi_qla_host_t *vha) | 3382 | qla82xx_dev_failed_handler(scsi_qla_host_t *vha) |
3266 | { | 3383 | { |
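The ack loop in qla82xx_need_qsnt_handler() above waits until DRV_STATE equals DRV_ACTIVE shifted left by one. Assuming the per-function nibble layout the surrounding code implies (DRV_ACTIVE holds 1 in each active function's nibble, and an ack writes 2 into the matching DRV_STATE nibble), the one-bit shift is what lines the two registers up. A small worked example under that assumption:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t drv_active = 0x00000011;       /* functions 0 and 1 active */
        uint32_t drv_state  = 0x00000022;       /* both wrote the "2" ack */

        printf("all functions acked: %s\n",
               (drv_active << 1) == drv_state ? "yes" : "no");
        return 0;
}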
@@ -3439,15 +3556,28 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) | |||
3439 | qla82xx_idc_lock(ha); | 3556 | qla82xx_idc_lock(ha); |
3440 | break; | 3557 | break; |
3441 | case QLA82XX_DEV_NEED_RESET: | 3558 | case QLA82XX_DEV_NEED_RESET: |
3442 | if (!ql2xdontresethba) | 3559 | qla82xx_need_reset_handler(vha); |
3443 | qla82xx_need_reset_handler(vha); | ||
3444 | break; | 3560 | break; |
3445 | case QLA82XX_DEV_NEED_QUIESCENT: | 3561 | case QLA82XX_DEV_NEED_QUIESCENT: |
3446 | qla82xx_set_qsnt_ready(ha); | 3562 | qla82xx_need_qsnt_handler(vha); |
3563 | /* Reset timeout value after quiescence handler */ | ||
3564 | dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ | ||
3565 | * HZ); | ||
3566 | break; | ||
3447 | case QLA82XX_DEV_QUIESCENT: | 3567 | case QLA82XX_DEV_QUIESCENT: |
3568 | /* Owner will exit and other will wait for the state | ||
3569 | * to get changed | ||
3570 | */ | ||
3571 | if (ha->flags.quiesce_owner) | ||
3572 | goto exit; | ||
3573 | |||
3448 | qla82xx_idc_unlock(ha); | 3574 | qla82xx_idc_unlock(ha); |
3449 | msleep(1000); | 3575 | msleep(1000); |
3450 | qla82xx_idc_lock(ha); | 3576 | qla82xx_idc_lock(ha); |
3577 | |||
3578 | /* Reset timeout value after quiescence handler */ | ||
3579 | dev_init_timeout = jiffies + (ha->nx_dev_init_timeout\ | ||
3580 | * HZ); | ||
3451 | break; | 3581 | break; |
3452 | case QLA82XX_DEV_FAILED: | 3582 | case QLA82XX_DEV_FAILED: |
3453 | qla82xx_dev_failed_handler(vha); | 3583 | qla82xx_dev_failed_handler(vha); |
@@ -3490,6 +3620,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) | |||
3490 | &ha->mbx_cmd_flags)) | 3620 | &ha->mbx_cmd_flags)) |
3491 | complete(&ha->mbx_intr_comp); | 3621 | complete(&ha->mbx_intr_comp); |
3492 | } | 3622 | } |
3623 | } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && | ||
3624 | !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { | ||
3625 | DEBUG(qla_printk(KERN_INFO, ha, | ||
3626 | "scsi(%ld) %s - detected quiescence needed\n", | ||
3627 | vha->host_no, __func__)); | ||
3628 | set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); | ||
3629 | qla2xxx_wake_dpc(vha); | ||
3493 | } else { | 3630 | } else { |
3494 | qla82xx_check_fw_alive(vha); | 3631 | qla82xx_check_fw_alive(vha); |
3495 | } | 3632 | } |
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 51ec0c5380e8..ed5883f1778a 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h | |||
@@ -523,8 +523,6 @@ | |||
523 | # define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) | 523 | # define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) |
524 | # define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) | 524 | # define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) |
525 | 525 | ||
526 | #define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000 | ||
527 | #define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff | ||
528 | #define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) | 526 | #define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) |
529 | #define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) | 527 | #define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) |
530 | #define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) | 528 | #define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2c0876c81a3f..c194c23ca1fb 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -37,12 +37,12 @@ static struct kmem_cache *srb_cachep; | |||
37 | static struct kmem_cache *ctx_cachep; | 37 | static struct kmem_cache *ctx_cachep; |
38 | 38 | ||
39 | int ql2xlogintimeout = 20; | 39 | int ql2xlogintimeout = 20; |
40 | module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); | 40 | module_param(ql2xlogintimeout, int, S_IRUGO); |
41 | MODULE_PARM_DESC(ql2xlogintimeout, | 41 | MODULE_PARM_DESC(ql2xlogintimeout, |
42 | "Login timeout value in seconds."); | 42 | "Login timeout value in seconds."); |
43 | 43 | ||
44 | int qlport_down_retry; | 44 | int qlport_down_retry; |
45 | module_param(qlport_down_retry, int, S_IRUGO|S_IRUSR); | 45 | module_param(qlport_down_retry, int, S_IRUGO); |
46 | MODULE_PARM_DESC(qlport_down_retry, | 46 | MODULE_PARM_DESC(qlport_down_retry, |
47 | "Maximum number of command retries to a port that returns " | 47 | "Maximum number of command retries to a port that returns " |
48 | "a PORT-DOWN status."); | 48 | "a PORT-DOWN status."); |
@@ -55,12 +55,12 @@ MODULE_PARM_DESC(ql2xplogiabsentdevice, | |||
55 | "Default is 0 - no PLOGI. 1 - perfom PLOGI."); | 55 | "Default is 0 - no PLOGI. 1 - perfom PLOGI."); |
56 | 56 | ||
57 | int ql2xloginretrycount = 0; | 57 | int ql2xloginretrycount = 0; |
58 | module_param(ql2xloginretrycount, int, S_IRUGO|S_IRUSR); | 58 | module_param(ql2xloginretrycount, int, S_IRUGO); |
59 | MODULE_PARM_DESC(ql2xloginretrycount, | 59 | MODULE_PARM_DESC(ql2xloginretrycount, |
60 | "Specify an alternate value for the NVRAM login retry count."); | 60 | "Specify an alternate value for the NVRAM login retry count."); |
61 | 61 | ||
62 | int ql2xallocfwdump = 1; | 62 | int ql2xallocfwdump = 1; |
63 | module_param(ql2xallocfwdump, int, S_IRUGO|S_IRUSR); | 63 | module_param(ql2xallocfwdump, int, S_IRUGO); |
64 | MODULE_PARM_DESC(ql2xallocfwdump, | 64 | MODULE_PARM_DESC(ql2xallocfwdump, |
65 | "Option to enable allocation of memory for a firmware dump " | 65 | "Option to enable allocation of memory for a firmware dump " |
66 | "during HBA initialization. Memory allocation requirements " | 66 | "during HBA initialization. Memory allocation requirements " |
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(ql2xextended_error_logging, | |||
73 | "Default is 0 - no logging. 1 - log errors."); | 73 | "Default is 0 - no logging. 1 - log errors."); |
74 | 74 | ||
75 | int ql2xshiftctondsd = 6; | 75 | int ql2xshiftctondsd = 6; |
76 | module_param(ql2xshiftctondsd, int, S_IRUGO|S_IRUSR); | 76 | module_param(ql2xshiftctondsd, int, S_IRUGO); |
77 | MODULE_PARM_DESC(ql2xshiftctondsd, | 77 | MODULE_PARM_DESC(ql2xshiftctondsd, |
78 | "Set to control shifting of command type processing " | 78 | "Set to control shifting of command type processing " |
79 | "based on total number of SG elements."); | 79 | "based on total number of SG elements."); |
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd, | |||
81 | static void qla2x00_free_device(scsi_qla_host_t *); | 81 | static void qla2x00_free_device(scsi_qla_host_t *); |
82 | 82 | ||
83 | int ql2xfdmienable=1; | 83 | int ql2xfdmienable=1; |
84 | module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR); | 84 | module_param(ql2xfdmienable, int, S_IRUGO); |
85 | MODULE_PARM_DESC(ql2xfdmienable, | 85 | MODULE_PARM_DESC(ql2xfdmienable, |
86 | "Enables FDMI registrations. " | 86 | "Enables FDMI registrations. " |
87 | "0 - no FDMI. Default is 1 - perform FDMI."); | 87 | "0 - no FDMI. Default is 1 - perform FDMI."); |
@@ -106,27 +106,27 @@ MODULE_PARM_DESC(ql2xenablehba_err_chk, | |||
106 | " Default is 0 - Error isolation disabled, 1 - Enable it"); | 106 | " Default is 0 - Error isolation disabled, 1 - Enable it"); |
107 | 107 | ||
108 | int ql2xiidmaenable=1; | 108 | int ql2xiidmaenable=1; |
109 | module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR); | 109 | module_param(ql2xiidmaenable, int, S_IRUGO); |
110 | MODULE_PARM_DESC(ql2xiidmaenable, | 110 | MODULE_PARM_DESC(ql2xiidmaenable, |
111 | "Enables iIDMA settings " | 111 | "Enables iIDMA settings " |
112 | "Default is 1 - perform iIDMA. 0 - no iIDMA."); | 112 | "Default is 1 - perform iIDMA. 0 - no iIDMA."); |
113 | 113 | ||
114 | int ql2xmaxqueues = 1; | 114 | int ql2xmaxqueues = 1; |
115 | module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR); | 115 | module_param(ql2xmaxqueues, int, S_IRUGO); |
116 | MODULE_PARM_DESC(ql2xmaxqueues, | 116 | MODULE_PARM_DESC(ql2xmaxqueues, |
117 | "Enables MQ settings " | 117 | "Enables MQ settings " |
118 | "Default is 1 for single queue. Set it to number " | 118 | "Default is 1 for single queue. Set it to number " |
119 | "of queues in MQ mode."); | 119 | "of queues in MQ mode."); |
120 | 120 | ||
121 | int ql2xmultique_tag; | 121 | int ql2xmultique_tag; |
122 | module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR); | 122 | module_param(ql2xmultique_tag, int, S_IRUGO); |
123 | MODULE_PARM_DESC(ql2xmultique_tag, | 123 | MODULE_PARM_DESC(ql2xmultique_tag, |
124 | "Enables CPU affinity settings for the driver " | 124 | "Enables CPU affinity settings for the driver " |
125 | "Default is 0 for no affinity of request and response IO. " | 125 | "Default is 0 for no affinity of request and response IO. " |
126 | "Set it to 1 to turn on the cpu affinity."); | 126 | "Set it to 1 to turn on the cpu affinity."); |
127 | 127 | ||
128 | int ql2xfwloadbin; | 128 | int ql2xfwloadbin; |
129 | module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR); | 129 | module_param(ql2xfwloadbin, int, S_IRUGO); |
130 | MODULE_PARM_DESC(ql2xfwloadbin, | 130 | MODULE_PARM_DESC(ql2xfwloadbin, |
131 | "Option to specify location from which to load ISP firmware:\n" | 131 | "Option to specify location from which to load ISP firmware:\n" |
132 | " 2 -- load firmware via the request_firmware() (hotplug)\n" | 132 | " 2 -- load firmware via the request_firmware() (hotplug)\n" |
@@ -135,39 +135,32 @@ MODULE_PARM_DESC(ql2xfwloadbin, | |||
135 | " 0 -- use default semantics.\n"); | 135 | " 0 -- use default semantics.\n"); |
136 | 136 | ||
137 | int ql2xetsenable; | 137 | int ql2xetsenable; |
138 | module_param(ql2xetsenable, int, S_IRUGO|S_IRUSR); | 138 | module_param(ql2xetsenable, int, S_IRUGO); |
139 | MODULE_PARM_DESC(ql2xetsenable, | 139 | MODULE_PARM_DESC(ql2xetsenable, |
140 | "Enables firmware ETS burst." | 140 | "Enables firmware ETS burst." |
141 | "Default is 0 - skip ETS enablement."); | 141 | "Default is 0 - skip ETS enablement."); |
142 | 142 | ||
143 | int ql2xdbwr = 1; | 143 | int ql2xdbwr = 1; |
144 | module_param(ql2xdbwr, int, S_IRUGO|S_IRUSR); | 144 | module_param(ql2xdbwr, int, S_IRUGO); |
145 | MODULE_PARM_DESC(ql2xdbwr, | 145 | MODULE_PARM_DESC(ql2xdbwr, |
146 | "Option to specify scheme for request queue posting\n" | 146 | "Option to specify scheme for request queue posting\n" |
147 | " 0 -- Regular doorbell.\n" | 147 | " 0 -- Regular doorbell.\n" |
148 | " 1 -- CAMRAM doorbell (faster).\n"); | 148 | " 1 -- CAMRAM doorbell (faster).\n"); |
149 | 149 | ||
150 | int ql2xdontresethba; | ||
151 | module_param(ql2xdontresethba, int, S_IRUGO|S_IRUSR); | ||
152 | MODULE_PARM_DESC(ql2xdontresethba, | ||
153 | "Option to specify reset behaviour\n" | ||
154 | " 0 (Default) -- Reset on failure.\n" | ||
155 | " 1 -- Do not reset on failure.\n"); | ||
156 | |||
157 | int ql2xtargetreset = 1; | 150 | int ql2xtargetreset = 1; |
158 | module_param(ql2xtargetreset, int, S_IRUGO|S_IRUSR); | 151 | module_param(ql2xtargetreset, int, S_IRUGO); |
159 | MODULE_PARM_DESC(ql2xtargetreset, | 152 | MODULE_PARM_DESC(ql2xtargetreset, |
160 | "Enable target reset." | 153 | "Enable target reset." |
161 | "Default is 1 - use hw defaults."); | 154 | "Default is 1 - use hw defaults."); |
162 | 155 | ||
163 | int ql2xgffidenable; | 156 | int ql2xgffidenable; |
164 | module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR); | 157 | module_param(ql2xgffidenable, int, S_IRUGO); |
165 | MODULE_PARM_DESC(ql2xgffidenable, | 158 | MODULE_PARM_DESC(ql2xgffidenable, |
166 | "Enables GFF_ID checks of port type. " | 159 | "Enables GFF_ID checks of port type. " |
167 | "Default is 0 - Do not use GFF_ID information."); | 160 | "Default is 0 - Do not use GFF_ID information."); |
168 | 161 | ||
169 | int ql2xasynctmfenable; | 162 | int ql2xasynctmfenable; |
170 | module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR); | 163 | module_param(ql2xasynctmfenable, int, S_IRUGO); |
171 | MODULE_PARM_DESC(ql2xasynctmfenable, | 164 | MODULE_PARM_DESC(ql2xasynctmfenable, |
172 | "Enables issue of TM IOCBs asynchronously via IOCB mechanism" | 165 | "Enables issue of TM IOCBs asynchronously via IOCB mechanism" |
173 | "Default is 0 - Issue TM IOCBs via mailbox mechanism."); | 166 | "Default is 0 - Issue TM IOCBs via mailbox mechanism."); |
@@ -2371,7 +2364,7 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
2371 | list_for_each_entry(vha, &ha->vp_list, list) { | 2364 | list_for_each_entry(vha, &ha->vp_list, list) { |
2372 | atomic_inc(&vha->vref_count); | 2365 | atomic_inc(&vha->vref_count); |
2373 | 2366 | ||
2374 | if (vha && vha->fc_vport) { | 2367 | if (vha->fc_vport) { |
2375 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 2368 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
2376 | 2369 | ||
2377 | fc_vport_terminate(vha->fc_vport); | 2370 | fc_vport_terminate(vha->fc_vport); |
@@ -3386,6 +3379,21 @@ qla2x00_do_dpc(void *data) | |||
3386 | clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); | 3379 | clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); |
3387 | } | 3380 | } |
3388 | 3381 | ||
3382 | if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { | ||
3383 | DEBUG(printk(KERN_INFO "scsi(%ld): dpc: sched " | ||
3384 | "qla2x00_quiesce_needed ha = %p\n", | ||
3385 | base_vha->host_no, ha)); | ||
3386 | qla82xx_device_state_handler(base_vha); | ||
3387 | clear_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags); | ||
3388 | if (!ha->flags.quiesce_owner) { | ||
3389 | qla2x00_perform_loop_resync(base_vha); | ||
3390 | |||
3391 | qla82xx_idc_lock(ha); | ||
3392 | qla82xx_clear_qsnt_ready(base_vha); | ||
3393 | qla82xx_idc_unlock(ha); | ||
3394 | } | ||
3395 | } | ||
3396 | |||
3389 | if (test_and_clear_bit(RESET_MARKER_NEEDED, | 3397 | if (test_and_clear_bit(RESET_MARKER_NEEDED, |
3390 | &base_vha->dpc_flags) && | 3398 | &base_vha->dpc_flags) && |
3391 | (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { | 3399 | (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) { |
@@ -3589,13 +3597,16 @@ qla2x00_timer(scsi_qla_host_t *vha) | |||
3589 | return; | 3597 | return; |
3590 | } | 3598 | } |
3591 | 3599 | ||
3592 | if (IS_QLA82XX(ha)) | ||
3593 | qla82xx_watchdog(vha); | ||
3594 | |||
3595 | /* Hardware read to raise pending EEH errors during mailbox waits. */ | 3600 | /* Hardware read to raise pending EEH errors during mailbox waits. */ |
3596 | if (!pci_channel_offline(ha->pdev)) | 3601 | if (!pci_channel_offline(ha->pdev)) |
3597 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); | 3602 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); |
3598 | 3603 | ||
3604 | if (IS_QLA82XX(ha)) { | ||
3605 | if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) | ||
3606 | start_dpc++; | ||
3607 | qla82xx_watchdog(vha); | ||
3608 | } | ||
3609 | |||
3599 | /* Loop down handler. */ | 3610 | /* Loop down handler. */ |
3600 | if (atomic_read(&vha->loop_down_timer) > 0 && | 3611 | if (atomic_read(&vha->loop_down_timer) > 0 && |
3601 | !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) | 3612 | !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) |
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 76de9574b385..22070621206c 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c | |||
@@ -669,6 +669,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
669 | def = 1; | 669 | def = 1; |
670 | else if (IS_QLA81XX(ha)) | 670 | else if (IS_QLA81XX(ha)) |
671 | def = 2; | 671 | def = 2; |
672 | |||
673 | /* Assign FCP prio region since older adapters may not have FLT, or | ||
674 | FCP prio region in its FLT. | ||
675 | */ | ||
676 | ha->flt_region_fcp_prio = ha->flags.port0 ? | ||
677 | fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; | ||
678 | |||
672 | ha->flt_region_flt = flt_addr; | 679 | ha->flt_region_flt = flt_addr; |
673 | wptr = (uint16_t *)req->ring; | 680 | wptr = (uint16_t *)req->ring; |
674 | flt = (struct qla_flt_header *)req->ring; | 681 | flt = (struct qla_flt_header *)req->ring; |
@@ -696,10 +703,6 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) | |||
696 | goto no_flash_data; | 703 | goto no_flash_data; |
697 | } | 704 | } |
698 | 705 | ||
699 | /* Assign FCP prio region since older FLT's may not have it */ | ||
700 | ha->flt_region_fcp_prio = ha->flags.port0 ? | ||
701 | fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; | ||
702 | |||
703 | loc = locations[1]; | 706 | loc = locations[1]; |
704 | cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); | 707 | cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); |
705 | for ( ; cnt; cnt--, region++) { | 708 | for ( ; cnt; cnt--, region++) { |
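The qla_sup.c reorder above assigns the default flt_region_fcp_prio before the FLT is parsed, so adapters with no FLT at all, or an FLT lacking an FCP-priority region, still end up with a usable address instead of zero. A tiny model of that assign-default-then-override ordering (the region values are placeholders, not the driver's real defaults):

#include <stdio.h>

int main(void)
{
        int port0 = 1, flt_has_fcp_prio = 0;
        unsigned int fcp_prio = port0 ? 0x1000 : 0x2000;    /* default first */

        if (flt_has_fcp_prio)
                fcp_prio = 0x3000;                          /* FLT overrides */

        printf("fcp_prio region = 0x%x\n", fcp_prio);
        return 0;
}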
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c index edcf048215dd..af62c3cf8752 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.c +++ b/drivers/scsi/qla4xxx/ql4_dbg.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h index d861c3b411c8..abd83602cdda 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.h +++ b/drivers/scsi/qla4xxx/ql4_dbg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 0f3bfc3da5cf..2fc0045b1a52 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -175,7 +175,7 @@ | |||
175 | struct srb { | 175 | struct srb { |
176 | struct list_head list; /* (8) */ | 176 | struct list_head list; /* (8) */ |
177 | struct scsi_qla_host *ha; /* HA the SP is queued on */ | 177 | struct scsi_qla_host *ha; /* HA the SP is queued on */ |
178 | struct ddb_entry *ddb; | 178 | struct ddb_entry *ddb; |
179 | uint16_t flags; /* (1) Status flags. */ | 179 | uint16_t flags; /* (1) Status flags. */ |
180 | 180 | ||
181 | #define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */ | 181 | #define SRB_DMA_VALID BIT_3 /* DMA Buffer mapped. */ |
@@ -191,7 +191,6 @@ struct srb { | |||
191 | struct scsi_cmnd *cmd; /* (4) SCSI command block */ | 191 | struct scsi_cmnd *cmd; /* (4) SCSI command block */ |
192 | dma_addr_t dma_handle; /* (4) for unmap of single transfers */ | 192 | dma_addr_t dma_handle; /* (4) for unmap of single transfers */ |
193 | struct kref srb_ref; /* reference count for this srb */ | 193 | struct kref srb_ref; /* reference count for this srb */ |
194 | uint32_t fw_ddb_index; | ||
195 | uint8_t err_id; /* error id */ | 194 | uint8_t err_id; /* error id */ |
196 | #define SRB_ERR_PORT 1 /* Request failed because "port down" */ | 195 | #define SRB_ERR_PORT 1 /* Request failed because "port down" */ |
197 | #define SRB_ERR_LOOP 2 /* Request failed because "loop down" */ | 196 | #define SRB_ERR_LOOP 2 /* Request failed because "loop down" */ |
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 5e757d7fff7d..c1985792f034 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 6575a47501e5..8fad99b7eef4 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index dc01fa3da5d1..1629c48c35ef 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h index 9471ac755000..62f90bdec5d5 100644 --- a/drivers/scsi/qla4xxx/ql4_inline.h +++ b/drivers/scsi/qla4xxx/ql4_inline.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 5ae49fd87846..75fcd82a8fca 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 7c33fd5943d5..6ffbe9727dff 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -554,7 +554,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, | |||
554 | /* mbox_sts[2] = Old ACB state | 554 | /* mbox_sts[2] = Old ACB state |
555 | * mbox_sts[3] = new ACB state */ | 555 | * mbox_sts[3] = new ACB state */ |
556 | if ((mbox_sts[3] == ACB_STATE_VALID) && | 556 | if ((mbox_sts[3] == ACB_STATE_VALID) && |
557 | (mbox_sts[2] == ACB_STATE_TENTATIVE)) | 557 | ((mbox_sts[2] == ACB_STATE_TENTATIVE) || |
558 | (mbox_sts[2] == ACB_STATE_ACQUIRING))) | ||
558 | set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); | 559 | set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); |
559 | else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && | 560 | else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && |
560 | (mbox_sts[2] == ACB_STATE_VALID)) | 561 | (mbox_sts[2] == ACB_STATE_VALID)) |
@@ -1077,7 +1078,7 @@ try_msi: | |||
1077 | ret = pci_enable_msi(ha->pdev); | 1078 | ret = pci_enable_msi(ha->pdev); |
1078 | if (!ret) { | 1079 | if (!ret) { |
1079 | ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, | 1080 | ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler, |
1080 | IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha); | 1081 | 0, DRIVER_NAME, ha); |
1081 | if (!ret) { | 1082 | if (!ret) { |
1082 | DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); | 1083 | DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n")); |
1083 | set_bit(AF_MSI_ENABLED, &ha->flags); | 1084 | set_bit(AF_MSI_ENABLED, &ha->flags); |
@@ -1095,7 +1096,7 @@ try_msi: | |||
1095 | try_intx: | 1096 | try_intx: |
1096 | /* Trying INTx */ | 1097 | /* Trying INTx */ |
1097 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, | 1098 | ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, |
1098 | IRQF_DISABLED|IRQF_SHARED, DRIVER_NAME, ha); | 1099 | IRQF_SHARED, DRIVER_NAME, ha); |
1099 | if (!ret) { | 1100 | if (!ret) { |
1100 | DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); | 1101 | DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n")); |
1101 | set_bit(AF_INTx_ENABLED, &ha->flags); | 1102 | set_bit(AF_INTx_ENABLED, &ha->flags); |
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 2d2f9c879bfd..f65626aec7c1 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -81,23 +81,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, | |||
81 | */ | 81 | */ |
82 | spin_lock_irqsave(&ha->hardware_lock, flags); | 82 | spin_lock_irqsave(&ha->hardware_lock, flags); |
83 | 83 | ||
84 | if (is_qla8022(ha)) { | 84 | if (!is_qla8022(ha)) { |
85 | intr_status = readl(&ha->qla4_8xxx_reg->host_int); | ||
86 | if (intr_status & ISRX_82XX_RISC_INT) { | ||
87 | /* Service existing interrupt */ | ||
88 | DEBUG2(printk("scsi%ld: %s: " | ||
89 | "servicing existing interrupt\n", | ||
90 | ha->host_no, __func__)); | ||
91 | intr_status = readl(&ha->qla4_8xxx_reg->host_status); | ||
92 | ha->isp_ops->interrupt_service_routine(ha, intr_status); | ||
93 | clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags); | ||
94 | if (test_bit(AF_INTERRUPTS_ON, &ha->flags) && | ||
95 | test_bit(AF_INTx_ENABLED, &ha->flags)) | ||
96 | qla4_8xxx_wr_32(ha, | ||
97 | ha->nx_legacy_intr.tgt_mask_reg, | ||
98 | 0xfbff); | ||
99 | } | ||
100 | } else { | ||
101 | intr_status = readl(&ha->reg->ctrl_status); | 85 | intr_status = readl(&ha->reg->ctrl_status); |
102 | if (intr_status & CSR_SCSI_PROCESSOR_INTR) { | 86 | if (intr_status & CSR_SCSI_PROCESSOR_INTR) { |
103 | /* Service existing interrupt */ | 87 | /* Service existing interrupt */ |
@@ -934,7 +918,7 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb) | |||
934 | return status; | 918 | return status; |
935 | 919 | ||
936 | mbox_cmd[0] = MBOX_CMD_ABORT_TASK; | 920 | mbox_cmd[0] = MBOX_CMD_ABORT_TASK; |
937 | mbox_cmd[1] = srb->fw_ddb_index; | 921 | mbox_cmd[1] = srb->ddb->fw_ddb_index; |
938 | mbox_cmd[2] = index; | 922 | mbox_cmd[2] = index; |
939 | /* Immediate Command Enable */ | 923 | /* Immediate Command Enable */ |
940 | mbox_cmd[5] = 0x01; | 924 | mbox_cmd[5] = 0x01; |
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c index f0d0fbf88aa2..b4b859b2d47e 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.c +++ b/drivers/scsi/qla4xxx/ql4_nvram.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h index 7a8fc66a760d..b3831bd29479 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.h +++ b/drivers/scsi/qla4xxx/ql4_nvram.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 474b10d71364..3d5ef2df4134 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2009 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -942,12 +942,55 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) | |||
942 | 942 | ||
943 | /* Halt all the indiviual PEGs and other blocks of the ISP */ | 943 | /* Halt all the indiviual PEGs and other blocks of the ISP */ |
944 | qla4_8xxx_rom_lock(ha); | 944 | qla4_8xxx_rom_lock(ha); |
945 | |||
946 | /* mask all niu interrupts */ | ||
947 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); | ||
948 | /* disable xge rx/tx */ | ||
949 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); | ||
950 | /* disable xg1 rx/tx */ | ||
951 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); | ||
952 | |||
953 | /* halt sre */ | ||
954 | val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); | ||
955 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); | ||
956 | |||
957 | /* halt epg */ | ||
958 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); | ||
959 | |||
960 | /* halt timers */ | ||
961 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); | ||
962 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); | ||
963 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); | ||
964 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); | ||
965 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); | ||
966 | |||
967 | /* halt pegs */ | ||
968 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); | ||
969 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); | ||
970 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); | ||
971 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); | ||
972 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); | ||
973 | |||
974 | /* big hammer */ | ||
975 | msleep(1000); | ||
945 | if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) | 976 | if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) |
946 | /* don't reset CAM block on reset */ | 977 | /* don't reset CAM block on reset */ |
947 | qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); | 978 | qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); |
948 | else | 979 | else |
949 | qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); | 980 | qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); |
950 | 981 | ||
982 | /* reset ms */ | ||
983 | val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); | ||
984 | val |= (1 << 1); | ||
985 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); | ||
986 | |||
987 | msleep(20); | ||
988 | /* unreset ms */ | ||
989 | val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4); | ||
990 | val &= ~(1 << 1); | ||
991 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val); | ||
992 | msleep(20); | ||
993 | |||
951 | qla4_8xxx_rom_unlock(ha); | 994 | qla4_8xxx_rom_unlock(ha); |
952 | 995 | ||
953 | /* Read the signature value from the flash. | 996 | /* Read the signature value from the flash. |
@@ -1084,14 +1127,14 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) | |||
1084 | static int | 1127 | static int |
1085 | qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) | 1128 | qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) |
1086 | { | 1129 | { |
1087 | int i; | 1130 | int i, rval = 0; |
1088 | long size = 0; | 1131 | long size = 0; |
1089 | long flashaddr, memaddr; | 1132 | long flashaddr, memaddr; |
1090 | u64 data; | 1133 | u64 data; |
1091 | u32 high, low; | 1134 | u32 high, low; |
1092 | 1135 | ||
1093 | flashaddr = memaddr = ha->hw.flt_region_bootload; | 1136 | flashaddr = memaddr = ha->hw.flt_region_bootload; |
1094 | size = (image_start - flashaddr)/8; | 1137 | size = (image_start - flashaddr) / 8; |
1095 | 1138 | ||
1096 | DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n", | 1139 | DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n", |
1097 | ha->host_no, __func__, flashaddr, image_start)); | 1140 | ha->host_no, __func__, flashaddr, image_start)); |
@@ -1100,14 +1143,18 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) | |||
1100 | if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) || | 1143 | if ((qla4_8xxx_rom_fast_read(ha, flashaddr, (int *)&low)) || |
1101 | (qla4_8xxx_rom_fast_read(ha, flashaddr + 4, | 1144 | (qla4_8xxx_rom_fast_read(ha, flashaddr + 4, |
1102 | (int *)&high))) { | 1145 | (int *)&high))) { |
1103 | return -1; | 1146 | rval = -1; |
1147 | goto exit_load_from_flash; | ||
1104 | } | 1148 | } |
1105 | data = ((u64)high << 32) | low ; | 1149 | data = ((u64)high << 32) | low ; |
1106 | qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8); | 1150 | rval = qla4_8xxx_pci_mem_write_2M(ha, memaddr, &data, 8); |
1151 | if (rval) | ||
1152 | goto exit_load_from_flash; | ||
1153 | |||
1107 | flashaddr += 8; | 1154 | flashaddr += 8; |
1108 | memaddr += 8; | 1155 | memaddr += 8; |
1109 | 1156 | ||
1110 | if (i%0x1000 == 0) | 1157 | if (i % 0x1000 == 0) |
1111 | msleep(1); | 1158 | msleep(1); |
1112 | 1159 | ||
1113 | } | 1160 | } |
@@ -1119,7 +1166,8 @@ qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) | |||
1119 | qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); | 1166 | qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); |
1120 | read_unlock(&ha->hw_lock); | 1167 | read_unlock(&ha->hw_lock); |
1121 | 1168 | ||
1122 | return 0; | 1169 | exit_load_from_flash: |
1170 | return rval; | ||
1123 | } | 1171 | } |
1124 | 1172 | ||
1125 | static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) | 1173 | static int qla4_8xxx_load_fw(struct scsi_qla_host *ha, uint32_t image_start) |
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index ff689bf53007..35376a1c3f1b 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic Fibre Channel HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2008 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla2xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | #ifndef __QLA_NX_H | 7 | #ifndef __QLA_NX_H |
8 | #define __QLA_NX_H | 8 | #define __QLA_NX_H |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 0d48fb4d1044..3fc1d256636f 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
@@ -706,18 +706,22 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) | |||
706 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); | 706 | dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); |
707 | 707 | ||
708 | /* don't poll if reset is going on */ | 708 | /* don't poll if reset is going on */ |
709 | if (!test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags)) { | 709 | if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || |
710 | test_bit(DPC_RESET_HA, &ha->dpc_flags) || | ||
711 | test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags))) { | ||
710 | if (dev_state == QLA82XX_DEV_NEED_RESET && | 712 | if (dev_state == QLA82XX_DEV_NEED_RESET && |
711 | !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { | 713 | !test_bit(DPC_RESET_HA, &ha->dpc_flags)) { |
712 | printk("scsi%ld: %s: HW State: NEED RESET!\n", | 714 | if (!ql4xdontresethba) { |
713 | ha->host_no, __func__); | 715 | ql4_printk(KERN_INFO, ha, "%s: HW State: " |
714 | set_bit(DPC_RESET_HA, &ha->dpc_flags); | 716 | "NEED RESET!\n", __func__); |
715 | qla4xxx_wake_dpc(ha); | 717 | set_bit(DPC_RESET_HA, &ha->dpc_flags); |
716 | qla4xxx_mailbox_premature_completion(ha); | 718 | qla4xxx_wake_dpc(ha); |
719 | qla4xxx_mailbox_premature_completion(ha); | ||
720 | } | ||
717 | } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && | 721 | } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && |
718 | !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { | 722 | !test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) { |
719 | printk("scsi%ld: %s: HW State: NEED QUIES!\n", | 723 | ql4_printk(KERN_INFO, ha, "%s: HW State: NEED QUIES!\n", |
720 | ha->host_no, __func__); | 724 | __func__); |
721 | set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); | 725 | set_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags); |
722 | qla4xxx_wake_dpc(ha); | 726 | qla4xxx_wake_dpc(ha); |
723 | } else { | 727 | } else { |
@@ -1721,6 +1725,14 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1721 | if (!test_bit(AF_ONLINE, &ha->flags)) { | 1725 | if (!test_bit(AF_ONLINE, &ha->flags)) { |
1722 | ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); | 1726 | ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n"); |
1723 | 1727 | ||
1728 | if (is_qla8022(ha) && ql4xdontresethba) { | ||
1729 | /* Put the device in failed state. */ | ||
1730 | DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n")); | ||
1731 | qla4_8xxx_idc_lock(ha); | ||
1732 | qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, | ||
1733 | QLA82XX_DEV_FAILED); | ||
1734 | qla4_8xxx_idc_unlock(ha); | ||
1735 | } | ||
1724 | ret = -ENODEV; | 1736 | ret = -ENODEV; |
1725 | goto probe_failed; | 1737 | goto probe_failed; |
1726 | } | 1738 | } |
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 9bfacf4ed137..8475b308e01b 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * QLogic iSCSI HBA Driver | 2 | * QLogic iSCSI HBA Driver |
3 | * Copyright (c) 2003-2006 QLogic Corporation | 3 | * Copyright (c) 2003-2010 QLogic Corporation |
4 | * | 4 | * |
5 | * See LICENSE.qla4xxx for copyright and licensing details. | 5 | * See LICENSE.qla4xxx for copyright and licensing details. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k4" | 8 | #define QLA4XXX_DRIVER_VERSION "5.02.00-k5" |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2f1f9b079b10..7b310934efed 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -1805,6 +1805,7 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, | |||
1805 | devip->sense_buff[5] = (ret >> 8) & 0xff; | 1805 | devip->sense_buff[5] = (ret >> 8) & 0xff; |
1806 | devip->sense_buff[6] = ret & 0xff; | 1806 | devip->sense_buff[6] = ret & 0xff; |
1807 | } | 1807 | } |
1808 | scsi_set_resid(SCpnt, scsi_bufflen(SCpnt)); | ||
1808 | return check_condition_result; | 1809 | return check_condition_result; |
1809 | } | 1810 | } |
1810 | 1811 | ||
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 30ac116186f5..45c75649b9e0 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -1124,51 +1124,40 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost, | |||
1124 | struct list_head *work_q, | 1124 | struct list_head *work_q, |
1125 | struct list_head *done_q) | 1125 | struct list_head *done_q) |
1126 | { | 1126 | { |
1127 | struct scsi_cmnd *scmd, *tgtr_scmd, *next; | 1127 | LIST_HEAD(tmp_list); |
1128 | unsigned int id = 0; | ||
1129 | int rtn; | ||
1130 | 1128 | ||
1131 | do { | 1129 | list_splice_init(work_q, &tmp_list); |
1132 | tgtr_scmd = NULL; | 1130 | |
1133 | list_for_each_entry(scmd, work_q, eh_entry) { | 1131 | while (!list_empty(&tmp_list)) { |
1134 | if (id == scmd_id(scmd)) { | 1132 | struct scsi_cmnd *next, *scmd; |
1135 | tgtr_scmd = scmd; | 1133 | int rtn; |
1136 | break; | 1134 | unsigned int id; |
1137 | } | 1135 | |
1138 | } | 1136 | scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry); |
1139 | if (!tgtr_scmd) { | 1137 | id = scmd_id(scmd); |
1140 | /* not one exactly equal; find the next highest */ | ||
1141 | list_for_each_entry(scmd, work_q, eh_entry) { | ||
1142 | if (scmd_id(scmd) > id && | ||
1143 | (!tgtr_scmd || | ||
1144 | scmd_id(tgtr_scmd) > scmd_id(scmd))) | ||
1145 | tgtr_scmd = scmd; | ||
1146 | } | ||
1147 | } | ||
1148 | if (!tgtr_scmd) | ||
1149 | /* no more commands, that's it */ | ||
1150 | break; | ||
1151 | 1138 | ||
1152 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " | 1139 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset " |
1153 | "to target %d\n", | 1140 | "to target %d\n", |
1154 | current->comm, id)); | 1141 | current->comm, id)); |
1155 | rtn = scsi_try_target_reset(tgtr_scmd); | 1142 | rtn = scsi_try_target_reset(scmd); |
1156 | if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { | 1143 | if (rtn != SUCCESS && rtn != FAST_IO_FAIL) |
1157 | list_for_each_entry_safe(scmd, next, work_q, eh_entry) { | ||
1158 | if (id == scmd_id(scmd)) | ||
1159 | if (!scsi_device_online(scmd->device) || | ||
1160 | rtn == FAST_IO_FAIL || | ||
1161 | !scsi_eh_tur(tgtr_scmd)) | ||
1162 | scsi_eh_finish_cmd(scmd, | ||
1163 | done_q); | ||
1164 | } | ||
1165 | } else | ||
1166 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset" | 1144 | SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset" |
1167 | " failed target: " | 1145 | " failed target: " |
1168 | "%d\n", | 1146 | "%d\n", |
1169 | current->comm, id)); | 1147 | current->comm, id)); |
1170 | id++; | 1148 | list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) { |
1171 | } while(id != 0); | 1149 | if (scmd_id(scmd) != id) |
1150 | continue; | ||
1151 | |||
1152 | if ((rtn == SUCCESS || rtn == FAST_IO_FAIL) | ||
1153 | && (!scsi_device_online(scmd->device) || | ||
1154 | rtn == FAST_IO_FAIL || !scsi_eh_tur(scmd))) | ||
1155 | scsi_eh_finish_cmd(scmd, done_q); | ||
1156 | else | ||
1157 | /* push back on work queue for further processing */ | ||
1158 | list_move(&scmd->eh_entry, work_q); | ||
1159 | } | ||
1160 | } | ||
1172 | 1161 | ||
1173 | return list_empty(work_q); | 1162 | return list_empty(work_q); |
1174 | } | 1163 | } |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 4a3842212c50..501f67bef719 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1278,11 +1278,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost, | |||
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | if (scsi_target_is_busy(starget)) { | 1280 | if (scsi_target_is_busy(starget)) { |
1281 | if (list_empty(&sdev->starved_entry)) { | 1281 | if (list_empty(&sdev->starved_entry)) |
1282 | list_add_tail(&sdev->starved_entry, | 1282 | list_add_tail(&sdev->starved_entry, |
1283 | &shost->starved_list); | 1283 | &shost->starved_list); |
1284 | return 0; | 1284 | return 0; |
1285 | } | ||
1286 | } | 1285 | } |
1287 | 1286 | ||
1288 | /* We're OK to process the command, so we can't be starved */ | 1287 | /* We're OK to process the command, so we can't be starved */ |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 76ee2e784f75..4c68d36f9ac2 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -993,16 +993,14 @@ static int __remove_child (struct device * dev, void * data) | |||
993 | */ | 993 | */ |
994 | void scsi_remove_target(struct device *dev) | 994 | void scsi_remove_target(struct device *dev) |
995 | { | 995 | { |
996 | struct device *rdev; | ||
997 | |||
998 | if (scsi_is_target_device(dev)) { | 996 | if (scsi_is_target_device(dev)) { |
999 | __scsi_remove_target(to_scsi_target(dev)); | 997 | __scsi_remove_target(to_scsi_target(dev)); |
1000 | return; | 998 | return; |
1001 | } | 999 | } |
1002 | 1000 | ||
1003 | rdev = get_device(dev); | 1001 | get_device(dev); |
1004 | device_for_each_child(dev, NULL, __remove_child); | 1002 | device_for_each_child(dev, NULL, __remove_child); |
1005 | put_device(rdev); | 1003 | put_device(dev); |
1006 | } | 1004 | } |
1007 | EXPORT_SYMBOL(scsi_remove_target); | 1005 | EXPORT_SYMBOL(scsi_remove_target); |
1008 | 1006 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 332387a6bc25..f905ecb5704d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -2200,3 +2200,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, " | |||
2200 | MODULE_DESCRIPTION("iSCSI Transport Interface"); | 2200 | MODULE_DESCRIPTION("iSCSI Transport Interface"); |
2201 | MODULE_LICENSE("GPL"); | 2201 | MODULE_LICENSE("GPL"); |
2202 | MODULE_VERSION(ISCSI_TRANSPORT_VERSION); | 2202 | MODULE_VERSION(ISCSI_TRANSPORT_VERSION); |
2203 | MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_ISCSI); | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 956496182c80..365024b0c407 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -583,7 +583,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) | |||
583 | * quietly refuse to do anything to a changed disc until | 583 | * quietly refuse to do anything to a changed disc until |
584 | * the changed bit has been reset | 584 | * the changed bit has been reset |
585 | */ | 585 | */ |
586 | /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ | 586 | /* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */ |
587 | goto out; | 587 | goto out; |
588 | } | 588 | } |
589 | 589 | ||
@@ -1023,7 +1023,6 @@ static int sd_media_changed(struct gendisk *disk) | |||
1023 | */ | 1023 | */ |
1024 | if (!scsi_device_online(sdp)) { | 1024 | if (!scsi_device_online(sdp)) { |
1025 | set_media_not_present(sdkp); | 1025 | set_media_not_present(sdkp); |
1026 | retval = 1; | ||
1027 | goto out; | 1026 | goto out; |
1028 | } | 1027 | } |
1029 | 1028 | ||
@@ -1054,7 +1053,6 @@ static int sd_media_changed(struct gendisk *disk) | |||
1054 | /* 0x3a is medium not present */ | 1053 | /* 0x3a is medium not present */ |
1055 | sshdr->asc == 0x3a)) { | 1054 | sshdr->asc == 0x3a)) { |
1056 | set_media_not_present(sdkp); | 1055 | set_media_not_present(sdkp); |
1057 | retval = 1; | ||
1058 | goto out; | 1056 | goto out; |
1059 | } | 1057 | } |
1060 | 1058 | ||
@@ -1065,12 +1063,27 @@ static int sd_media_changed(struct gendisk *disk) | |||
1065 | */ | 1063 | */ |
1066 | sdkp->media_present = 1; | 1064 | sdkp->media_present = 1; |
1067 | 1065 | ||
1068 | retval = sdp->changed; | ||
1069 | sdp->changed = 0; | ||
1070 | out: | 1066 | out: |
1071 | if (retval != sdkp->previous_state) | 1067 | /* |
1068 | * Report a media change under the following conditions: | ||
1069 | * | ||
1070 | * Medium is present now and wasn't present before. | ||
1071 | * Medium wasn't present before and is present now. | ||
1072 | * Medium was present at all times, but it changed while | ||
1073 | * we weren't looking (sdp->changed is set). | ||
1074 | * | ||
1075 | * If there was no medium before and there is no medium now then | ||
1076 | * don't report a change, even if a medium was inserted and removed | ||
1077 | * while we weren't looking. | ||
1078 | */ | ||
1079 | retval = (sdkp->media_present != sdkp->previous_state || | ||
1080 | (sdkp->media_present && sdp->changed)); | ||
1081 | if (retval) | ||
1072 | sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL); | 1082 | sdev_evt_send_simple(sdp, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL); |
1073 | sdkp->previous_state = retval; | 1083 | sdkp->previous_state = sdkp->media_present; |
1084 | |||
1085 | /* sdp->changed indicates medium was changed or is not present */ | ||
1086 | sdp->changed = !sdkp->media_present; | ||
1074 | kfree(sshdr); | 1087 | kfree(sshdr); |
1075 | return retval; | 1088 | return retval; |
1076 | } | 1089 | } |
@@ -1175,6 +1188,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) | |||
1175 | u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); | 1188 | u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); |
1176 | u64 bad_lba; | 1189 | u64 bad_lba; |
1177 | int info_valid; | 1190 | int info_valid; |
1191 | /* | ||
1192 | * resid is optional but mostly filled in. When it's unused, | ||
1193 | * its value is zero, so we assume the whole buffer transferred | ||
1194 | */ | ||
1195 | unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); | ||
1196 | unsigned int good_bytes; | ||
1178 | 1197 | ||
1179 | if (scmd->request->cmd_type != REQ_TYPE_FS) | 1198 | if (scmd->request->cmd_type != REQ_TYPE_FS) |
1180 | return 0; | 1199 | return 0; |
@@ -1208,7 +1227,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) | |||
1208 | /* This computation should always be done in terms of | 1227 | /* This computation should always be done in terms of |
1209 | * the resolution of the device's medium. | 1228 | * the resolution of the device's medium. |
1210 | */ | 1229 | */ |
1211 | return (bad_lba - start_lba) * scmd->device->sector_size; | 1230 | good_bytes = (bad_lba - start_lba) * scmd->device->sector_size; |
1231 | return min(good_bytes, transferred); | ||
1212 | } | 1232 | } |
1213 | 1233 | ||
1214 | /** | 1234 | /** |
@@ -1902,10 +1922,14 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1902 | int old_rcd = sdkp->RCD; | 1922 | int old_rcd = sdkp->RCD; |
1903 | int old_dpofua = sdkp->DPOFUA; | 1923 | int old_dpofua = sdkp->DPOFUA; |
1904 | 1924 | ||
1905 | if (sdp->skip_ms_page_8) | 1925 | if (sdp->skip_ms_page_8) { |
1906 | goto defaults; | 1926 | if (sdp->type == TYPE_RBC) |
1907 | 1927 | goto defaults; | |
1908 | if (sdp->type == TYPE_RBC) { | 1928 | else { |
1929 | modepage = 0x3F; | ||
1930 | dbd = 0; | ||
1931 | } | ||
1932 | } else if (sdp->type == TYPE_RBC) { | ||
1909 | modepage = 6; | 1933 | modepage = 6; |
1910 | dbd = 8; | 1934 | dbd = 8; |
1911 | } else { | 1935 | } else { |
@@ -1933,13 +1957,11 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1933 | */ | 1957 | */ |
1934 | if (len < 3) | 1958 | if (len < 3) |
1935 | goto bad_sense; | 1959 | goto bad_sense; |
1936 | if (len > 20) | 1960 | else if (len > SD_BUF_SIZE) { |
1937 | len = 20; | 1961 | sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " |
1938 | 1962 | "data from %d to %d bytes\n", len, SD_BUF_SIZE); | |
1939 | /* Take headers and block descriptors into account */ | 1963 | len = SD_BUF_SIZE; |
1940 | len += data.header_length + data.block_descriptor_length; | 1964 | } |
1941 | if (len > SD_BUF_SIZE) | ||
1942 | goto bad_sense; | ||
1943 | 1965 | ||
1944 | /* Get the data */ | 1966 | /* Get the data */ |
1945 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); | 1967 | res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len, &data, &sshdr); |
@@ -1947,16 +1969,45 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
1947 | if (scsi_status_is_good(res)) { | 1969 | if (scsi_status_is_good(res)) { |
1948 | int offset = data.header_length + data.block_descriptor_length; | 1970 | int offset = data.header_length + data.block_descriptor_length; |
1949 | 1971 | ||
1950 | if (offset >= SD_BUF_SIZE - 2) { | 1972 | while (offset < len) { |
1951 | sd_printk(KERN_ERR, sdkp, "Malformed MODE SENSE response\n"); | 1973 | u8 page_code = buffer[offset] & 0x3F; |
1952 | goto defaults; | 1974 | u8 spf = buffer[offset] & 0x40; |
1975 | |||
1976 | if (page_code == 8 || page_code == 6) { | ||
1977 | /* We're interested only in the first 3 bytes. | ||
1978 | */ | ||
1979 | if (len - offset <= 2) { | ||
1980 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
1981 | "mode parameter data\n"); | ||
1982 | goto defaults; | ||
1983 | } else { | ||
1984 | modepage = page_code; | ||
1985 | goto Page_found; | ||
1986 | } | ||
1987 | } else { | ||
1988 | /* Go to the next page */ | ||
1989 | if (spf && len - offset > 3) | ||
1990 | offset += 4 + (buffer[offset+2] << 8) + | ||
1991 | buffer[offset+3]; | ||
1992 | else if (!spf && len - offset > 1) | ||
1993 | offset += 2 + buffer[offset+1]; | ||
1994 | else { | ||
1995 | sd_printk(KERN_ERR, sdkp, "Incomplete " | ||
1996 | "mode parameter data\n"); | ||
1997 | goto defaults; | ||
1998 | } | ||
1999 | } | ||
1953 | } | 2000 | } |
1954 | 2001 | ||
1955 | if ((buffer[offset] & 0x3f) != modepage) { | 2002 | if (modepage == 0x3F) { |
2003 | sd_printk(KERN_ERR, sdkp, "No Caching mode page " | ||
2004 | "present\n"); | ||
2005 | goto defaults; | ||
2006 | } else if ((buffer[offset] & 0x3f) != modepage) { | ||
1956 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); | 2007 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); |
1957 | goto defaults; | 2008 | goto defaults; |
1958 | } | 2009 | } |
1959 | 2010 | Page_found: | |
1960 | if (modepage == 8) { | 2011 | if (modepage == 8) { |
1961 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); | 2012 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); |
1962 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); | 2013 | sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0); |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 5b7388f1c835..1871b8ae83ae 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -17,7 +17,7 @@ | |||
17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support | 17 | Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support |
18 | */ | 18 | */ |
19 | 19 | ||
20 | static const char *verstr = "20100829"; | 20 | static const char *verstr = "20101219"; |
21 | 21 | ||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | 23 | ||
@@ -3729,9 +3729,11 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm | |||
3729 | b_size = PAGE_SIZE << order; | 3729 | b_size = PAGE_SIZE << order; |
3730 | } else { | 3730 | } else { |
3731 | for (b_size = PAGE_SIZE, order = 0; | 3731 | for (b_size = PAGE_SIZE, order = 0; |
3732 | order < ST_MAX_ORDER && b_size < new_size; | 3732 | order < ST_MAX_ORDER && |
3733 | max_segs * (PAGE_SIZE << order) < new_size; | ||
3733 | order++, b_size *= 2) | 3734 | order++, b_size *= 2) |
3734 | ; /* empty */ | 3735 | ; /* empty */ |
3736 | STbuffer->reserved_page_order = order; | ||
3735 | } | 3737 | } |
3736 | if (max_segs * (PAGE_SIZE << order) < new_size) { | 3738 | if (max_segs * (PAGE_SIZE << order) < new_size) { |
3737 | if (order == ST_MAX_ORDER) | 3739 | if (order == ST_MAX_ORDER) |
@@ -3758,7 +3760,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm | |||
3758 | segs++; | 3760 | segs++; |
3759 | } | 3761 | } |
3760 | STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); | 3762 | STbuffer->b_data = page_address(STbuffer->reserved_pages[0]); |
3761 | STbuffer->reserved_page_order = order; | ||
3762 | 3763 | ||
3763 | return 1; | 3764 | return 1; |
3764 | } | 3765 | } |