Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/Makefile      |   2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c    | 193
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c     |  12
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c     | 134
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h     |   7
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h     |  65
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c     |   3
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h      |   4
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h     |  12
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c      |  11
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c    | 426
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c    |  44
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c     |  93
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c     |  50
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c     |   4
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c      | 252
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.h      |  57
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c      |  21
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c     |  22
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c      | 171
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c     |  65
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c    | 909
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.h    | 205
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h |   4
24 files changed, 2226 insertions(+), 540 deletions(-)
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index ff0fc7c7812f..44def6bb4bb0 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o qla_mr.o qla_nx2.o qla_target.o 3 qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o 6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4a0d7c92181f..07befcf365b8 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -147,6 +147,92 @@ static struct bin_attribute sysfs_fw_dump_attr = {
147}; 147};
148 148
149static ssize_t 149static ssize_t
150qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj,
151 struct bin_attribute *bin_attr,
152 char *buf, loff_t off, size_t count)
153{
154 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
155 struct device, kobj)));
156 struct qla_hw_data *ha = vha->hw;
157
158 if (!ha->fw_dump_template || !ha->fw_dump_template_len)
159 return 0;
160
161 ql_dbg(ql_dbg_user, vha, 0x70e2,
162 "chunk <- off=%llx count=%zx\n", off, count);
163 return memory_read_from_buffer(buf, count, &off,
164 ha->fw_dump_template, ha->fw_dump_template_len);
165}
166
167static ssize_t
168qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj,
169 struct bin_attribute *bin_attr,
170 char *buf, loff_t off, size_t count)
171{
172 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
173 struct device, kobj)));
174 struct qla_hw_data *ha = vha->hw;
175 uint32_t size;
176
177 if (off == 0) {
178 if (ha->fw_dump)
179 vfree(ha->fw_dump);
180 if (ha->fw_dump_template)
181 vfree(ha->fw_dump_template);
182
183 ha->fw_dump = NULL;
184 ha->fw_dump_len = 0;
185 ha->fw_dump_template = NULL;
186 ha->fw_dump_template_len = 0;
187
188 size = qla27xx_fwdt_template_size(buf);
189 ql_dbg(ql_dbg_user, vha, 0x70d1,
190 "-> allocating fwdt (%x bytes)...\n", size);
191 ha->fw_dump_template = vmalloc(size);
192 if (!ha->fw_dump_template) {
193 ql_log(ql_log_warn, vha, 0x70d2,
194 "Failed allocate fwdt (%x bytes).\n", size);
195 return -ENOMEM;
196 }
197 ha->fw_dump_template_len = size;
198 }
199
200 if (off + count > ha->fw_dump_template_len) {
201 count = ha->fw_dump_template_len - off;
202 ql_dbg(ql_dbg_user, vha, 0x70d3,
203 "chunk -> truncating to %zx bytes.\n", count);
204 }
205
206 ql_dbg(ql_dbg_user, vha, 0x70d4,
207 "chunk -> off=%llx count=%zx\n", off, count);
208 memcpy(ha->fw_dump_template + off, buf, count);
209
210 if (off + count == ha->fw_dump_template_len) {
211 size = qla27xx_fwdt_calculate_dump_size(vha);
212 ql_dbg(ql_dbg_user, vha, 0x70d5,
213 "-> allocating fwdump (%x bytes)...\n", size);
214 ha->fw_dump = vmalloc(size);
215 if (!ha->fw_dump) {
216 ql_log(ql_log_warn, vha, 0x70d6,
217 "Failed allocate fwdump (%x bytes).\n", size);
218 return -ENOMEM;
219 }
220 ha->fw_dump_len = size;
221 }
222
223 return count;
224}
225static struct bin_attribute sysfs_fw_dump_template_attr = {
226 .attr = {
227 .name = "fw_dump_template",
228 .mode = S_IRUSR | S_IWUSR,
229 },
230 .size = 0,
231 .read = qla2x00_sysfs_read_fw_dump_template,
232 .write = qla2x00_sysfs_write_fw_dump_template,
233};
234
235static ssize_t
150qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, 236qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
151 struct bin_attribute *bin_attr, 237 struct bin_attribute *bin_attr,
152 char *buf, loff_t off, size_t count) 238 char *buf, loff_t off, size_t count)
@@ -241,12 +327,17 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
241 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 327 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
242 struct device, kobj))); 328 struct device, kobj)));
243 struct qla_hw_data *ha = vha->hw; 329 struct qla_hw_data *ha = vha->hw;
330 ssize_t rval = 0;
244 331
245 if (ha->optrom_state != QLA_SREADING) 332 if (ha->optrom_state != QLA_SREADING)
246 return 0; 333 return 0;
247 334
248 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, 335 mutex_lock(&ha->optrom_mutex);
249 ha->optrom_region_size); 336 rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
337 ha->optrom_region_size);
338 mutex_unlock(&ha->optrom_mutex);
339
340 return rval;
250} 341}
251 342
252static ssize_t 343static ssize_t
@@ -265,7 +356,9 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
265 if (off + count > ha->optrom_region_size) 356 if (off + count > ha->optrom_region_size)
266 count = ha->optrom_region_size - off; 357 count = ha->optrom_region_size - off;
267 358
359 mutex_lock(&ha->optrom_mutex);
268 memcpy(&ha->optrom_buffer[off], buf, count); 360 memcpy(&ha->optrom_buffer[off], buf, count);
361 mutex_unlock(&ha->optrom_mutex);
269 362
270 return count; 363 return count;
271} 364}
@@ -288,10 +381,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
288 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 381 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
289 struct device, kobj))); 382 struct device, kobj)));
290 struct qla_hw_data *ha = vha->hw; 383 struct qla_hw_data *ha = vha->hw;
291
292 uint32_t start = 0; 384 uint32_t start = 0;
293 uint32_t size = ha->optrom_size; 385 uint32_t size = ha->optrom_size;
294 int val, valid; 386 int val, valid;
387 ssize_t rval = count;
295 388
296 if (off) 389 if (off)
297 return -EINVAL; 390 return -EINVAL;
@@ -304,12 +397,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
304 if (start > ha->optrom_size) 397 if (start > ha->optrom_size)
305 return -EINVAL; 398 return -EINVAL;
306 399
400 mutex_lock(&ha->optrom_mutex);
307 switch (val) { 401 switch (val) {
308 case 0: 402 case 0:
309 if (ha->optrom_state != QLA_SREADING && 403 if (ha->optrom_state != QLA_SREADING &&
310 ha->optrom_state != QLA_SWRITING) 404 ha->optrom_state != QLA_SWRITING) {
311 return -EINVAL; 405 rval = -EINVAL;
312 406 goto out;
407 }
313 ha->optrom_state = QLA_SWAITING; 408 ha->optrom_state = QLA_SWAITING;
314 409
315 ql_dbg(ql_dbg_user, vha, 0x7061, 410 ql_dbg(ql_dbg_user, vha, 0x7061,
@@ -320,8 +415,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
320 ha->optrom_buffer = NULL; 415 ha->optrom_buffer = NULL;
321 break; 416 break;
322 case 1: 417 case 1:
323 if (ha->optrom_state != QLA_SWAITING) 418 if (ha->optrom_state != QLA_SWAITING) {
324 return -EINVAL; 419 rval = -EINVAL;
420 goto out;
421 }
325 422
326 ha->optrom_region_start = start; 423 ha->optrom_region_start = start;
327 ha->optrom_region_size = start + size > ha->optrom_size ? 424 ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -335,13 +432,15 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
335 "(%x).\n", ha->optrom_region_size); 432 "(%x).\n", ha->optrom_region_size);
336 433
337 ha->optrom_state = QLA_SWAITING; 434 ha->optrom_state = QLA_SWAITING;
338 return -ENOMEM; 435 rval = -ENOMEM;
436 goto out;
339 } 437 }
340 438
341 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 439 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
342 ql_log(ql_log_warn, vha, 0x7063, 440 ql_log(ql_log_warn, vha, 0x7063,
343 "HBA not online, failing NVRAM update.\n"); 441 "HBA not online, failing NVRAM update.\n");
344 return -EAGAIN; 442 rval = -EAGAIN;
443 goto out;
345 } 444 }
346 445
347 ql_dbg(ql_dbg_user, vha, 0x7064, 446 ql_dbg(ql_dbg_user, vha, 0x7064,
@@ -353,8 +452,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
353 ha->optrom_region_start, ha->optrom_region_size); 452 ha->optrom_region_start, ha->optrom_region_size);
354 break; 453 break;
355 case 2: 454 case 2:
356 if (ha->optrom_state != QLA_SWAITING) 455 if (ha->optrom_state != QLA_SWAITING) {
357 return -EINVAL; 456 rval = -EINVAL;
457 goto out;
458 }
358 459
359 /* 460 /*
360 * We need to be more restrictive on which FLASH regions are 461 * We need to be more restrictive on which FLASH regions are
@@ -388,7 +489,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
388 if (!valid) { 489 if (!valid) {
389 ql_log(ql_log_warn, vha, 0x7065, 490 ql_log(ql_log_warn, vha, 0x7065,
390 "Invalid start region 0x%x/0x%x.\n", start, size); 491 "Invalid start region 0x%x/0x%x.\n", start, size);
391 return -EINVAL; 492 rval = -EINVAL;
493 goto out;
392 } 494 }
393 495
394 ha->optrom_region_start = start; 496 ha->optrom_region_start = start;
@@ -403,7 +505,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
403 "(%x)\n", ha->optrom_region_size); 505 "(%x)\n", ha->optrom_region_size);
404 506
405 ha->optrom_state = QLA_SWAITING; 507 ha->optrom_state = QLA_SWAITING;
406 return -ENOMEM; 508 rval = -ENOMEM;
509 goto out;
407 } 510 }
408 511
409 ql_dbg(ql_dbg_user, vha, 0x7067, 512 ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -413,13 +516,16 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
413 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 516 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
414 break; 517 break;
415 case 3: 518 case 3:
416 if (ha->optrom_state != QLA_SWRITING) 519 if (ha->optrom_state != QLA_SWRITING) {
417 return -EINVAL; 520 rval = -EINVAL;
521 goto out;
522 }
418 523
419 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 524 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
420 ql_log(ql_log_warn, vha, 0x7068, 525 ql_log(ql_log_warn, vha, 0x7068,
421 "HBA not online, failing flash update.\n"); 526 "HBA not online, failing flash update.\n");
422 return -EAGAIN; 527 rval = -EAGAIN;
528 goto out;
423 } 529 }
424 530
425 ql_dbg(ql_dbg_user, vha, 0x7069, 531 ql_dbg(ql_dbg_user, vha, 0x7069,
@@ -430,9 +536,12 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
430 ha->optrom_region_start, ha->optrom_region_size); 536 ha->optrom_region_start, ha->optrom_region_size);
431 break; 537 break;
432 default: 538 default:
433 return -EINVAL; 539 rval = -EINVAL;
434 } 540 }
435 return count; 541
542out:
543 mutex_unlock(&ha->optrom_mutex);
544 return rval;
436} 545}
437 546
438static struct bin_attribute sysfs_optrom_ctl_attr = { 547static struct bin_attribute sysfs_optrom_ctl_attr = {
@@ -822,6 +931,7 @@ static struct sysfs_entry {
822 int is4GBp_only; 931 int is4GBp_only;
823} bin_file_entries[] = { 932} bin_file_entries[] = {
824 { "fw_dump", &sysfs_fw_dump_attr, }, 933 { "fw_dump", &sysfs_fw_dump_attr, },
934 { "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },
825 { "nvram", &sysfs_nvram_attr, }, 935 { "nvram", &sysfs_nvram_attr, },
826 { "optrom", &sysfs_optrom_attr, }, 936 { "optrom", &sysfs_optrom_attr, },
827 { "optrom_ctl", &sysfs_optrom_ctl_attr, }, 937 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
@@ -847,6 +957,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
847 continue; 957 continue;
848 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) 958 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
849 continue; 959 continue;
960 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
961 continue;
850 962
851 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 963 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
852 iter->attr); 964 iter->attr);
@@ -1187,7 +1299,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
1187 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1299 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1188 struct qla_hw_data *ha = vha->hw; 1300 struct qla_hw_data *ha = vha->hw;
1189 1301
1190 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1302 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1191 return scnprintf(buf, PAGE_SIZE, "\n"); 1303 return scnprintf(buf, PAGE_SIZE, "\n");
1192 1304
1193 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", 1305 return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1391,6 +1503,37 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
1391 return scnprintf(buf, PAGE_SIZE, "%d\n", size); 1503 return scnprintf(buf, PAGE_SIZE, "%d\n", size);
1392} 1504}
1393 1505
1506static ssize_t
1507qla2x00_allow_cna_fw_dump_show(struct device *dev,
1508 struct device_attribute *attr, char *buf)
1509{
1510 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1511
1512 if (!IS_P3P_TYPE(vha->hw))
1513 return scnprintf(buf, PAGE_SIZE, "\n");
1514 else
1515 return scnprintf(buf, PAGE_SIZE, "%s\n",
1516 vha->hw->allow_cna_fw_dump ? "true" : "false");
1517}
1518
1519static ssize_t
1520qla2x00_allow_cna_fw_dump_store(struct device *dev,
1521 struct device_attribute *attr, const char *buf, size_t count)
1522{
1523 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1524 int val = 0;
1525
1526 if (!IS_P3P_TYPE(vha->hw))
1527 return -EINVAL;
1528
1529 if (sscanf(buf, "%d", &val) != 1)
1530 return -EINVAL;
1531
1532 vha->hw->allow_cna_fw_dump = val != 0;
1533
1534 return strlen(buf);
1535}
1536
1394static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 1537static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1395static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 1538static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1396static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 1539static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1432,6 +1575,9 @@ static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1432static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL); 1575static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);
1433static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL); 1576static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);
1434static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); 1577static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
1578static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
1579 qla2x00_allow_cna_fw_dump_show,
1580 qla2x00_allow_cna_fw_dump_store);
1435 1581
1436struct device_attribute *qla2x00_host_attrs[] = { 1582struct device_attribute *qla2x00_host_attrs[] = {
1437 &dev_attr_driver_version, 1583 &dev_attr_driver_version,
@@ -1464,6 +1610,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
1464 &dev_attr_diag_requests, 1610 &dev_attr_diag_requests,
1465 &dev_attr_diag_megabytes, 1611 &dev_attr_diag_megabytes,
1466 &dev_attr_fw_dump_size, 1612 &dev_attr_fw_dump_size,
1613 &dev_attr_allow_cna_fw_dump,
1467 NULL, 1614 NULL,
1468}; 1615};
1469 1616
@@ -1509,6 +1656,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
1509 case PORT_SPEED_16GB: 1656 case PORT_SPEED_16GB:
1510 speed = FC_PORTSPEED_16GBIT; 1657 speed = FC_PORTSPEED_16GBIT;
1511 break; 1658 break;
1659 case PORT_SPEED_32GB:
1660 speed = FC_PORTSPEED_32GBIT;
1661 break;
1512 } 1662 }
1513 fc_host_speed(shost) = speed; 1663 fc_host_speed(shost) = speed;
1514} 1664}
@@ -2160,6 +2310,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
2160 else if (IS_QLAFX00(ha)) 2310 else if (IS_QLAFX00(ha))
2161 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 2311 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2162 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 2312 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2313 else if (IS_QLA27XX(ha))
2314 speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
2315 FC_PORTSPEED_8GBIT;
2163 else 2316 else
2164 speed = FC_PORTSPEED_1GBIT; 2317 speed = FC_PORTSPEED_1GBIT;
2165 fc_host_supported_speeds(vha->host) = speed; 2318 fc_host_supported_speeds(vha->host) = speed;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f15d03e6b7ee..71ff340f6de4 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1437,9 +1437,12 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1437 if (ha->flags.nic_core_reset_hdlr_active) 1437 if (ha->flags.nic_core_reset_hdlr_active)
1438 return -EBUSY; 1438 return -EBUSY;
1439 1439
1440 mutex_lock(&ha->optrom_mutex);
1440 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1441 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1441 if (rval) 1442 if (rval) {
1443 mutex_unlock(&ha->optrom_mutex);
1442 return rval; 1444 return rval;
1445 }
1443 1446
1444 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 1447 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1445 ha->optrom_region_start, ha->optrom_region_size); 1448 ha->optrom_region_start, ha->optrom_region_size);
@@ -1453,6 +1456,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1453 vfree(ha->optrom_buffer); 1456 vfree(ha->optrom_buffer);
1454 ha->optrom_buffer = NULL; 1457 ha->optrom_buffer = NULL;
1455 ha->optrom_state = QLA_SWAITING; 1458 ha->optrom_state = QLA_SWAITING;
1459 mutex_unlock(&ha->optrom_mutex);
1456 bsg_job->job_done(bsg_job); 1460 bsg_job->job_done(bsg_job);
1457 return rval; 1461 return rval;
1458} 1462}
@@ -1465,9 +1469,12 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1465 struct qla_hw_data *ha = vha->hw; 1469 struct qla_hw_data *ha = vha->hw;
1466 int rval = 0; 1470 int rval = 0;
1467 1471
1472 mutex_lock(&ha->optrom_mutex);
1468 rval = qla2x00_optrom_setup(bsg_job, vha, 1); 1473 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1469 if (rval) 1474 if (rval) {
1475 mutex_unlock(&ha->optrom_mutex);
1470 return rval; 1476 return rval;
1477 }
1471 1478
1472 /* Set the isp82xx_no_md_cap not to capture minidump */ 1479 /* Set the isp82xx_no_md_cap not to capture minidump */
1473 ha->flags.isp82xx_no_md_cap = 1; 1480 ha->flags.isp82xx_no_md_cap = 1;
@@ -1483,6 +1490,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1483 vfree(ha->optrom_buffer); 1490 vfree(ha->optrom_buffer);
1484 ha->optrom_buffer = NULL; 1491 ha->optrom_buffer = NULL;
1485 ha->optrom_state = QLA_SWAITING; 1492 ha->optrom_state = QLA_SWAITING;
1493 mutex_unlock(&ha->optrom_mutex);
1486 bsg_job->job_done(bsg_job); 1494 bsg_job->job_done(bsg_job);
1487 return rval; 1495 return rval;
1488} 1496}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6103f553bb1..97255f7c3975 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,13 +11,15 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x015b | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x017d | 0x004b,0x0141 |
15 * | | | 0x0x015a | 15 * | | | 0x0144,0x0146 |
16 * | Mailbox commands | 0x1187 | 0x111a-0x111b | 16 * | | | 0x015b-0x0160 |
17 * | | | 0x1155-0x1158 | 17 * | | | 0x016e-0x0170 |
18 * | | | 0x1018-0x1019 | 18 * | Mailbox commands | 0x1187 | 0x1018-0x1019 |
19 * | | | 0x10ca |
19 * | | | 0x1115-0x1116 | 20 * | | | 0x1115-0x1116 |
20 * | | | 0x10ca | 21 * | | | 0x111a-0x111b |
22 * | | | 0x1155-0x1158 |
21 * | Device Discovery | 0x2095 | 0x2020-0x2022, | 23 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
22 * | | | 0x2011-0x2012, | 24 * | | | 0x2011-0x2012, |
23 * | | | 0x2016 | 25 * | | | 0x2016 |
@@ -32,18 +34,17 @@
32 * | | | 0x5047,0x5052 | 34 * | | | 0x5047,0x5052 |
33 * | | | 0x5084,0x5075 | 35 * | | | 0x5084,0x5075 |
34 * | | | 0x503d,0x5044 | 36 * | | | 0x503d,0x5044 |
37 * | | | 0x507b |
35 * | Timer Routines | 0x6012 | | 38 * | Timer Routines | 0x6012 | |
36 * | User Space Interactions | 0x70e1 | 0x7018,0x702e, | 39 * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
37 * | | | 0x7020,0x7024, | 40 * | | | 0x7020,0x7024 |
38 * | | | 0x7039,0x7045, | 41 * | | | 0x7039,0x7045 |
39 * | | | 0x7073-0x7075, | 42 * | | | 0x7073-0x7075 |
40 * | | | 0x707b,0x708c, | 43 * | | | 0x70a5-0x70a6 |
41 * | | | 0x70a5,0x70a6, | 44 * | | | 0x70a8,0x70ab |
42 * | | | 0x70a8,0x70ab, | 45 * | | | 0x70ad-0x70ae |
43 * | | | 0x70ad-0x70ae, | 46 * | | | 0x70d7-0x70db |
44 * | | | 0x70d1-0x70db, | 47 * | | | 0x70de-0x70df |
45 * | | | 0x7047,0x703b |
46 * | | | 0x70de-0x70df, |
47 * | Task Management | 0x803d | 0x8025-0x8026 | 48 * | Task Management | 0x803d | 0x8025-0x8026 |
48 * | | | 0x800b,0x8039 | 49 * | | | 0x800b,0x8039 |
49 * | AER/EEH | 0x9011 | | 50 * | AER/EEH | 0x9011 | |
@@ -59,7 +60,11 @@
59 * | | | 0xb13c-0xb140 | 60 * | | | 0xb13c-0xb140 |
60 * | | | 0xb149 | 61 * | | | 0xb149 |
61 * | MultiQ | 0xc00c | | 62 * | MultiQ | 0xc00c | |
62 * | Misc | 0xd010 | | 63 * | Misc | 0xd2ff | 0xd017-0xd019 |
64 * | | | 0xd020 |
65 * | | | 0xd02e-0xd0ff |
66 * | | | 0xd101-0xd1fe |
67 * | | | 0xd212-0xd2fe |
63 * | Target Mode | 0xe070 | 0xe021 | 68 * | Target Mode | 0xe070 | 0xe021 |
64 * | Target Mode Management | 0xf072 | 0xf002-0xf003 | 69 * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
65 * | | | 0xf046-0xf049 | 70 * | | | 0xf046-0xf049 |
@@ -104,7 +109,87 @@ qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
104 return ptr + (rsp->length * sizeof(response_t)); 109 return ptr + (rsp->length * sizeof(response_t));
105} 110}
106 111
107static int 112int
113qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
114 uint32_t ram_dwords, void **nxt)
115{
116 int rval;
117 uint32_t cnt, stat, timer, dwords, idx;
118 uint16_t mb0, mb1;
119 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
120 dma_addr_t dump_dma = ha->gid_list_dma;
121 uint32_t *dump = (uint32_t *)ha->gid_list;
122
123 rval = QLA_SUCCESS;
124 mb0 = 0;
125
126 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
127 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
128
129 dwords = qla2x00_gid_list_size(ha) / 4;
130 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
131 cnt += dwords, addr += dwords) {
132 if (cnt + dwords > ram_dwords)
133 dwords = ram_dwords - cnt;
134
135 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
136 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
137
138 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
139 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
140 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
141 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
142
143 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
144 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
145
146 WRT_REG_WORD(&reg->mailbox9, 0);
147 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
148
149 ha->flags.mbox_int = 0;
150 for (timer = 6000000; timer; timer--) {
151 /* Check for pending interrupts. */
152 stat = RD_REG_DWORD(&reg->host_status);
153 if (stat & HSRX_RISC_INT) {
154 stat &= 0xff;
155
156 if (stat == 0x1 || stat == 0x2 ||
157 stat == 0x10 || stat == 0x11) {
158 set_bit(MBX_INTERRUPT,
159 &ha->mbx_cmd_flags);
160
161 mb0 = RD_REG_WORD(&reg->mailbox0);
162 mb1 = RD_REG_WORD(&reg->mailbox1);
163
164 WRT_REG_DWORD(&reg->hccr,
165 HCCRX_CLR_RISC_INT);
166 RD_REG_DWORD(&reg->hccr);
167 break;
168 }
169
170 /* Clear this intr; it wasn't a mailbox intr */
171 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
172 RD_REG_DWORD(&reg->hccr);
173 }
174 udelay(5);
175 }
176 ha->flags.mbox_int = 1;
177
178 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
179 rval = mb0 & MBS_MASK;
180 for (idx = 0; idx < dwords; idx++)
181 ram[cnt + idx] = IS_QLA27XX(ha) ?
182 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
183 } else {
184 rval = QLA_FUNCTION_FAILED;
185 }
186 }
187
188 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
189 return rval;
190}
191
192int
108qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, 193qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
109 uint32_t ram_dwords, void **nxt) 194 uint32_t ram_dwords, void **nxt)
110{ 195{
@@ -139,6 +224,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
139 WRT_REG_WORD(&reg->mailbox5, LSW(dwords)); 224 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
140 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); 225 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
141 226
227 ha->flags.mbox_int = 0;
142 for (timer = 6000000; timer; timer--) { 228 for (timer = 6000000; timer; timer--) {
143 /* Check for pending interrupts. */ 229 /* Check for pending interrupts. */
144 stat = RD_REG_DWORD(&reg->host_status); 230 stat = RD_REG_DWORD(&reg->host_status);
@@ -164,11 +250,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
164 } 250 }
165 udelay(5); 251 udelay(5);
166 } 252 }
253 ha->flags.mbox_int = 1;
167 254
168 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { 255 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
169 rval = mb0 & MBS_MASK; 256 rval = mb0 & MBS_MASK;
170 for (idx = 0; idx < dwords; idx++) 257 for (idx = 0; idx < dwords; idx++)
171 ram[cnt + idx] = swab32(dump[idx]); 258 ram[cnt + idx] = IS_QLA27XX(ha) ?
259 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
172 } else { 260 } else {
173 rval = QLA_FUNCTION_FAILED; 261 rval = QLA_FUNCTION_FAILED;
174 } 262 }
@@ -208,7 +296,7 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
208 return buf; 296 return buf;
209} 297}
210 298
211static inline int 299int
212qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) 300qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
213{ 301{
214 int rval = QLA_SUCCESS; 302 int rval = QLA_SUCCESS;
@@ -227,7 +315,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
227 return rval; 315 return rval;
228} 316}
229 317
230static int 318int
231qla24xx_soft_reset(struct qla_hw_data *ha) 319qla24xx_soft_reset(struct qla_hw_data *ha)
232{ 320{
233 int rval = QLA_SUCCESS; 321 int rval = QLA_SUCCESS;
@@ -537,7 +625,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
537 struct qla2xxx_mq_chain *mq = ptr; 625 struct qla2xxx_mq_chain *mq = ptr;
538 device_reg_t __iomem *reg; 626 device_reg_t __iomem *reg;
539 627
540 if (!ha->mqenable || IS_QLA83XX(ha)) 628 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
541 return ptr; 629 return ptr;
542 630
543 mq = ptr; 631 mq = ptr;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 35e20b4f8b6c..cc961040f8b1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,3 +348,10 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
348#define ql_dbg_tgt 0x00004000 /* Target mode */ 348#define ql_dbg_tgt 0x00004000 /* Target mode */
349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ 349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ 350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
351
352extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
353 uint32_t, void **);
354extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
355 uint32_t, void **);
356extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
357extern int qla24xx_soft_reset(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 266724b6b899..6a106136716c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -654,7 +654,7 @@ typedef union {
654 struct device_reg_25xxmq isp25mq; 654 struct device_reg_25xxmq isp25mq;
655 struct device_reg_82xx isp82; 655 struct device_reg_82xx isp82;
656 struct device_reg_fx00 ispfx00; 656 struct device_reg_fx00 ispfx00;
657} device_reg_t; 657} __iomem device_reg_t;
658 658
659#define ISP_REQ_Q_IN(ha, reg) \ 659#define ISP_REQ_Q_IN(ha, reg) \
660 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ 660 (IS_QLA2100(ha) || IS_QLA2200(ha) ? \
@@ -808,7 +808,7 @@ struct mbx_cmd_32 {
808 Notification */ 808 Notification */
809#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */ 809#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
810#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */ 810#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
811 811#define MBA_FW_INIT_INPROGRESS 0x8500 /* Firmware boot in progress */
812/* 83XX FCoE specific */ 812/* 83XX FCoE specific */
813#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */ 813#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
814 814
@@ -938,6 +938,7 @@ struct mbx_cmd_32 {
938 */ 938 */
939#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */ 939#define MBC_WRITE_SERDES 0x3 /* Write serdes word. */
940#define MBC_READ_SERDES 0x4 /* Read serdes word. */ 940#define MBC_READ_SERDES 0x4 /* Read serdes word. */
941#define MBC_LOAD_DUMP_MPI_RAM 0x5 /* Load/Dump MPI RAM. */
941#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */ 942#define MBC_SERDES_PARAMS 0x10 /* Serdes Tx Parameters. */
942#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */ 943#define MBC_GET_IOCB_STATUS 0x12 /* Get IOCB status command. */
943#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */ 944#define MBC_PORT_PARAMS 0x1A /* Port iDMA Parameters. */
@@ -1197,30 +1198,6 @@ typedef struct {
1197 uint8_t reserved_3[26]; 1198 uint8_t reserved_3[26];
1198} init_cb_t; 1199} init_cb_t;
1199 1200
1200
1201struct init_cb_fx {
1202 uint16_t version;
1203 uint16_t reserved_1[13];
1204 __le16 request_q_outpointer;
1205 __le16 response_q_inpointer;
1206 uint16_t reserved_2[2];
1207 __le16 response_q_length;
1208 __le16 request_q_length;
1209 uint16_t reserved_3[2];
1210 __le32 request_q_address[2];
1211 __le32 response_q_address[2];
1212 uint16_t reserved_4[4];
1213 uint8_t response_q_msivec;
1214 uint8_t reserved_5[19];
1215 uint16_t interrupt_delay_timer;
1216 uint16_t reserved_6;
1217 uint32_t fwoptions1;
1218 uint32_t fwoptions2;
1219 uint32_t fwoptions3;
1220 uint8_t reserved_7[24];
1221};
1222
1223
1224/* 1201/*
1225 * Get Link Status mailbox command return buffer. 1202 * Get Link Status mailbox command return buffer.
1226 */ 1203 */
@@ -2172,6 +2149,7 @@ struct ct_fdmi_hba_attributes {
2172#define FDMI_PORT_SPEED_4GB 0x8 2149#define FDMI_PORT_SPEED_4GB 0x8
2173#define FDMI_PORT_SPEED_8GB 0x10 2150#define FDMI_PORT_SPEED_8GB 0x10
2174#define FDMI_PORT_SPEED_16GB 0x20 2151#define FDMI_PORT_SPEED_16GB 0x20
2152#define FDMI_PORT_SPEED_32GB 0x40
2175#define FDMI_PORT_SPEED_UNKNOWN 0x8000 2153#define FDMI_PORT_SPEED_UNKNOWN 0x8000
2176 2154
2177struct ct_fdmi_port_attr { 2155struct ct_fdmi_port_attr {
@@ -2680,7 +2658,7 @@ struct bidi_statistics {
2680#define QLA_MQ_SIZE 32 2658#define QLA_MQ_SIZE 32
2681#define QLA_MAX_QUEUES 256 2659#define QLA_MAX_QUEUES 256
2682#define ISP_QUE_REG(ha, id) \ 2660#define ISP_QUE_REG(ha, id) \
2683 ((ha->mqenable || IS_QLA83XX(ha)) ? \ 2661 ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
2684 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\ 2662 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
2685 ((void __iomem *)ha->iobase)) 2663 ((void __iomem *)ha->iobase))
2686#define QLA_REQ_QUE_ID(tag) \ 2664#define QLA_REQ_QUE_ID(tag) \
@@ -2818,7 +2796,6 @@ struct qla_hw_data {
2818 uint32_t fac_supported :1; 2796 uint32_t fac_supported :1;
2819 2797
2820 uint32_t chip_reset_done :1; 2798 uint32_t chip_reset_done :1;
2821 uint32_t port0 :1;
2822 uint32_t running_gold_fw :1; 2799 uint32_t running_gold_fw :1;
2823 uint32_t eeh_busy :1; 2800 uint32_t eeh_busy :1;
2824 uint32_t cpu_affinity_enabled :1; 2801 uint32_t cpu_affinity_enabled :1;
@@ -2849,7 +2826,7 @@ struct qla_hw_data {
2849 spinlock_t hardware_lock ____cacheline_aligned; 2826 spinlock_t hardware_lock ____cacheline_aligned;
2850 int bars; 2827 int bars;
2851 int mem_only; 2828 int mem_only;
2852 device_reg_t __iomem *iobase; /* Base I/O address */ 2829 device_reg_t *iobase; /* Base I/O address */
2853 resource_size_t pio_address; 2830 resource_size_t pio_address;
2854 2831
2855#define MIN_IOBASE_LEN 0x100 2832#define MIN_IOBASE_LEN 0x100
@@ -2868,8 +2845,8 @@ struct qla_hw_data {
2868 uint32_t rsp_que_off; 2845 uint32_t rsp_que_off;
2869 2846
2870 /* Multi queue data structs */ 2847 /* Multi queue data structs */
2871 device_reg_t __iomem *mqiobase; 2848 device_reg_t *mqiobase;
2872 device_reg_t __iomem *msixbase; 2849 device_reg_t *msixbase;
2873 uint16_t msix_count; 2850 uint16_t msix_count;
2874 uint8_t mqenable; 2851 uint8_t mqenable;
2875 struct req_que **req_q_map; 2852 struct req_que **req_q_map;
@@ -2905,6 +2882,7 @@ struct qla_hw_data {
2905#define PORT_SPEED_4GB 0x03 2882#define PORT_SPEED_4GB 0x03
2906#define PORT_SPEED_8GB 0x04 2883#define PORT_SPEED_8GB 0x04
2907#define PORT_SPEED_16GB 0x05 2884#define PORT_SPEED_16GB 0x05
2885#define PORT_SPEED_32GB 0x06
2908#define PORT_SPEED_10GB 0x13 2886#define PORT_SPEED_10GB 0x13
2909 uint16_t link_data_rate; /* F/W operating speed */ 2887 uint16_t link_data_rate; /* F/W operating speed */
2910 2888
@@ -2928,6 +2906,7 @@ struct qla_hw_data {
2928#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 2906#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
2929#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 2907#define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
2930#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 2908#define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
2909#define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
2931 uint32_t device_type; 2910 uint32_t device_type;
2932#define DT_ISP2100 BIT_0 2911#define DT_ISP2100 BIT_0
2933#define DT_ISP2200 BIT_1 2912#define DT_ISP2200 BIT_1
@@ -2948,7 +2927,8 @@ struct qla_hw_data {
2948#define DT_ISP8031 BIT_16 2927#define DT_ISP8031 BIT_16
2949#define DT_ISPFX00 BIT_17 2928#define DT_ISPFX00 BIT_17
2950#define DT_ISP8044 BIT_18 2929#define DT_ISP8044 BIT_18
2951#define DT_ISP_LAST (DT_ISP8044 << 1) 2930#define DT_ISP2071 BIT_19
2931#define DT_ISP_LAST (DT_ISP2071 << 1)
2952 2932
2953#define DT_T10_PI BIT_25 2933#define DT_T10_PI BIT_25
2954#define DT_IIDMA BIT_26 2934#define DT_IIDMA BIT_26
@@ -2978,6 +2958,7 @@ struct qla_hw_data {
2978#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031) 2958#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
2979#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) 2959#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
2980#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) 2960#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
2961#define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
2981 2962
2982#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ 2963#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2983 IS_QLA6312(ha) || IS_QLA6322(ha)) 2964 IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2986,6 +2967,7 @@ struct qla_hw_data {
2986#define IS_QLA25XX(ha) (IS_QLA2532(ha)) 2967#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2987#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) 2968#define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
2988#define IS_QLA84XX(ha) (IS_QLA8432(ha)) 2969#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2970#define IS_QLA27XX(ha) (IS_QLA2071(ha))
2989#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ 2971#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2990 IS_QLA84XX(ha)) 2972 IS_QLA84XX(ha))
2991#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ 2973#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -2994,11 +2976,13 @@ struct qla_hw_data {
2994#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ 2976#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2995 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ 2977 IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
2996 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ 2978 IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
2997 IS_QLA8044(ha)) 2979 IS_QLA8044(ha) || IS_QLA27XX(ha))
2998#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2980#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2999#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) 2981#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
3000#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2982#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
3001#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 2983 IS_QLA27XX(ha))
2984#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
2985 IS_QLA27XX(ha))
3002#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) 2986#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
3003 2987
3004#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) 2988#define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -3008,7 +2992,8 @@ struct qla_hw_data {
3008#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) 2992#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
3009#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) 2993#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
3010#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) 2994#define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
3011#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha)) 2995#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
2996 IS_QLA27XX(ha))
3012#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha))) 2997#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
3013/* Bit 21 of fw_attributes decides the MCTP capabilities */ 2998/* Bit 21 of fw_attributes decides the MCTP capabilities */
3014#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 2999#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
@@ -3133,6 +3118,9 @@ struct qla_hw_data {
3133 uint16_t fw_xcb_count; 3118 uint16_t fw_xcb_count;
3134 uint16_t fw_iocb_count; 3119 uint16_t fw_iocb_count;
3135 3120
3121 uint32_t fw_shared_ram_start;
3122 uint32_t fw_shared_ram_end;
3123
3136 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ 3124 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
3137 uint8_t fw_seriallink_options[4]; 3125 uint8_t fw_seriallink_options[4];
3138 uint16_t fw_seriallink_options24[4]; 3126 uint16_t fw_seriallink_options24[4];
@@ -3141,6 +3129,9 @@ struct qla_hw_data {
3141 uint32_t mpi_capabilities; 3129 uint32_t mpi_capabilities;
3142 uint8_t phy_version[3]; 3130 uint8_t phy_version[3];
3143 3131
3132 /* Firmware dump template */
3133 void *fw_dump_template;
3134 uint32_t fw_dump_template_len;
3144 /* Firmware dump information. */ 3135 /* Firmware dump information. */
3145 struct qla2xxx_fw_dump *fw_dump; 3136 struct qla2xxx_fw_dump *fw_dump;
3146 uint32_t fw_dump_len; 3137 uint32_t fw_dump_len;
@@ -3183,6 +3174,7 @@ struct qla_hw_data {
3183#define QLA_SWRITING 2 3174#define QLA_SWRITING 2
3184 uint32_t optrom_region_start; 3175 uint32_t optrom_region_start;
3185 uint32_t optrom_region_size; 3176 uint32_t optrom_region_size;
3177 struct mutex optrom_mutex;
3186 3178
3187/* PCI expansion ROM image information. */ 3179/* PCI expansion ROM image information. */
3188#define ROM_CODE_TYPE_BIOS 0 3180#define ROM_CODE_TYPE_BIOS 0
@@ -3309,6 +3301,7 @@ struct qla_hw_data {
3309 struct mr_data_fx00 mr; 3301 struct mr_data_fx00 mr;
3310 3302
3311 struct qlt_hw_data tgt; 3303 struct qlt_hw_data tgt;
3304 int allow_cna_fw_dump;
3312}; 3305};
3313 3306
3314/* 3307/*
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 792a29294b62..32ab80957688 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
114{ 114{
115 struct qla_hw_data *ha = vha->hw; 115 struct qla_hw_data *ha = vha->hw;
116 116
117 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 117 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
118 !IS_QLA27XX(ha))
118 goto out; 119 goto out;
119 if (!ha->fce) 120 if (!ha->fce)
120 goto out; 121 goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 610d3aa905a0..3a7353eaccbd 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1378,6 +1378,10 @@ struct qla_flt_header {
1378#define FLT_REG_NVRAM_0 0x15 1378#define FLT_REG_NVRAM_0 0x15
1379#define FLT_REG_VPD_1 0x16 1379#define FLT_REG_VPD_1 0x16
1380#define FLT_REG_NVRAM_1 0x17 1380#define FLT_REG_NVRAM_1 0x17
1381#define FLT_REG_VPD_2 0xD4
1382#define FLT_REG_NVRAM_2 0xD5
1383#define FLT_REG_VPD_3 0xD6
1384#define FLT_REG_NVRAM_3 0xD7
1381#define FLT_REG_FDT 0x1a 1385#define FLT_REG_FDT 0x1a
1382#define FLT_REG_FLT 0x1c 1386#define FLT_REG_FLT 0x1c
1383#define FLT_REG_HW_EVENT_0 0x1d 1387#define FLT_REG_HW_EVENT_0 0x1d
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 1f426628a0a5..e665e8109933 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -330,6 +330,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
330 dma_addr_t); 330 dma_addr_t);
331 331
332extern int qla24xx_abort_command(srb_t *); 332extern int qla24xx_abort_command(srb_t *);
333extern int qla24xx_async_abort_command(srb_t *);
333extern int 334extern int
334qla24xx_abort_target(struct fc_port *, unsigned int, int); 335qla24xx_abort_target(struct fc_port *, unsigned int, int);
335extern int 336extern int
@@ -511,6 +512,16 @@ extern void qla2300_fw_dump(scsi_qla_host_t *, int);
511extern void qla24xx_fw_dump(scsi_qla_host_t *, int); 512extern void qla24xx_fw_dump(scsi_qla_host_t *, int);
512extern void qla25xx_fw_dump(scsi_qla_host_t *, int); 513extern void qla25xx_fw_dump(scsi_qla_host_t *, int);
513extern void qla81xx_fw_dump(scsi_qla_host_t *, int); 514extern void qla81xx_fw_dump(scsi_qla_host_t *, int);
515extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
516extern void qla8044_fw_dump(scsi_qla_host_t *, int);
517
518extern void qla27xx_fwdump(scsi_qla_host_t *, int);
519extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
520extern int qla27xx_fwdt_template_valid(void *);
521extern ulong qla27xx_fwdt_template_size(void *);
522extern const void *qla27xx_fwdt_template_default(void);
523extern ulong qla27xx_fwdt_template_default_size(void);
524
514extern void qla2x00_dump_regs(scsi_qla_host_t *); 525extern void qla2x00_dump_regs(scsi_qla_host_t *);
515extern void qla2x00_dump_buffer(uint8_t *, uint32_t); 526extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
516extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); 527extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
@@ -594,7 +605,6 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
594extern irqreturn_t qlafx00_intr_handler(int, void *); 605extern irqreturn_t qlafx00_intr_handler(int, void *);
595extern void qlafx00_enable_intrs(struct qla_hw_data *); 606extern void qlafx00_enable_intrs(struct qla_hw_data *);
596extern void qlafx00_disable_intrs(struct qla_hw_data *); 607extern void qlafx00_disable_intrs(struct qla_hw_data *);
597extern int qlafx00_abort_command(srb_t *);
598extern int qlafx00_abort_target(fc_port_t *, unsigned int, int); 608extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
599extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int); 609extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
600extern int qlafx00_start_scsi(srb_t *); 610extern int qlafx00_start_scsi(srb_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index cd47f1b32d9a..e377f9d2f92a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1532,6 +1532,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1532 if (IS_CNA_CAPABLE(ha)) 1532 if (IS_CNA_CAPABLE(ha))
1533 eiter->a.sup_speed = __constant_cpu_to_be32( 1533 eiter->a.sup_speed = __constant_cpu_to_be32(
1534 FDMI_PORT_SPEED_10GB); 1534 FDMI_PORT_SPEED_10GB);
1535 else if (IS_QLA27XX(ha))
1536 eiter->a.sup_speed = __constant_cpu_to_be32(
1537 FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
1538 FDMI_PORT_SPEED_8GB);
1535 else if (IS_QLA25XX(ha)) 1539 else if (IS_QLA25XX(ha))
1536 eiter->a.sup_speed = __constant_cpu_to_be32( 1540 eiter->a.sup_speed = __constant_cpu_to_be32(
1537 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| 1541 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
@@ -1580,6 +1584,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1580 eiter->a.cur_speed = 1584 eiter->a.cur_speed =
1581 __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB); 1585 __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
1582 break; 1586 break;
1587 case PORT_SPEED_32GB:
1588 eiter->a.cur_speed =
1589 __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
1590 break;
1583 default: 1591 default:
1584 eiter->a.cur_speed = 1592 eiter->a.cur_speed =
1585 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); 1593 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1889,6 +1897,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1889 case BIT_10: 1897 case BIT_10:
1890 list[i].fp_speed = PORT_SPEED_16GB; 1898 list[i].fp_speed = PORT_SPEED_16GB;
1891 break; 1899 break;
1900 case BIT_8:
1901 list[i].fp_speed = PORT_SPEED_32GB;
1902 break;
1892 } 1903 }
1893 1904
1894 ql_dbg(ql_dbg_disc, vha, 0x205b, 1905 ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e7e5f4facf7f..38aeb54cd9d8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -271,56 +271,46 @@ done:
271} 271}
272 272
273static void 273static void
274qla2x00_async_tm_cmd_done(void *data, void *ptr, int res) 274qla2x00_tmf_iocb_timeout(void *data)
275{ 275{
276 srb_t *sp = (srb_t *)ptr; 276 srb_t *sp = (srb_t *)data;
277 struct srb_iocb *iocb = &sp->u.iocb_cmd; 277 struct srb_iocb *tmf = &sp->u.iocb_cmd;
278 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
279 uint32_t flags;
280 uint16_t lun;
281 int rval;
282
283 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
284 flags = iocb->u.tmf.flags;
285 lun = (uint16_t)iocb->u.tmf.lun;
286 278
287 /* Issue Marker IOCB */ 279 tmf->u.tmf.comp_status = CS_TIMEOUT;
288 rval = qla2x00_marker(vha, vha->hw->req_q_map[0], 280 complete(&tmf->u.tmf.comp);
289 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, 281}
290 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
291 282
292 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { 283static void
293 ql_dbg(ql_dbg_taskm, vha, 0x8030, 284qla2x00_tmf_sp_done(void *data, void *ptr, int res)
294 "TM IOCB failed (%x).\n", rval); 285{
295 } 286 srb_t *sp = (srb_t *)ptr;
296 } 287 struct srb_iocb *tmf = &sp->u.iocb_cmd;
297 sp->free(sp->fcport->vha, sp); 288 complete(&tmf->u.tmf.comp);
298} 289}
299 290
300int 291int
301qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun, 292qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
302 uint32_t tag) 293 uint32_t tag)
303{ 294{
304 struct scsi_qla_host *vha = fcport->vha; 295 struct scsi_qla_host *vha = fcport->vha;
296 struct srb_iocb *tm_iocb;
305 srb_t *sp; 297 srb_t *sp;
306 struct srb_iocb *tcf; 298 int rval = QLA_FUNCTION_FAILED;
307 int rval;
308 299
309 rval = QLA_FUNCTION_FAILED;
310 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 300 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
311 if (!sp) 301 if (!sp)
312 goto done; 302 goto done;
313 303
304 tm_iocb = &sp->u.iocb_cmd;
314 sp->type = SRB_TM_CMD; 305 sp->type = SRB_TM_CMD;
315 sp->name = "tmf"; 306 sp->name = "tmf";
316 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); 307 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
317 308 tm_iocb->u.tmf.flags = flags;
318 tcf = &sp->u.iocb_cmd; 309 tm_iocb->u.tmf.lun = lun;
319 tcf->u.tmf.flags = tm_flags; 310 tm_iocb->u.tmf.data = tag;
320 tcf->u.tmf.lun = lun; 311 sp->done = qla2x00_tmf_sp_done;
321 tcf->u.tmf.data = tag; 312 tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
322 tcf->timeout = qla2x00_async_iocb_timeout; 313 init_completion(&tm_iocb->u.tmf.comp);
323 sp->done = qla2x00_async_tm_cmd_done;
324 314
325 rval = qla2x00_start_sp(sp); 315 rval = qla2x00_start_sp(sp);
326 if (rval != QLA_SUCCESS) 316 if (rval != QLA_SUCCESS)
@@ -330,14 +320,121 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
330 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", 320 "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
331 sp->handle, fcport->loop_id, fcport->d_id.b.domain, 321 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
332 fcport->d_id.b.area, fcport->d_id.b.al_pa); 322 fcport->d_id.b.area, fcport->d_id.b.al_pa);
323
324 wait_for_completion(&tm_iocb->u.tmf.comp);
325
326 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
327 QLA_SUCCESS : QLA_FUNCTION_FAILED;
328
329 if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
330 ql_dbg(ql_dbg_taskm, vha, 0x8030,
331 "TM IOCB failed (%x).\n", rval);
332 }
333
334 if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
335 flags = tm_iocb->u.tmf.flags;
336 lun = (uint16_t)tm_iocb->u.tmf.lun;
337
338 /* Issue Marker IOCB */
339 qla2x00_marker(vha, vha->hw->req_q_map[0],
340 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
341 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
342 }
343
344done_free_sp:
345 sp->free(vha, sp);
346done:
333 return rval; 347 return rval;
348}
349
350static void
351qla24xx_abort_iocb_timeout(void *data)
352{
353 srb_t *sp = (srb_t *)data;
354 struct srb_iocb *abt = &sp->u.iocb_cmd;
355
356 abt->u.abt.comp_status = CS_TIMEOUT;
357 complete(&abt->u.abt.comp);
358}
359
360static void
361qla24xx_abort_sp_done(void *data, void *ptr, int res)
362{
363 srb_t *sp = (srb_t *)ptr;
364 struct srb_iocb *abt = &sp->u.iocb_cmd;
365
366 complete(&abt->u.abt.comp);
367}
368
369static int
370qla24xx_async_abort_cmd(srb_t *cmd_sp)
371{
372 scsi_qla_host_t *vha = cmd_sp->fcport->vha;
373 fc_port_t *fcport = cmd_sp->fcport;
374 struct srb_iocb *abt_iocb;
375 srb_t *sp;
376 int rval = QLA_FUNCTION_FAILED;
377
378 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
379 if (!sp)
380 goto done;
381
382 abt_iocb = &sp->u.iocb_cmd;
383 sp->type = SRB_ABT_CMD;
384 sp->name = "abort";
385 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
386 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
387 sp->done = qla24xx_abort_sp_done;
388 abt_iocb->timeout = qla24xx_abort_iocb_timeout;
389 init_completion(&abt_iocb->u.abt.comp);
390
391 rval = qla2x00_start_sp(sp);
392 if (rval != QLA_SUCCESS)
393 goto done_free_sp;
394
395 ql_dbg(ql_dbg_async, vha, 0x507c,
396 "Abort command issued - hdl=%x, target_id=%x\n",
397 cmd_sp->handle, fcport->tgt_id);
398
399 wait_for_completion(&abt_iocb->u.abt.comp);
400
401 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
402 QLA_SUCCESS : QLA_FUNCTION_FAILED;
334 403
335done_free_sp: 404done_free_sp:
336 sp->free(fcport->vha, sp); 405 sp->free(vha, sp);
337done: 406done:
338 return rval; 407 return rval;
339} 408}
340 409
410int
411qla24xx_async_abort_command(srb_t *sp)
412{
413 unsigned long flags = 0;
414
415 uint32_t handle;
416 fc_port_t *fcport = sp->fcport;
417 struct scsi_qla_host *vha = fcport->vha;
418 struct qla_hw_data *ha = vha->hw;
419 struct req_que *req = vha->req;
420
421 spin_lock_irqsave(&ha->hardware_lock, flags);
422 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
423 if (req->outstanding_cmds[handle] == sp)
424 break;
425 }
426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
427 if (handle == req->num_outstanding_cmds) {
428 /* Command not found. */
429 return QLA_FUNCTION_FAILED;
430 }
431 if (sp->type == SRB_FXIOCB_DCMD)
432 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
433 FXDISC_ABORT_IOCTL);
434
435 return qla24xx_async_abort_cmd(sp);
436}
437
341void 438void
342qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport, 439qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
343 uint16_t *data) 440 uint16_t *data)
@@ -1379,7 +1476,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1379 } 1476 }
1380 1477
1381 ha->fw_dumped = 0; 1478 ha->fw_dumped = 0;
1382 fixed_size = mem_size = eft_size = fce_size = mq_size = 0; 1479 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
1480 req_q_size = rsp_q_size = 0;
1481
1482 if (IS_QLA27XX(ha))
1483 goto try_fce;
1484
1383 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 1485 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1384 fixed_size = sizeof(struct qla2100_fw_dump); 1486 fixed_size = sizeof(struct qla2100_fw_dump);
1385 } else if (IS_QLA23XX(ha)) { 1487 } else if (IS_QLA23XX(ha)) {
@@ -1395,6 +1497,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1395 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); 1497 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
1396 else 1498 else
1397 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); 1499 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1500
1398 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1501 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1399 sizeof(uint32_t); 1502 sizeof(uint32_t);
1400 if (ha->mqenable) { 1503 if (ha->mqenable) {
@@ -1412,9 +1515,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1412 if (ha->tgt.atio_ring) 1515 if (ha->tgt.atio_ring)
1413 mq_size += ha->tgt.atio_q_length * sizeof(request_t); 1516 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1414 /* Allocate memory for Fibre Channel Event Buffer. */ 1517 /* Allocate memory for Fibre Channel Event Buffer. */
1415 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1518 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
1519 !IS_QLA27XX(ha))
1416 goto try_eft; 1520 goto try_eft;
1417 1521
1522try_fce:
1523 if (ha->fce)
1524 dma_free_coherent(&ha->pdev->dev,
1525 FCE_SIZE, ha->fce, ha->fce_dma);
1526
1527 /* Allocate memory for Fibre Channel Event Buffer. */
1418 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1528 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
1419 GFP_KERNEL); 1529 GFP_KERNEL);
1420 if (!tc) { 1530 if (!tc) {
@@ -1442,7 +1552,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1442 ha->flags.fce_enabled = 1; 1552 ha->flags.fce_enabled = 1;
1443 ha->fce_dma = tc_dma; 1553 ha->fce_dma = tc_dma;
1444 ha->fce = tc; 1554 ha->fce = tc;
1555
1445try_eft: 1556try_eft:
1557 if (ha->eft)
1558 dma_free_coherent(&ha->pdev->dev,
1559 EFT_SIZE, ha->eft, ha->eft_dma);
1560
1446 /* Allocate memory for Extended Trace Buffer. */ 1561 /* Allocate memory for Extended Trace Buffer. */
1447 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 1562 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
1448 GFP_KERNEL); 1563 GFP_KERNEL);
@@ -1469,15 +1584,28 @@ try_eft:
1469 ha->eft_dma = tc_dma; 1584 ha->eft_dma = tc_dma;
1470 ha->eft = tc; 1585 ha->eft = tc;
1471 } 1586 }
1587
1472cont_alloc: 1588cont_alloc:
1589 if (IS_QLA27XX(ha)) {
1590 if (!ha->fw_dump_template) {
1591 ql_log(ql_log_warn, vha, 0x00ba,
1592 "Failed missing fwdump template\n");
1593 return;
1594 }
1595 dump_size = qla27xx_fwdt_calculate_dump_size(vha);
1596 ql_dbg(ql_dbg_init, vha, 0x00fa,
1597 "-> allocating fwdump (%x bytes)...\n", dump_size);
1598 goto allocate;
1599 }
1600
1473 req_q_size = req->length * sizeof(request_t); 1601 req_q_size = req->length * sizeof(request_t);
1474 rsp_q_size = rsp->length * sizeof(response_t); 1602 rsp_q_size = rsp->length * sizeof(response_t);
1475
1476 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 1603 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
1477 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; 1604 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
1478 ha->chain_offset = dump_size; 1605 ha->chain_offset = dump_size;
1479 dump_size += mq_size + fce_size; 1606 dump_size += mq_size + fce_size;
1480 1607
1608allocate:
1481 ha->fw_dump = vmalloc(dump_size); 1609 ha->fw_dump = vmalloc(dump_size);
1482 if (!ha->fw_dump) { 1610 if (!ha->fw_dump) {
1483 ql_log(ql_log_warn, vha, 0x00c4, 1611 ql_log(ql_log_warn, vha, 0x00c4,
@@ -1499,10 +1627,13 @@ cont_alloc:
1499 } 1627 }
1500 return; 1628 return;
1501 } 1629 }
1630 ha->fw_dump_len = dump_size;
1502 ql_dbg(ql_dbg_init, vha, 0x00c5, 1631 ql_dbg(ql_dbg_init, vha, 0x00c5,
1503 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); 1632 "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
1504 1633
1505 ha->fw_dump_len = dump_size; 1634 if (IS_QLA27XX(ha))
1635 return;
1636
1506 ha->fw_dump->signature[0] = 'Q'; 1637 ha->fw_dump->signature[0] = 'Q';
1507 ha->fw_dump->signature[1] = 'L'; 1638 ha->fw_dump->signature[1] = 'L';
1508 ha->fw_dump->signature[2] = 'G'; 1639 ha->fw_dump->signature[2] = 'G';
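
For ISP27xx the dump buffer above is sized entirely from the loaded template via qla27xx_fwdt_calculate_dump_size(), and the legacy qla2xxx_fw_dump signature/header is never filled in, which is why the function returns right after recording fw_dump_len. The following is a minimal userspace-style sketch of that sizing decision only; size_from_template() and the legacy_header/regions parameters are illustrative stand-ins, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for qla27xx_fwdt_calculate_dump_size(); formula is arbitrary. */
static uint32_t size_from_template(uint32_t template_len)
{
        return template_len + 0x100000;
}

static void *alloc_fw_dump(bool is_27xx, uint32_t template_len,
                           uint32_t legacy_header, uint32_t regions,
                           uint32_t *out_len)
{
        uint32_t dump_size;

        if (is_27xx) {
                if (!template_len)
                        return NULL;            /* no template: refuse */
                dump_size = size_from_template(template_len);
        } else {
                dump_size = legacy_header + regions;
        }

        *out_len = dump_size;                   /* ha->fw_dump_len in the driver */
        return malloc(dump_size);               /* vmalloc() in the driver */
}

int main(void)
{
        uint32_t len = 0;
        void *dump = alloc_fw_dump(true, 4096, 0, 0, &len);

        printf("allocated %u bytes\n", (unsigned)len);
        free(dump);
        return 0;
}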
@@ -1718,9 +1849,6 @@ enable_82xx_npiv:
1718 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1849 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1719 } 1850 }
1720 1851
1721 if (IS_QLA83XX(ha))
1722 goto skip_fac_check;
1723
1724 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { 1852 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1725 uint32_t size; 1853 uint32_t size;
1726 1854
@@ -1733,8 +1861,8 @@ enable_82xx_npiv:
1733 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1861 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1734 ha->fw_major_version, ha->fw_minor_version, 1862 ha->fw_major_version, ha->fw_minor_version,
1735 ha->fw_subminor_version); 1863 ha->fw_subminor_version);
1736skip_fac_check: 1864
1737 if (IS_QLA83XX(ha)) { 1865 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
1738 ha->flags.fac_supported = 0; 1866 ha->flags.fac_supported = 0;
1739 rval = QLA_SUCCESS; 1867 rval = QLA_SUCCESS;
1740 } 1868 }
@@ -1933,7 +2061,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1933 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); 2061 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
1934 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); 2062 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
1935 2063
1936 if (ha->mqenable || IS_QLA83XX(ha)) { 2064 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
1937 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 2065 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1938 icb->rid = __constant_cpu_to_le16(rid); 2066 icb->rid = __constant_cpu_to_le16(rid);
1939 if (ha->flags.msix_enabled) { 2067 if (ha->flags.msix_enabled) {
@@ -4792,13 +4920,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4792 nv = ha->nvram; 4920 nv = ha->nvram;
4793 4921
4794 /* Determine NVRAM starting address. */ 4922 /* Determine NVRAM starting address. */
4795 if (ha->flags.port0) { 4923 if (ha->port_no == 0) {
4796 ha->nvram_base = FA_NVRAM_FUNC0_ADDR; 4924 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
4797 ha->vpd_base = FA_NVRAM_VPD0_ADDR; 4925 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
4798 } else { 4926 } else {
4799 ha->nvram_base = FA_NVRAM_FUNC1_ADDR; 4927 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
4800 ha->vpd_base = FA_NVRAM_VPD1_ADDR; 4928 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
4801 } 4929 }
4930
4802 ha->nvram_size = sizeof(struct nvram_24xx); 4931 ha->nvram_size = sizeof(struct nvram_24xx);
4803 ha->vpd_size = FA_NVRAM_VPD_SIZE; 4932 ha->vpd_size = FA_NVRAM_VPD_SIZE;
4804 4933
@@ -4842,7 +4971,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4842 nv->exchange_count = __constant_cpu_to_le16(0); 4971 nv->exchange_count = __constant_cpu_to_le16(0);
4843 nv->hard_address = __constant_cpu_to_le16(124); 4972 nv->hard_address = __constant_cpu_to_le16(124);
4844 nv->port_name[0] = 0x21; 4973 nv->port_name[0] = 0x21;
4845 nv->port_name[1] = 0x00 + ha->port_no; 4974 nv->port_name[1] = 0x00 + ha->port_no + 1;
4846 nv->port_name[2] = 0x00; 4975 nv->port_name[2] = 0x00;
4847 nv->port_name[3] = 0xe0; 4976 nv->port_name[3] = 0xe0;
4848 nv->port_name[4] = 0x8b; 4977 nv->port_name[4] = 0x8b;
@@ -5117,6 +5246,99 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
5117 segments--; 5246 segments--;
5118 } 5247 }
5119 5248
5249 if (!IS_QLA27XX(ha))
5250 return rval;
5251
5252 if (ha->fw_dump_template)
5253 vfree(ha->fw_dump_template);
5254 ha->fw_dump_template = NULL;
5255 ha->fw_dump_template_len = 0;
5256
5257 ql_dbg(ql_dbg_init, vha, 0x0161,
5258 "Loading fwdump template from %x\n", faddr);
5259 qla24xx_read_flash_data(vha, dcode, faddr, 7);
5260 risc_size = be32_to_cpu(dcode[2]);
5261 ql_dbg(ql_dbg_init, vha, 0x0162,
5262 "-> array size %x dwords\n", risc_size);
5263 if (risc_size == 0 || risc_size == ~0)
5264 goto default_template;
5265
5266 dlen = (risc_size - 8) * sizeof(*dcode);
5267 ql_dbg(ql_dbg_init, vha, 0x0163,
5268 "-> template allocating %x bytes...\n", dlen);
5269 ha->fw_dump_template = vmalloc(dlen);
5270 if (!ha->fw_dump_template) {
5271 ql_log(ql_log_warn, vha, 0x0164,
5272 "Failed fwdump template allocate %x bytes.\n", risc_size);
5273 goto default_template;
5274 }
5275
5276 faddr += 7;
5277 risc_size -= 8;
5278 dcode = ha->fw_dump_template;
5279 qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
5280 for (i = 0; i < risc_size; i++)
5281 dcode[i] = le32_to_cpu(dcode[i]);
5282
5283 if (!qla27xx_fwdt_template_valid(dcode)) {
5284 ql_log(ql_log_warn, vha, 0x0165,
5285 "Failed fwdump template validate\n");
5286 goto default_template;
5287 }
5288
5289 dlen = qla27xx_fwdt_template_size(dcode);
5290 ql_dbg(ql_dbg_init, vha, 0x0166,
5291 "-> template size %x bytes\n", dlen);
5292 if (dlen > risc_size * sizeof(*dcode)) {
5293 ql_log(ql_log_warn, vha, 0x0167,
5294 "Failed fwdump template exceeds array by %x bytes\n",
5295 (uint32_t)(dlen - risc_size * sizeof(*dcode)));
5296 goto default_template;
5297 }
5298 ha->fw_dump_template_len = dlen;
5299 return rval;
5300
5301default_template:
5302 ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n");
5303 if (ha->fw_dump_template)
5304 vfree(ha->fw_dump_template);
5305 ha->fw_dump_template = NULL;
5306 ha->fw_dump_template_len = 0;
5307
5308 dlen = qla27xx_fwdt_template_default_size();
5309 ql_dbg(ql_dbg_init, vha, 0x0169,
5310 "-> template allocating %x bytes...\n", dlen);
5311 ha->fw_dump_template = vmalloc(dlen);
5312 if (!ha->fw_dump_template) {
5313 ql_log(ql_log_warn, vha, 0x016a,
5314 "Failed fwdump template allocate %x bytes.\n", risc_size);
5315 goto failed_template;
5316 }
5317
5318 dcode = ha->fw_dump_template;
5319 risc_size = dlen / sizeof(*dcode);
5320 memcpy(dcode, qla27xx_fwdt_template_default(), dlen);
5321 for (i = 0; i < risc_size; i++)
5322 dcode[i] = be32_to_cpu(dcode[i]);
5323
5324 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
5325 ql_log(ql_log_warn, vha, 0x016b,
5326 "Failed fwdump template validate\n");
5327 goto failed_template;
5328 }
5329
5330 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
5331 ql_dbg(ql_dbg_init, vha, 0x016c,
5332 "-> template size %x bytes\n", dlen);
5333 ha->fw_dump_template_len = dlen;
5334 return rval;
5335
5336failed_template:
5337 ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n");
5338 if (ha->fw_dump_template)
5339 vfree(ha->fw_dump_template);
5340 ha->fw_dump_template = NULL;
5341 ha->fw_dump_template_len = 0;
5120 return rval; 5342 return rval;
5121} 5343}
5122 5344
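
The flash path above, like the request_firmware() blob path later in this series, follows one pattern: read the template header, copy and byte-swap the payload, validate it, check the reported size against what was actually read, and fall back to the built-in default template when any step fails. Below is a compact model of that load-or-fallback flow; template_valid() and template_size() are hypothetical stand-ins for qla27xx_fwdt_template_valid() and qla27xx_fwdt_template_size(), and the byte-swapping step is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for qla27xx_fwdt_template_valid()/_size(). */
static bool template_valid(const uint32_t *t)  { return t[0] != 0; }
static size_t template_size(const uint32_t *t) { return t[1]; }

/*
 * Illustrative model only: copy a candidate template, validate it, and
 * fall back to a built-in default when the candidate is unusable.
 */
static uint32_t *load_template(const uint32_t *cand, size_t cand_dwords,
                               const uint32_t *dflt, size_t dflt_bytes,
                               size_t *out_len)
{
        uint32_t *buf;
        size_t len;

        if (cand && cand_dwords) {
                buf = malloc(cand_dwords * sizeof(*buf));
                if (buf) {
                        memcpy(buf, cand, cand_dwords * sizeof(*buf));
                        if (template_valid(buf)) {
                                len = template_size(buf);
                                if (len <= cand_dwords * sizeof(*buf)) {
                                        *out_len = len;   /* good template */
                                        return buf;
                                }
                        }
                        free(buf);                        /* fall back */
                }
        }

        buf = malloc(dflt_bytes);                         /* default template */
        if (!buf)
                return NULL;
        memcpy(buf, dflt, dflt_bytes);
        if (!template_valid(buf)) {
                free(buf);
                return NULL;
        }
        *out_len = template_size(buf);
        return buf;
}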
@@ -5231,7 +5453,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5231 uint32_t risc_size; 5453 uint32_t risc_size;
5232 uint32_t i; 5454 uint32_t i;
5233 struct fw_blob *blob; 5455 struct fw_blob *blob;
5234 uint32_t *fwcode, fwclen; 5456 const uint32_t *fwcode;
5457 uint32_t fwclen;
5235 struct qla_hw_data *ha = vha->hw; 5458 struct qla_hw_data *ha = vha->hw;
5236 struct req_que *req = ha->req_q_map[0]; 5459 struct req_que *req = ha->req_q_map[0];
5237 5460
@@ -5263,7 +5486,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5263 ql_log(ql_log_fatal, vha, 0x0093, 5486 ql_log(ql_log_fatal, vha, 0x0093,
5264 "Unable to verify integrity of firmware image (%Zd).\n", 5487 "Unable to verify integrity of firmware image (%Zd).\n",
5265 blob->fw->size); 5488 blob->fw->size);
5266 goto fail_fw_integrity; 5489 return QLA_FUNCTION_FAILED;
5267 } 5490 }
5268 for (i = 0; i < 4; i++) 5491 for (i = 0; i < 4; i++)
5269 dcode[i] = be32_to_cpu(fwcode[i + 4]); 5492 dcode[i] = be32_to_cpu(fwcode[i + 4]);
@@ -5277,7 +5500,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5277 ql_log(ql_log_fatal, vha, 0x0095, 5500 ql_log(ql_log_fatal, vha, 0x0095,
5278 "Firmware data: %08x %08x %08x %08x.\n", 5501 "Firmware data: %08x %08x %08x %08x.\n",
5279 dcode[0], dcode[1], dcode[2], dcode[3]); 5502 dcode[0], dcode[1], dcode[2], dcode[3]);
5280 goto fail_fw_integrity; 5503 return QLA_FUNCTION_FAILED;
5281 } 5504 }
5282 5505
5283 while (segments && rval == QLA_SUCCESS) { 5506 while (segments && rval == QLA_SUCCESS) {
@@ -5291,8 +5514,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5291 ql_log(ql_log_fatal, vha, 0x0096, 5514 ql_log(ql_log_fatal, vha, 0x0096,
5292 "Unable to verify integrity of firmware image " 5515 "Unable to verify integrity of firmware image "
5293 "(%Zd).\n", blob->fw->size); 5516 "(%Zd).\n", blob->fw->size);
5294 5517 return QLA_FUNCTION_FAILED;
5295 goto fail_fw_integrity;
5296 } 5518 }
5297 5519
5298 fragment = 0; 5520 fragment = 0;
@@ -5326,10 +5548,100 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5326 /* Next segment. */ 5548 /* Next segment. */
5327 segments--; 5549 segments--;
5328 } 5550 }
5551
5552 if (!IS_QLA27XX(ha))
5553 return rval;
5554
5555 if (ha->fw_dump_template)
5556 vfree(ha->fw_dump_template);
5557 ha->fw_dump_template = NULL;
5558 ha->fw_dump_template_len = 0;
5559
5560 ql_dbg(ql_dbg_init, vha, 0x171,
5561 "Loading fwdump template from %x\n",
5562 (uint32_t)((void *)fwcode - (void *)blob->fw->data));
5563 risc_size = be32_to_cpu(fwcode[2]);
5564 ql_dbg(ql_dbg_init, vha, 0x172,
5565 "-> array size %x dwords\n", risc_size);
5566 if (risc_size == 0 || risc_size == ~0)
5567 goto default_template;
5568
5569 dlen = (risc_size - 8) * sizeof(*fwcode);
5570 ql_dbg(ql_dbg_init, vha, 0x0173,
5571 "-> template allocating %x bytes...\n", dlen);
5572 ha->fw_dump_template = vmalloc(dlen);
5573 if (!ha->fw_dump_template) {
5574 ql_log(ql_log_warn, vha, 0x0174,
5575 "Failed fwdump template allocate %x bytes.\n", risc_size);
5576 goto default_template;
5577 }
5578
5579 fwcode += 7;
5580 risc_size -= 8;
5581 dcode = ha->fw_dump_template;
5582 for (i = 0; i < risc_size; i++)
5583 dcode[i] = le32_to_cpu(fwcode[i]);
5584
5585 if (!qla27xx_fwdt_template_valid(dcode)) {
5586 ql_log(ql_log_warn, vha, 0x0175,
5587 "Failed fwdump template validate\n");
5588 goto default_template;
5589 }
5590
5591 dlen = qla27xx_fwdt_template_size(dcode);
5592 ql_dbg(ql_dbg_init, vha, 0x0176,
5593 "-> template size %x bytes\n", dlen);
5594 if (dlen > risc_size * sizeof(*fwcode)) {
5595 ql_log(ql_log_warn, vha, 0x0177,
5596 "Failed fwdump template exceeds array by %x bytes\n",
5597 (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
5598 goto default_template;
5599 }
5600 ha->fw_dump_template_len = dlen;
5329 return rval; 5601 return rval;
5330 5602
5331fail_fw_integrity: 5603default_template:
5332 return QLA_FUNCTION_FAILED; 5604 ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n");
5605 if (ha->fw_dump_template)
5606 vfree(ha->fw_dump_template);
5607 ha->fw_dump_template = NULL;
5608 ha->fw_dump_template_len = 0;
5609
5610 dlen = qla27xx_fwdt_template_default_size();
5611 ql_dbg(ql_dbg_init, vha, 0x0179,
5612 "-> template allocating %x bytes...\n", dlen);
5613 ha->fw_dump_template = vmalloc(dlen);
5614 if (!ha->fw_dump_template) {
5615 ql_log(ql_log_warn, vha, 0x017a,
5616 "Failed fwdump template allocate %x bytes.\n", risc_size);
5617 goto failed_template;
5618 }
5619
5620 dcode = ha->fw_dump_template;
5621 risc_size = dlen / sizeof(*fwcode);
5622 fwcode = qla27xx_fwdt_template_default();
5623 for (i = 0; i < risc_size; i++)
5624 dcode[i] = be32_to_cpu(fwcode[i]);
5625
5626 if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) {
5627 ql_log(ql_log_warn, vha, 0x017b,
5628 "Failed fwdump template validate\n");
5629 goto failed_template;
5630 }
5631
5632 dlen = qla27xx_fwdt_template_size(ha->fw_dump_template);
5633 ql_dbg(ql_dbg_init, vha, 0x017c,
5634 "-> template size %x bytes\n", dlen);
5635 ha->fw_dump_template_len = dlen;
5636 return rval;
5637
5638failed_template:
5639 ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n");
5640 if (ha->fw_dump_template)
5641 vfree(ha->fw_dump_template);
5642 ha->fw_dump_template = NULL;
5643 ha->fw_dump_template_len = 0;
5644 return rval;
5333} 5645}
5334 5646
5335int 5647int
@@ -5605,7 +5917,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5605 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 5917 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5606 nv->exchange_count = __constant_cpu_to_le16(0); 5918 nv->exchange_count = __constant_cpu_to_le16(0);
5607 nv->port_name[0] = 0x21; 5919 nv->port_name[0] = 0x21;
5608 nv->port_name[1] = 0x00 + ha->port_no; 5920 nv->port_name[1] = 0x00 + ha->port_no + 1;
5609 nv->port_name[2] = 0x00; 5921 nv->port_name[2] = 0x00;
5610 nv->port_name[3] = 0xe0; 5922 nv->port_name[3] = 0xe0;
5611 nv->port_name[4] = 0x8b; 5923 nv->port_name[4] = 0x8b;
@@ -5639,7 +5951,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5639 nv->enode_mac[2] = 0xDD; 5951 nv->enode_mac[2] = 0xDD;
5640 nv->enode_mac[3] = 0x04; 5952 nv->enode_mac[3] = 0x04;
5641 nv->enode_mac[4] = 0x05; 5953 nv->enode_mac[4] = 0x05;
5642 nv->enode_mac[5] = 0x06 + ha->port_no; 5954 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
5643 5955
5644 rval = 1; 5956 rval = 1;
5645 } 5957 }
@@ -5677,7 +5989,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5677 icb->enode_mac[2] = 0xDD; 5989 icb->enode_mac[2] = 0xDD;
5678 icb->enode_mac[3] = 0x04; 5990 icb->enode_mac[3] = 0x04;
5679 icb->enode_mac[4] = 0x05; 5991 icb->enode_mac[4] = 0x05;
5680 icb->enode_mac[5] = 0x06 + ha->port_no; 5992 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
5681 } 5993 }
5682 5994
5683 /* Use extended-initialization control block. */ 5995 /* Use extended-initialization control block. */
@@ -5780,7 +6092,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5780 ha->login_retry_count = ql2xloginretrycount; 6092 ha->login_retry_count = ql2xloginretrycount;
5781 6093
5782 /* if not running MSI-X we need handshaking on interrupts */ 6094 /* if not running MSI-X we need handshaking on interrupts */
5783 if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha)) 6095 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
5784 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); 6096 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
5785 6097
5786 /* Enable ZIO. */ 6098 /* Enable ZIO. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 46b9307e8be4..e607568bce49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -488,7 +488,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
488 req->ring_ptr++; 488 req->ring_ptr++;
489 489
490 /* Set chip new ring index. */ 490 /* Set chip new ring index. */
491 if (ha->mqenable || IS_QLA83XX(ha)) { 491 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
492 WRT_REG_DWORD(req->req_q_in, req->ring_index); 492 WRT_REG_DWORD(req->req_q_in, req->ring_index);
493 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); 493 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
494 } else if (IS_QLAFX00(ha)) { 494 } else if (IS_QLAFX00(ha)) {
@@ -524,7 +524,6 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
524{ 524{
525 mrk_entry_t *mrk; 525 mrk_entry_t *mrk;
526 struct mrk_entry_24xx *mrk24 = NULL; 526 struct mrk_entry_24xx *mrk24 = NULL;
527 struct mrk_entry_fx00 *mrkfx = NULL;
528 527
529 struct qla_hw_data *ha = vha->hw; 528 struct qla_hw_data *ha = vha->hw;
530 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 529 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
@@ -541,15 +540,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
541 mrk->entry_type = MARKER_TYPE; 540 mrk->entry_type = MARKER_TYPE;
542 mrk->modifier = type; 541 mrk->modifier = type;
543 if (type != MK_SYNC_ALL) { 542 if (type != MK_SYNC_ALL) {
544 if (IS_QLAFX00(ha)) { 543 if (IS_FWI2_CAPABLE(ha)) {
545 mrkfx = (struct mrk_entry_fx00 *) mrk;
546 mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
547 mrkfx->handle_hi = 0;
548 mrkfx->tgt_id = cpu_to_le16(loop_id);
549 mrkfx->lun[1] = LSB(lun);
550 mrkfx->lun[2] = MSB(lun);
551 host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
552 } else if (IS_FWI2_CAPABLE(ha)) {
553 mrk24 = (struct mrk_entry_24xx *) mrk; 544 mrk24 = (struct mrk_entry_24xx *) mrk;
554 mrk24->nport_handle = cpu_to_le16(loop_id); 545 mrk24->nport_handle = cpu_to_le16(loop_id);
555 mrk24->lun[1] = LSB(lun); 546 mrk24->lun[1] = LSB(lun);
@@ -1823,7 +1814,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1823 1814
1824 /* Check for room in outstanding command list. */ 1815 /* Check for room in outstanding command list. */
1825 handle = req->current_outstanding_cmd; 1816 handle = req->current_outstanding_cmd;
1826 for (index = 1; req->num_outstanding_cmds; index++) { 1817 for (index = 1; index < req->num_outstanding_cmds; index++) {
1827 handle++; 1818 handle++;
1828 if (handle == req->num_outstanding_cmds) 1819 if (handle == req->num_outstanding_cmds)
1829 handle = 1; 1820 handle = 1;
@@ -1848,7 +1839,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1848skip_cmd_array: 1839skip_cmd_array:
1849 /* Check for room on request queue. */ 1840 /* Check for room on request queue. */
1850 if (req->cnt < req_cnt) { 1841 if (req->cnt < req_cnt) {
1851 if (ha->mqenable || IS_QLA83XX(ha)) 1842 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1852 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); 1843 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1853 else if (IS_P3P_TYPE(ha)) 1844 else if (IS_P3P_TYPE(ha))
1854 cnt = RD_REG_DWORD(&reg->isp82.req_q_out); 1845 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2594,6 +2585,29 @@ queuing_error:
2594 return QLA_FUNCTION_FAILED; 2585 return QLA_FUNCTION_FAILED;
2595} 2586}
2596 2587
2588void
2589qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2590{
2591 struct srb_iocb *aio = &sp->u.iocb_cmd;
2592 scsi_qla_host_t *vha = sp->fcport->vha;
2593 struct req_que *req = vha->req;
2594
2595 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2596 abt_iocb->entry_type = ABORT_IOCB_TYPE;
2597 abt_iocb->entry_count = 1;
2598 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2599 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2600 abt_iocb->handle_to_abort =
2601 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2602 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2603 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2604 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2605 abt_iocb->vp_index = vha->vp_idx;
2606 abt_iocb->req_que_no = cpu_to_le16(req->id);
2607 /* Send the command to the firmware */
2608 wmb();
2609}
2610
2597int 2611int
2598qla2x00_start_sp(srb_t *sp) 2612qla2x00_start_sp(srb_t *sp)
2599{ 2613{
@@ -2647,7 +2661,9 @@ qla2x00_start_sp(srb_t *sp)
2647 qlafx00_fxdisc_iocb(sp, pkt); 2661 qlafx00_fxdisc_iocb(sp, pkt);
2648 break; 2662 break;
2649 case SRB_ABT_CMD: 2663 case SRB_ABT_CMD:
2650 qlafx00_abort_iocb(sp, pkt); 2664 IS_QLAFX00(ha) ?
2665 qlafx00_abort_iocb(sp, pkt) :
2666 qla24xx_abort_iocb(sp, pkt);
2651 break; 2667 break;
2652 default: 2668 default:
2653 break; 2669 break;
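
qla24xx_abort_iocb() above only formats the ABORT IOCB; the surrounding asynchronous path (qla24xx_abort_command() handing off to qla24xx_async_abort_command(), qla2x00_start_sp() emitting the entry, and the new ABORT_IOCB_TYPE handler in qla_isr.c completing the srb) is spread across the other hunks. The sketch below only illustrates the two handles the entry carries, assuming MAKE_HANDLE() packs the queue id into the upper 16 bits; struct abort_iocb_model is an illustrative reduction, not the real abort_entry_24xx layout.

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: queue id in the top 16 bits, command index below. */
#define MAKE_HANDLE(q, h)  ((uint32_t)(((uint32_t)(q) << 16) | (uint16_t)(h)))

struct abort_iocb_model {
        uint32_t handle;            /* identifies this abort request  */
        uint32_t handle_to_abort;   /* identifies the victim command  */
        uint16_t nport_handle;
        uint16_t req_que_no;
};

int main(void)
{
        struct abort_iocb_model abt = {
                .handle          = MAKE_HANDLE(0, 5),   /* queue 0, slot 5 */
                .handle_to_abort = MAKE_HANDLE(0, 3),   /* abort slot 3    */
                .nport_handle    = 0x81,
                .req_que_no      = 0,
        };

        printf("abort %#x using handle %#x\n",
               (unsigned)abt.handle_to_abort, (unsigned)abt.handle);
        return 0;
}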
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0a1dcb43d18b..95314ef2e505 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -356,15 +356,16 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
356const char * 356const char *
357qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 357qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
358{ 358{
359 static const char * const link_speeds[] = { 359 static const char *const link_speeds[] = {
360 "1", "2", "?", "4", "8", "16", "10" 360 "1", "2", "?", "4", "8", "16", "32", "10"
361 }; 361 };
362#define QLA_LAST_SPEED 7
362 363
363 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 364 if (IS_QLA2100(ha) || IS_QLA2200(ha))
364 return link_speeds[0]; 365 return link_speeds[0];
365 else if (speed == 0x13) 366 else if (speed == 0x13)
366 return link_speeds[6]; 367 return link_speeds[QLA_LAST_SPEED];
367 else if (speed < 6) 368 else if (speed < QLA_LAST_SPEED)
368 return link_speeds[speed]; 369 return link_speeds[speed];
369 else 370 else
370 return link_speeds[LS_UNKNOWN]; 371 return link_speeds[LS_UNKNOWN];
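
With the 32Gb entry added, QLA_LAST_SPEED now serves both as the index of the out-of-range 10Gb string (firmware speed code 0x13) and as the bound for direct indexing. A standalone rendering of that lookup is below; the LS_UNKNOWN value is assumed to index the "?" slot, as the fallback return suggests.

#include <stdio.h>

#define QLA_LAST_SPEED 7
#define LS_UNKNOWN     2        /* assumed to index the "?" entry */

static const char *const link_speeds[] = {
        "1", "2", "?", "4", "8", "16", "32", "10"
};

/* Illustrative mirror of the table lookup in the hunk above. */
static const char *speed_str(unsigned int speed)
{
        if (speed == 0x13)                      /* 10GbE reports 0x13 */
                return link_speeds[QLA_LAST_SPEED];
        if (speed < QLA_LAST_SPEED)
                return link_speeds[speed];
        return link_speeds[LS_UNKNOWN];
}

int main(void)
{
        printf("%s %s %s\n", speed_str(6), speed_str(0x13), speed_str(9));
        return 0;
}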
@@ -649,7 +650,7 @@ skip_rio:
649 break; 650 break;
650 651
651 case MBA_SYSTEM_ERR: /* System Error */ 652 case MBA_SYSTEM_ERR: /* System Error */
652 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ? 653 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
653 RD_REG_WORD(&reg24->mailbox7) : 0; 654 RD_REG_WORD(&reg24->mailbox7) : 0;
654 ql_log(ql_log_warn, vha, 0x5003, 655 ql_log(ql_log_warn, vha, 0x5003,
655 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 656 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -666,7 +667,7 @@ skip_rio:
666 vha->device_flags |= DFLG_DEV_FAILED; 667 vha->device_flags |= DFLG_DEV_FAILED;
667 } else { 668 } else {
668 /* Check to see if MPI timeout occurred */ 669 /* Check to see if MPI timeout occurred */
669 if ((mbx & MBX_3) && (ha->flags.port0)) 670 if ((mbx & MBX_3) && (ha->port_no == 0))
670 set_bit(MPI_RESET_NEEDED, 671 set_bit(MPI_RESET_NEEDED,
671 &vha->dpc_flags); 672 &vha->dpc_flags);
672 673
@@ -1497,8 +1498,7 @@ logio_done:
1497} 1498}
1498 1499
1499static void 1500static void
1500qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1501qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1501 struct tsk_mgmt_entry *tsk)
1502{ 1502{
1503 const char func[] = "TMF-IOCB"; 1503 const char func[] = "TMF-IOCB";
1504 const char *type; 1504 const char *type;
@@ -1506,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1506 srb_t *sp; 1506 srb_t *sp;
1507 struct srb_iocb *iocb; 1507 struct srb_iocb *iocb;
1508 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1508 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1509 int error = 1;
1510 1509
1511 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1510 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1512 if (!sp) 1511 if (!sp)
@@ -1515,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1515 iocb = &sp->u.iocb_cmd; 1514 iocb = &sp->u.iocb_cmd;
1516 type = sp->name; 1515 type = sp->name;
1517 fcport = sp->fcport; 1516 fcport = sp->fcport;
1517 iocb->u.tmf.data = QLA_SUCCESS;
1518 1518
1519 if (sts->entry_status) { 1519 if (sts->entry_status) {
1520 ql_log(ql_log_warn, fcport->vha, 0x5038, 1520 ql_log(ql_log_warn, fcport->vha, 0x5038,
1521 "Async-%s error - hdl=%x entry-status(%x).\n", 1521 "Async-%s error - hdl=%x entry-status(%x).\n",
1522 type, sp->handle, sts->entry_status); 1522 type, sp->handle, sts->entry_status);
1523 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1523 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1524 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1524 ql_log(ql_log_warn, fcport->vha, 0x5039, 1525 ql_log(ql_log_warn, fcport->vha, 0x5039,
1525 "Async-%s error - hdl=%x completion status(%x).\n", 1526 "Async-%s error - hdl=%x completion status(%x).\n",
1526 type, sp->handle, sts->comp_status); 1527 type, sp->handle, sts->comp_status);
1527 } else if (!(le16_to_cpu(sts->scsi_status) & 1528 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1529 } else if ((le16_to_cpu(sts->scsi_status) &
1528 SS_RESPONSE_INFO_LEN_VALID)) { 1530 SS_RESPONSE_INFO_LEN_VALID)) {
1529 ql_log(ql_log_warn, fcport->vha, 0x503a, 1531 if (le32_to_cpu(sts->rsp_data_len) < 4) {
1530 "Async-%s error - hdl=%x no response info(%x).\n", 1532 ql_log(ql_log_warn, fcport->vha, 0x503b,
1531 type, sp->handle, sts->scsi_status); 1533 "Async-%s error - hdl=%x not enough response(%d).\n",
1532 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1534 type, sp->handle, sts->rsp_data_len);
1533 ql_log(ql_log_warn, fcport->vha, 0x503b, 1535 } else if (sts->data[3]) {
1534 "Async-%s error - hdl=%x not enough response(%d).\n", 1536 ql_log(ql_log_warn, fcport->vha, 0x503c,
1535 type, sp->handle, sts->rsp_data_len); 1537 "Async-%s error - hdl=%x response(%x).\n",
1536 } else if (sts->data[3]) { 1538 type, sp->handle, sts->data[3]);
1537 ql_log(ql_log_warn, fcport->vha, 0x503c, 1539 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1538 "Async-%s error - hdl=%x response(%x).\n", 1540 }
1539 type, sp->handle, sts->data[3]);
1540 } else {
1541 error = 0;
1542 } 1541 }
1543 1542
1544 if (error) { 1543 if (iocb->u.tmf.data != QLA_SUCCESS)
1545 iocb->u.tmf.data = error;
1546 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1544 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1547 (uint8_t *)sts, sizeof(*sts)); 1545 (uint8_t *)sts, sizeof(*sts));
1548 }
1549 1546
1550 sp->done(vha, sp, 0); 1547 sp->done(vha, sp, 0);
1551} 1548}
@@ -2025,6 +2022,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2025 return; 2022 return;
2026 } 2023 }
2027 2024
2025 /* Task Management completion. */
2026 if (sp->type == SRB_TM_CMD) {
2027 qla24xx_tm_iocb_entry(vha, req, pkt);
2028 return;
2029 }
2030
2028 /* Fast path completion. */ 2031 /* Fast path completion. */
2029 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2032 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2030 qla2x00_process_completed_request(vha, req, handle); 2033 qla2x00_process_completed_request(vha, req, handle);
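
Task-management completions now arrive as ordinary status entries, so qla2x00_status_entry() intercepts SRB_TM_CMD and hands the packet to qla24xx_tm_iocb_entry(), which records QLA_SUCCESS or QLA_FUNCTION_FAILED in iocb->u.tmf.data before sp->done() wakes the issuer. The model below shows only that result hand-off; the waiter side mirrors the removed qlafx00_async_tm_cmd() in qla_mr.c, and the names and flag checks here are illustrative rather than driver code.

#include <stdio.h>

#define QLA_SUCCESS          0
#define QLA_FUNCTION_FAILED  1

/* Illustrative model of the srb/TMF result hand-off. */
struct tmf_srb {
        int data;                        /* iocb->u.tmf.data              */
        int completed;                   /* stands in for the completion  */
};

/* ISR side: classify the status entry and record the outcome. */
static void tm_iocb_entry(struct tmf_srb *sp, int entry_status,
                          int comp_status, int rsp_byte3)
{
        sp->data = QLA_SUCCESS;
        if (entry_status || comp_status || rsp_byte3)
                sp->data = QLA_FUNCTION_FAILED;
        sp->completed = 1;               /* sp->done() -> complete()      */
}

/* Issuer side: wait for the completion, then read the recorded result. */
static int tm_wait(struct tmf_srb *sp)
{
        while (!sp->completed)
                ;                        /* wait_for_completion() here    */
        return sp->data;
}

int main(void)
{
        struct tmf_srb sp = { 0, 0 };

        tm_iocb_entry(&sp, 0, 0, 0);
        printf("tmf result: %d\n", tm_wait(&sp));
        return 0;
}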
@@ -2425,6 +2428,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2425 } 2428 }
2426} 2429}
2427 2430
2431static void
2432qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2433 struct abort_entry_24xx *pkt)
2434{
2435 const char func[] = "ABT_IOCB";
2436 srb_t *sp;
2437 struct srb_iocb *abt;
2438
2439 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2440 if (!sp)
2441 return;
2442
2443 abt = &sp->u.iocb_cmd;
2444 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
2445 sp->done(vha, sp, 0);
2446}
2447
2428/** 2448/**
2429 * qla24xx_process_response_queue() - Process response queue entries. 2449 * qla24xx_process_response_queue() - Process response queue entries.
2430 * @ha: SCSI driver HA context 2450 * @ha: SCSI driver HA context
@@ -2474,10 +2494,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2474 qla24xx_logio_entry(vha, rsp->req, 2494 qla24xx_logio_entry(vha, rsp->req,
2475 (struct logio_entry_24xx *)pkt); 2495 (struct logio_entry_24xx *)pkt);
2476 break; 2496 break;
2477 case TSK_MGMT_IOCB_TYPE:
2478 qla24xx_tm_iocb_entry(vha, rsp->req,
2479 (struct tsk_mgmt_entry *)pkt);
2480 break;
2481 case CT_IOCB_TYPE: 2497 case CT_IOCB_TYPE:
2482 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2498 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2483 break; 2499 break;
@@ -2497,6 +2513,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2497 * from falling into default case 2513 * from falling into default case
2498 */ 2514 */
2499 break; 2515 break;
2516 case ABORT_IOCB_TYPE:
2517 qla24xx_abort_iocb_entry(vha, rsp->req,
2518 (struct abort_entry_24xx *)pkt);
2519 break;
2500 default: 2520 default:
2501 /* Type Not Supported. */ 2521 /* Type Not Supported. */
2502 ql_dbg(ql_dbg_async, vha, 0x5042, 2522 ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2525,7 +2545,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2525 struct qla_hw_data *ha = vha->hw; 2545 struct qla_hw_data *ha = vha->hw;
2526 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2546 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2527 2547
2528 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 2548 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2549 !IS_QLA27XX(ha))
2529 return; 2550 return;
2530 2551
2531 rval = QLA_SUCCESS; 2552 rval = QLA_SUCCESS;
@@ -2979,7 +3000,7 @@ msix_register_fail:
2979 } 3000 }
2980 3001
2981 /* Enable MSI-X vector for response queue update for queue 0 */ 3002 /* Enable MSI-X vector for response queue update for queue 0 */
2982 if (IS_QLA83XX(ha)) { 3003 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2983 if (ha->msixbase && ha->mqiobase && 3004 if (ha->msixbase && ha->mqiobase &&
2984 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3005 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2985 ha->mqenable = 1; 3006 ha->mqenable = 1;
@@ -3003,12 +3024,13 @@ int
3003qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3024qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3004{ 3025{
3005 int ret = QLA_FUNCTION_FAILED; 3026 int ret = QLA_FUNCTION_FAILED;
3006 device_reg_t __iomem *reg = ha->iobase; 3027 device_reg_t *reg = ha->iobase;
3007 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3028 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3008 3029
3009 /* If possible, enable MSI-X. */ 3030 /* If possible, enable MSI-X. */
3010 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3031 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3011 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha)) 3032 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
3033 !IS_QLA27XX(ha))
3012 goto skip_msi; 3034 goto skip_msi;
3013 3035
3014 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3036 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -3043,7 +3065,8 @@ skip_msix:
3043 "Falling back-to MSI mode -%d.\n", ret); 3065 "Falling back-to MSI mode -%d.\n", ret);
3044 3066
3045 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3067 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3046 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha)) 3068 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3069 !IS_QLA27XX(ha))
3047 goto skip_msi; 3070 goto skip_msi;
3048 3071
3049 ret = pci_enable_msi(ha->pdev); 3072 ret = pci_enable_msi(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b94511ae0051..2528709c4add 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -35,7 +35,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
35{ 35{
36 int rval; 36 int rval;
37 unsigned long flags = 0; 37 unsigned long flags = 0;
38 device_reg_t __iomem *reg; 38 device_reg_t *reg;
39 uint8_t abort_active; 39 uint8_t abort_active;
40 uint8_t io_lock_on; 40 uint8_t io_lock_on;
41 uint16_t command = 0; 41 uint16_t command = 0;
@@ -468,7 +468,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
468 mcp->mb[1] = MSW(risc_addr); 468 mcp->mb[1] = MSW(risc_addr);
469 mcp->mb[2] = LSW(risc_addr); 469 mcp->mb[2] = LSW(risc_addr);
470 mcp->mb[3] = 0; 470 mcp->mb[3] = 0;
471 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) { 471 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
472 IS_QLA27XX(ha)) {
472 struct nvram_81xx *nv = ha->nvram; 473 struct nvram_81xx *nv = ha->nvram;
473 mcp->mb[4] = (nv->enhanced_features & 474 mcp->mb[4] = (nv->enhanced_features &
474 EXTENDED_BB_CREDITS); 475 EXTENDED_BB_CREDITS);
@@ -539,6 +540,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
539 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 540 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
540 if (IS_FWI2_CAPABLE(ha)) 541 if (IS_FWI2_CAPABLE(ha))
541 mcp->in_mb |= MBX_17|MBX_16|MBX_15; 542 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
543 if (IS_QLA27XX(ha))
544 mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
542 mcp->flags = 0; 545 mcp->flags = 0;
543 mcp->tov = MBX_TOV_SECONDS; 546 mcp->tov = MBX_TOV_SECONDS;
544 rval = qla2x00_mailbox_command(vha, mcp); 547 rval = qla2x00_mailbox_command(vha, mcp);
@@ -574,6 +577,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
574 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 577 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
575 __func__, mcp->mb[17], mcp->mb[16]); 578 __func__, mcp->mb[17], mcp->mb[16]);
576 } 579 }
580 if (IS_QLA27XX(ha)) {
581 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
582 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
583 }
577 584
578failed: 585failed:
579 if (rval != QLA_SUCCESS) { 586 if (rval != QLA_SUCCESS) {
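
For ISP27xx the firmware-version command additionally returns the firmware shared-RAM window, split across 16-bit mailboxes and reassembled as high-word/low-word pairs. A short worked example of that reassembly, with made-up mailbox values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical mailbox contents. */
        uint16_t mb18 = 0x0000, mb19 = 0x00c0;   /* start: low, high */
        uint16_t mb20 = 0xffff, mb21 = 0x00c0;   /* end:   low, high */

        uint32_t start = ((uint32_t)mb19 << 16) | mb18;   /* 0x00c00000 */
        uint32_t end   = ((uint32_t)mb21 << 16) | mb20;   /* 0x00c0ffff */

        printf("fw shared ram: %#x-%#x\n", (unsigned)start, (unsigned)end);
        return 0;
}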
@@ -1225,7 +1232,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1225 } 1232 }
1226 /* 1 and 2 should normally be captured. */ 1233 /* 1 and 2 should normally be captured. */
1227 mcp->in_mb = MBX_2|MBX_1|MBX_0; 1234 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1228 if (IS_QLA83XX(ha)) 1235 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1229 /* mb3 is additional info about the installed SFP. */ 1236 /* mb3 is additional info about the installed SFP. */
1230 mcp->in_mb |= MBX_3; 1237 mcp->in_mb |= MBX_3;
1231 mcp->buf_size = size; 1238 mcp->buf_size = size;
@@ -2349,7 +2356,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2349 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2356 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2350 mcp->out_mb = MBX_0; 2357 mcp->out_mb = MBX_0;
2351 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2358 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2352 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 2359 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2353 mcp->in_mb |= MBX_12; 2360 mcp->in_mb |= MBX_12;
2354 mcp->tov = MBX_TOV_SECONDS; 2361 mcp->tov = MBX_TOV_SECONDS;
2355 mcp->flags = 0; 2362 mcp->flags = 0;
@@ -2590,6 +2597,9 @@ qla24xx_abort_command(srb_t *sp)
2590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c, 2597 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2591 "Entered %s.\n", __func__); 2598 "Entered %s.\n", __func__);
2592 2599
2600 if (ql2xasynctmfenable)
2601 return qla24xx_async_abort_command(sp);
2602
2593 spin_lock_irqsave(&ha->hardware_lock, flags); 2603 spin_lock_irqsave(&ha->hardware_lock, flags);
2594 for (handle = 1; handle < req->num_outstanding_cmds; handle++) { 2604 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2595 if (req->outstanding_cmds[handle] == sp) 2605 if (req->outstanding_cmds[handle] == sp)
@@ -3032,7 +3042,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3032 "Entered %s.\n", __func__); 3042 "Entered %s.\n", __func__);
3033 3043
3034 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 3044 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3035 !IS_QLA83XX(vha->hw)) 3045 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3036 return QLA_FUNCTION_FAILED; 3046 return QLA_FUNCTION_FAILED;
3037 3047
3038 if (unlikely(pci_channel_offline(vha->hw->pdev))) 3048 if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -3662,7 +3672,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3662 mcp->mb[12] = req->qos; 3672 mcp->mb[12] = req->qos;
3663 mcp->mb[11] = req->vp_idx; 3673 mcp->mb[11] = req->vp_idx;
3664 mcp->mb[13] = req->rid; 3674 mcp->mb[13] = req->rid;
3665 if (IS_QLA83XX(ha)) 3675 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3666 mcp->mb[15] = 0; 3676 mcp->mb[15] = 0;
3667 3677
3668 mcp->mb[4] = req->id; 3678 mcp->mb[4] = req->id;
@@ -3676,9 +3686,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3676 mcp->flags = MBX_DMA_OUT; 3686 mcp->flags = MBX_DMA_OUT;
3677 mcp->tov = MBX_TOV_SECONDS * 2; 3687 mcp->tov = MBX_TOV_SECONDS * 2;
3678 3688
3679 if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) 3689 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
3680 mcp->in_mb |= MBX_1; 3690 mcp->in_mb |= MBX_1;
3681 if (IS_QLA83XX(ha)) { 3691 if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) {
3682 mcp->out_mb |= MBX_15; 3692 mcp->out_mb |= MBX_15;
3683 /* debug q create issue in SR-IOV */ 3693 /* debug q create issue in SR-IOV */
3684 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; 3694 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3687,7 +3697,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3687 spin_lock_irqsave(&ha->hardware_lock, flags); 3697 spin_lock_irqsave(&ha->hardware_lock, flags);
3688 if (!(req->options & BIT_0)) { 3698 if (!(req->options & BIT_0)) {
3689 WRT_REG_DWORD(req->req_q_in, 0); 3699 WRT_REG_DWORD(req->req_q_in, 0);
3690 if (!IS_QLA83XX(ha)) 3700 if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha))
3691 WRT_REG_DWORD(req->req_q_out, 0); 3701 WRT_REG_DWORD(req->req_q_out, 0);
3692 } 3702 }
3693 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3703 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3725,7 +3735,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3725 mcp->mb[5] = rsp->length; 3735 mcp->mb[5] = rsp->length;
3726 mcp->mb[14] = rsp->msix->entry; 3736 mcp->mb[14] = rsp->msix->entry;
3727 mcp->mb[13] = rsp->rid; 3737 mcp->mb[13] = rsp->rid;
3728 if (IS_QLA83XX(ha)) 3738 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3729 mcp->mb[15] = 0; 3739 mcp->mb[15] = 0;
3730 3740
3731 mcp->mb[4] = rsp->id; 3741 mcp->mb[4] = rsp->id;
@@ -3742,7 +3752,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3742 if (IS_QLA81XX(ha)) { 3752 if (IS_QLA81XX(ha)) {
3743 mcp->out_mb |= MBX_12|MBX_11|MBX_10; 3753 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
3744 mcp->in_mb |= MBX_1; 3754 mcp->in_mb |= MBX_1;
3745 } else if (IS_QLA83XX(ha)) { 3755 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3746 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; 3756 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
3747 mcp->in_mb |= MBX_1; 3757 mcp->in_mb |= MBX_1;
3748 /* debug q create issue in SR-IOV */ 3758 /* debug q create issue in SR-IOV */
@@ -3809,7 +3819,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc, 3819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3810 "Entered %s.\n", __func__); 3820 "Entered %s.\n", __func__);
3811 3821
3812 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3822 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
3823 !IS_QLA27XX(vha->hw))
3813 return QLA_FUNCTION_FAILED; 3824 return QLA_FUNCTION_FAILED;
3814 3825
3815 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3826 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3840,7 +3851,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3840 mbx_cmd_t mc; 3851 mbx_cmd_t mc;
3841 mbx_cmd_t *mcp = &mc; 3852 mbx_cmd_t *mcp = &mc;
3842 3853
3843 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3854 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
3855 !IS_QLA27XX(vha->hw))
3844 return QLA_FUNCTION_FAILED; 3856 return QLA_FUNCTION_FAILED;
3845 3857
3846 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, 3858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
@@ -3874,7 +3886,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3874 mbx_cmd_t mc; 3886 mbx_cmd_t mc;
3875 mbx_cmd_t *mcp = &mc; 3887 mbx_cmd_t *mcp = &mc;
3876 3888
3877 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3889 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
3890 !IS_QLA27XX(vha->hw))
3878 return QLA_FUNCTION_FAILED; 3891 return QLA_FUNCTION_FAILED;
3879 3892
3880 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, 3893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
@@ -4545,7 +4558,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4545 mcp->mb[1] = 0; 4558 mcp->mb[1] = 0;
4546 mcp->out_mb = MBX_1|MBX_0; 4559 mcp->out_mb = MBX_1|MBX_0;
4547 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4560 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4548 if (IS_QLA83XX(ha)) 4561 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4549 mcp->in_mb |= MBX_3; 4562 mcp->in_mb |= MBX_3;
4550 mcp->tov = MBX_TOV_SECONDS; 4563 mcp->tov = MBX_TOV_SECONDS;
4551 mcp->flags = 0; 4564 mcp->flags = 0;
@@ -4574,7 +4587,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4574 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109, 4587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4575 "Entered %s.\n", __func__); 4588 "Entered %s.\n", __func__);
4576 4589
4577 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha)) 4590 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
4591 !IS_QLA27XX(ha))
4578 return QLA_FUNCTION_FAILED; 4592 return QLA_FUNCTION_FAILED;
4579 mcp->mb[0] = MBC_GET_PORT_CONFIG; 4593 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4580 mcp->out_mb = MBX_0; 4594 mcp->out_mb = MBX_0;
@@ -5070,7 +5084,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5070 mbx_cmd_t mc; 5084 mbx_cmd_t mc;
5071 mbx_cmd_t *mcp = &mc; 5085 mbx_cmd_t *mcp = &mc;
5072 5086
5073 if (!IS_QLA83XX(ha)) 5087 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5074 return QLA_FUNCTION_FAILED; 5088 return QLA_FUNCTION_FAILED;
5075 5089
5076 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, 5090 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
@@ -5145,7 +5159,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5145 struct qla_hw_data *ha = vha->hw; 5159 struct qla_hw_data *ha = vha->hw;
5146 unsigned long retry_max_time = jiffies + (2 * HZ); 5160 unsigned long retry_max_time = jiffies + (2 * HZ);
5147 5161
5148 if (!IS_QLA83XX(ha)) 5162 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5149 return QLA_FUNCTION_FAILED; 5163 return QLA_FUNCTION_FAILED;
5150 5164
5151 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); 5165 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index a72df701fb38..f0a852257f99 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -630,7 +630,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
630 struct req_que *req = NULL; 630 struct req_que *req = NULL;
631 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 631 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
632 uint16_t que_id = 0; 632 uint16_t que_id = 0;
633 device_reg_t __iomem *reg; 633 device_reg_t *reg;
634 uint32_t cnt; 634 uint32_t cnt;
635 635
636 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 636 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
@@ -754,7 +754,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
754 struct rsp_que *rsp = NULL; 754 struct rsp_que *rsp = NULL;
755 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 755 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
756 uint16_t que_id = 0; 756 uint16_t que_id = 0;
757 device_reg_t __iomem *reg; 757 device_reg_t *reg;
758 758
759 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 759 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
760 if (rsp == NULL) { 760 if (rsp == NULL) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ba6f8b139c98..0aaf6a9c87d3 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -40,7 +40,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
40{ 40{
41 int rval; 41 int rval;
42 unsigned long flags = 0; 42 unsigned long flags = 0;
43 device_reg_t __iomem *reg; 43 device_reg_t *reg;
44 uint8_t abort_active; 44 uint8_t abort_active;
45 uint8_t io_lock_on; 45 uint8_t io_lock_on;
46 uint16_t command = 0; 46 uint16_t command = 0;
@@ -631,20 +631,6 @@ qlafx00_config_rings(struct scsi_qla_host *vha)
631{ 631{
632 struct qla_hw_data *ha = vha->hw; 632 struct qla_hw_data *ha = vha->hw;
633 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 633 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
634 struct init_cb_fx *icb;
635 struct req_que *req = ha->req_q_map[0];
636 struct rsp_que *rsp = ha->rsp_q_map[0];
637
638 /* Setup ring parameters in initialization control block. */
639 icb = (struct init_cb_fx *)ha->init_cb;
640 icb->request_q_outpointer = __constant_cpu_to_le16(0);
641 icb->response_q_inpointer = __constant_cpu_to_le16(0);
642 icb->request_q_length = cpu_to_le16(req->length);
643 icb->response_q_length = cpu_to_le16(rsp->length);
644 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
645 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
646 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
647 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
648 634
649 WRT_REG_DWORD(&reg->req_q_in, 0); 635 WRT_REG_DWORD(&reg->req_q_in, 0);
650 WRT_REG_DWORD(&reg->req_q_out, 0); 636 WRT_REG_DWORD(&reg->req_q_out, 0);
@@ -699,78 +685,16 @@ qlafx00_disable_intrs(struct qla_hw_data *ha)
699 spin_unlock_irqrestore(&ha->hardware_lock, flags); 685 spin_unlock_irqrestore(&ha->hardware_lock, flags);
700} 686}
701 687
702static void
703qlafx00_tmf_iocb_timeout(void *data)
704{
705 srb_t *sp = (srb_t *)data;
706 struct srb_iocb *tmf = &sp->u.iocb_cmd;
707
708 tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
709 complete(&tmf->u.tmf.comp);
710}
711
712static void
713qlafx00_tmf_sp_done(void *data, void *ptr, int res)
714{
715 srb_t *sp = (srb_t *)ptr;
716 struct srb_iocb *tmf = &sp->u.iocb_cmd;
717
718 complete(&tmf->u.tmf.comp);
719}
720
721static int
722qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
723 uint32_t lun, uint32_t tag)
724{
725 scsi_qla_host_t *vha = fcport->vha;
726 struct srb_iocb *tm_iocb;
727 srb_t *sp;
728 int rval = QLA_FUNCTION_FAILED;
729
730 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
731 if (!sp)
732 goto done;
733
734 tm_iocb = &sp->u.iocb_cmd;
735 sp->type = SRB_TM_CMD;
736 sp->name = "tmf";
737 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
738 tm_iocb->u.tmf.flags = flags;
739 tm_iocb->u.tmf.lun = lun;
740 tm_iocb->u.tmf.data = tag;
741 sp->done = qlafx00_tmf_sp_done;
742 tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
743 init_completion(&tm_iocb->u.tmf.comp);
744
745 rval = qla2x00_start_sp(sp);
746 if (rval != QLA_SUCCESS)
747 goto done_free_sp;
748
749 ql_dbg(ql_dbg_async, vha, 0x507b,
750 "Task management command issued target_id=%x\n",
751 fcport->tgt_id);
752
753 wait_for_completion(&tm_iocb->u.tmf.comp);
754
755 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
756 QLA_SUCCESS : QLA_FUNCTION_FAILED;
757
758done_free_sp:
759 sp->free(vha, sp);
760done:
761 return rval;
762}
763
764int 688int
765qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag) 689qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
766{ 690{
767 return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); 691 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
768} 692}
769 693
770int 694int
771qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag) 695qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
772{ 696{
773 return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); 697 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
774} 698}
775 699
776int 700int
@@ -997,6 +921,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
997 break; 921 break;
998 922
999 default: 923 default:
924 if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS)
925 break;
926
1000 /* If fw is apparently not ready. In order to continue, 927 /* If fw is apparently not ready. In order to continue,
1001 * we might need to issue Mbox cmd, but the problem is 928 * we might need to issue Mbox cmd, but the problem is
1002 * that the DoorBell vector values that come with the 929 * that the DoorBell vector values that come with the
@@ -2014,7 +1941,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
2014 memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE); 1941 memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
2015 } else if (fx_type == FXDISC_ABORT_IOCTL) 1942 } else if (fx_type == FXDISC_ABORT_IOCTL)
2016 fdisc->u.fxiocb.result = 1943 fdisc->u.fxiocb.result =
2017 (fdisc->u.fxiocb.result == cpu_to_le32(0x68)) ? 1944 (fdisc->u.fxiocb.result ==
1945 cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
2018 cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED); 1946 cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED);
2019 1947
2020 rval = le32_to_cpu(fdisc->u.fxiocb.result); 1948 rval = le32_to_cpu(fdisc->u.fxiocb.result);
@@ -2034,94 +1962,6 @@ done:
2034 return rval; 1962 return rval;
2035} 1963}
2036 1964
2037static void
2038qlafx00_abort_iocb_timeout(void *data)
2039{
2040 srb_t *sp = (srb_t *)data;
2041 struct srb_iocb *abt = &sp->u.iocb_cmd;
2042
2043 abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
2044 complete(&abt->u.abt.comp);
2045}
2046
2047static void
2048qlafx00_abort_sp_done(void *data, void *ptr, int res)
2049{
2050 srb_t *sp = (srb_t *)ptr;
2051 struct srb_iocb *abt = &sp->u.iocb_cmd;
2052
2053 complete(&abt->u.abt.comp);
2054}
2055
2056static int
2057qlafx00_async_abt_cmd(srb_t *cmd_sp)
2058{
2059 scsi_qla_host_t *vha = cmd_sp->fcport->vha;
2060 fc_port_t *fcport = cmd_sp->fcport;
2061 struct srb_iocb *abt_iocb;
2062 srb_t *sp;
2063 int rval = QLA_FUNCTION_FAILED;
2064
2065 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2066 if (!sp)
2067 goto done;
2068
2069 abt_iocb = &sp->u.iocb_cmd;
2070 sp->type = SRB_ABT_CMD;
2071 sp->name = "abort";
2072 qla2x00_init_timer(sp, FXDISC_TIMEOUT);
2073 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
2074 sp->done = qlafx00_abort_sp_done;
2075 abt_iocb->timeout = qlafx00_abort_iocb_timeout;
2076 init_completion(&abt_iocb->u.abt.comp);
2077
2078 rval = qla2x00_start_sp(sp);
2079 if (rval != QLA_SUCCESS)
2080 goto done_free_sp;
2081
2082 ql_dbg(ql_dbg_async, vha, 0x507c,
2083 "Abort command issued - hdl=%x, target_id=%x\n",
2084 cmd_sp->handle, fcport->tgt_id);
2085
2086 wait_for_completion(&abt_iocb->u.abt.comp);
2087
2088 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
2089 QLA_SUCCESS : QLA_FUNCTION_FAILED;
2090
2091done_free_sp:
2092 sp->free(vha, sp);
2093done:
2094 return rval;
2095}
2096
2097int
2098qlafx00_abort_command(srb_t *sp)
2099{
2100 unsigned long flags = 0;
2101
2102 uint32_t handle;
2103 fc_port_t *fcport = sp->fcport;
2104 struct scsi_qla_host *vha = fcport->vha;
2105 struct qla_hw_data *ha = vha->hw;
2106 struct req_que *req = vha->req;
2107
2108 spin_lock_irqsave(&ha->hardware_lock, flags);
2109 for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
2110 if (req->outstanding_cmds[handle] == sp)
2111 break;
2112 }
2113 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2114 if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
2115 /* Command not found. */
2116 return QLA_FUNCTION_FAILED;
2117 }
2118 if (sp->type == SRB_FXIOCB_DCMD)
2119 return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2120 FXDISC_ABORT_IOCTL);
2121
2122 return qlafx00_async_abt_cmd(sp);
2123}
2124
2125/* 1965/*
2126 * qlafx00_initialize_adapter 1966 * qlafx00_initialize_adapter
2127 * Initialize board. 1967 * Initialize board.
@@ -2150,7 +1990,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2150 vha->device_flags = DFLG_NO_CABLE; 1990 vha->device_flags = DFLG_NO_CABLE;
2151 vha->dpc_flags = 0; 1991 vha->dpc_flags = 0;
2152 vha->flags.management_server_logged_in = 0; 1992 vha->flags.management_server_logged_in = 0;
2153 vha->marker_needed = 0;
2154 ha->isp_abort_cnt = 0; 1993 ha->isp_abort_cnt = 0;
2155 ha->beacon_blink_led = 0; 1994 ha->beacon_blink_led = 0;
2156 1995
@@ -2354,8 +2193,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2354 fstatus.ioctl_flags = pkt->fw_iotcl_flags; 2193 fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2355 fstatus.ioctl_data = pkt->dataword_r; 2194 fstatus.ioctl_data = pkt->dataword_r;
2356 fstatus.adapid = pkt->adapid; 2195 fstatus.adapid = pkt->adapid;
2357 fstatus.adapid_hi = pkt->adapid_hi; 2196 fstatus.reserved_2 = pkt->dataword_r_extra;
2358 fstatus.reserved_2 = pkt->reserved_1;
2359 fstatus.res_count = pkt->residuallen; 2197 fstatus.res_count = pkt->residuallen;
2360 fstatus.status = pkt->status; 2198 fstatus.status = pkt->status;
2361 fstatus.seq_number = pkt->seq_no; 2199 fstatus.seq_number = pkt->seq_no;
@@ -2804,7 +2642,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2804 srb_t *sp; 2642 srb_t *sp;
2805 struct qla_hw_data *ha = vha->hw; 2643 struct qla_hw_data *ha = vha->hw;
2806 const char func[] = "ERROR-IOCB"; 2644 const char func[] = "ERROR-IOCB";
2807 uint16_t que = MSW(pkt->handle); 2645 uint16_t que = 0;
2808 struct req_que *req = NULL; 2646 struct req_que *req = NULL;
2809 int res = DID_ERROR << 16; 2647 int res = DID_ERROR << 16;
2810 2648
@@ -2833,16 +2671,22 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2833{ 2671{
2834 struct sts_entry_fx00 *pkt; 2672 struct sts_entry_fx00 *pkt;
2835 response_t *lptr; 2673 response_t *lptr;
2674 uint16_t lreq_q_in = 0;
2675 uint16_t lreq_q_out = 0;
2836 2676
2837 while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) != 2677 lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
2838 RESPONSE_PROCESSED) { 2678 lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
2679
2680 while (lreq_q_in != lreq_q_out) {
2839 lptr = rsp->ring_ptr; 2681 lptr = rsp->ring_ptr;
2840 memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr, 2682 memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
2841 sizeof(rsp->rsp_pkt)); 2683 sizeof(rsp->rsp_pkt));
2842 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt; 2684 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2843 2685
2844 rsp->ring_index++; 2686 rsp->ring_index++;
2687 lreq_q_out++;
2845 if (rsp->ring_index == rsp->length) { 2688 if (rsp->ring_index == rsp->length) {
2689 lreq_q_out = 0;
2846 rsp->ring_index = 0; 2690 rsp->ring_index = 0;
2847 rsp->ring_ptr = rsp->ring; 2691 rsp->ring_ptr = rsp->ring;
2848 } else { 2692 } else {
@@ -2854,7 +2698,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2854 qlafx00_error_entry(vha, rsp, 2698 qlafx00_error_entry(vha, rsp,
2855 (struct sts_entry_fx00 *)pkt, pkt->entry_status, 2699 (struct sts_entry_fx00 *)pkt, pkt->entry_status,
2856 pkt->entry_type); 2700 pkt->entry_type);
2857 goto next_iter;
2858 continue; 2701 continue;
2859 } 2702 }
2860 2703
@@ -2888,10 +2731,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
2888 pkt->entry_type, pkt->entry_status); 2731 pkt->entry_type, pkt->entry_status);
2889 break; 2732 break;
2890 } 2733 }
2891next_iter:
2892 WRT_REG_DWORD((void __iomem *)&lptr->signature,
2893 RESPONSE_PROCESSED);
2894 wmb();
2895 } 2734 }
2896 2735
2897 /* Adjust ring index */ 2736 /* Adjust ring index */
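
The ISPFx00 response-queue loop now consumes entries by comparing the hardware producer index (rsp_q_in) against a local consumer index instead of polling a per-entry signature word and writing RESPONSE_PROCESSED back. A standalone model of that index-driven consumption with wrap-around is below; the ring contents and length are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define RING_LEN 8

/* Illustrative ring-consumption model, not driver code. */
static void process_ring(const uint32_t *ring, uint16_t in, uint16_t *out)
{
        uint16_t idx = *out;

        while (idx != in) {
                printf("entry[%u] = %#x\n", (unsigned)idx, (unsigned)ring[idx]);
                if (++idx == RING_LEN)
                        idx = 0;                 /* wrap like ring_index */
        }
        *out = idx;                              /* new consumer index   */
}

int main(void)
{
        uint32_t ring[RING_LEN] = { 0x10, 0x11, 0x12, 0x13 };
        uint16_t out = 0;

        process_ring(ring, 3, &out);             /* producer is at 3     */
        return 0;
}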
@@ -2926,9 +2765,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
2926 break; 2765 break;
2927 2766
2928 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ 2767 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
2929 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1); 2768 ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1);
2930 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2); 2769 ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2);
2931 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3); 2770 ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3);
2932 ql_dbg(ql_dbg_async, vha, 0x5077, 2771 ql_dbg(ql_dbg_async, vha, 0x5077,
2933 "Asynchronous port Update received " 2772 "Asynchronous port Update received "
2934 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", 2773 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
@@ -2985,7 +2824,7 @@ static void
2985qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) 2824qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2986{ 2825{
2987 uint16_t cnt; 2826 uint16_t cnt;
2988 uint16_t __iomem *wptr; 2827 uint32_t __iomem *wptr;
2989 struct qla_hw_data *ha = vha->hw; 2828 struct qla_hw_data *ha = vha->hw;
2990 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; 2829 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2991 2830
@@ -2995,10 +2834,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2995 /* Load return mailbox registers. */ 2834 /* Load return mailbox registers. */
2996 ha->flags.mbox_int = 1; 2835 ha->flags.mbox_int = 1;
2997 ha->mailbox_out32[0] = mb0; 2836 ha->mailbox_out32[0] = mb0;
2998 wptr = (uint16_t __iomem *)&reg->mailbox17; 2837 wptr = (uint32_t __iomem *)&reg->mailbox17;
2999 2838
3000 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2839 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3001 ha->mailbox_out32[cnt] = RD_REG_WORD(wptr); 2840 ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);
3002 wptr++; 2841 wptr++;
3003 } 2842 }
3004} 2843}
@@ -3025,6 +2864,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
3025 struct rsp_que *rsp; 2864 struct rsp_que *rsp;
3026 unsigned long flags; 2865 unsigned long flags;
3027 uint32_t clr_intr = 0; 2866 uint32_t clr_intr = 0;
2867 uint32_t intr_stat = 0;
3028 2868
3029 rsp = (struct rsp_que *) dev_id; 2869 rsp = (struct rsp_que *) dev_id;
3030 if (!rsp) { 2870 if (!rsp) {
@@ -3046,34 +2886,26 @@ qlafx00_intr_handler(int irq, void *dev_id)
3046 stat = QLAFX00_RD_INTR_REG(ha); 2886 stat = QLAFX00_RD_INTR_REG(ha);
3047 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2887 if (qla2x00_check_reg_for_disconnect(vha, stat))
3048 break; 2888 break;
3049 if ((stat & QLAFX00_HST_INT_STS_BITS) == 0) 2889 intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
2890 if (!intr_stat)
3050 break; 2891 break;
3051 2892
3052 switch (stat & QLAFX00_HST_INT_STS_BITS) { 2893 if (stat & QLAFX00_INTR_MB_CMPLT) {
3053 case QLAFX00_INTR_MB_CMPLT:
3054 case QLAFX00_INTR_MB_RSP_CMPLT:
3055 case QLAFX00_INTR_MB_ASYNC_CMPLT:
3056 case QLAFX00_INTR_ALL_CMPLT:
3057 mb[0] = RD_REG_WORD(&reg->mailbox16); 2894 mb[0] = RD_REG_WORD(&reg->mailbox16);
3058 qlafx00_mbx_completion(vha, mb[0]); 2895 qlafx00_mbx_completion(vha, mb[0]);
3059 status |= MBX_INTERRUPT; 2896 status |= MBX_INTERRUPT;
3060 clr_intr |= QLAFX00_INTR_MB_CMPLT; 2897 clr_intr |= QLAFX00_INTR_MB_CMPLT;
3061 break; 2898 }
3062 case QLAFX00_INTR_ASYNC_CMPLT: 2899 if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {
3063 case QLAFX00_INTR_RSP_ASYNC_CMPLT:
3064 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0); 2900 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
3065 qlafx00_async_event(vha); 2901 qlafx00_async_event(vha);
3066 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; 2902 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
3067 break; 2903 }
3068 case QLAFX00_INTR_RSP_CMPLT: 2904 if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {
3069 qlafx00_process_response_queue(vha, rsp); 2905 qlafx00_process_response_queue(vha, rsp);
3070 clr_intr |= QLAFX00_INTR_RSP_CMPLT; 2906 clr_intr |= QLAFX00_INTR_RSP_CMPLT;
3071 break;
3072 default:
3073 ql_dbg(ql_dbg_async, vha, 0x507a,
3074 "Unrecognized interrupt type (%d).\n", stat);
3075 break;
3076 } 2907 }
2908
3077 QLAFX00_CLR_INTR_REG(ha, clr_intr); 2909 QLAFX00_CLR_INTR_REG(ha, clr_intr);
3078 QLAFX00_RD_INTR_REG(ha); 2910 QLAFX00_RD_INTR_REG(ha);
3079 } 2911 }
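
The interrupt-handler hunk above collapses the switch over every combination of completion bits into independent tests of each bit, so a single pass services whatever mix of mailbox, async and response-queue work is pending and accumulates the matching clear bits. A small self-contained model of that dispatch, with made-up names standing in for the QLAFX00_INTR_* handling:

#include <stdint.h>
#include <stdio.h>

#define INTR_MB_CMPLT     0x1u
#define INTR_RSP_CMPLT    0x2u
#define INTR_ASYNC_CMPLT  0x4u
#define INTR_STS_BITS     (INTR_MB_CMPLT | INTR_RSP_CMPLT | INTR_ASYNC_CMPLT)

static void handle_mbx(void)   { puts("mailbox completion"); }
static void handle_async(void) { puts("async event"); }
static void handle_rsp(void)   { puts("response queue"); }

/* Each pending source is handled and acknowledged independently; no
 * combination (0x3, 0x5, 0x6, 0x7) needs its own case label. */
static uint32_t service(uint32_t stat)
{
	uint32_t pending = stat & INTR_STS_BITS;
	uint32_t clr = 0;

	if (pending & INTR_MB_CMPLT) {
		handle_mbx();
		clr |= INTR_MB_CMPLT;
	}
	if (pending & INTR_ASYNC_CMPLT) {
		handle_async();
		clr |= INTR_ASYNC_CMPLT;
	}
	if (pending & INTR_RSP_CMPLT) {
		handle_rsp();
		clr |= INTR_RSP_CMPLT;
	}
	return clr;   /* bits to write back to the clear-interrupt register */
}

int main(void)
{
	service(0x7);  /* all three sources pending in one interrupt */
	return 0;
}
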
@@ -3223,17 +3055,6 @@ qlafx00_start_scsi(srb_t *sp)
3223 /* So we know we haven't pci_map'ed anything yet */ 3055 /* So we know we haven't pci_map'ed anything yet */
3224 tot_dsds = 0; 3056 tot_dsds = 0;
3225 3057
3226 /* Forcing marker needed for now */
3227 vha->marker_needed = 0;
3228
3229 /* Send marker if required */
3230 if (vha->marker_needed != 0) {
3231 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
3232 QLA_SUCCESS)
3233 return QLA_FUNCTION_FAILED;
3234 vha->marker_needed = 0;
3235 }
3236
3237 /* Acquire ring specific lock */ 3058 /* Acquire ring specific lock */
3238 spin_lock_irqsave(&ha->hardware_lock, flags); 3059 spin_lock_irqsave(&ha->hardware_lock, flags);
3239 3060
@@ -3284,7 +3105,9 @@ qlafx00_start_scsi(srb_t *sp)
3284 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); 3105 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
3285 3106
3286 lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle); 3107 lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
3287 lcmd_pkt.handle_hi = 0; 3108 lcmd_pkt.reserved_0 = 0;
3109 lcmd_pkt.port_path_ctrl = 0;
3110 lcmd_pkt.reserved_1 = 0;
3288 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds); 3111 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
3289 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); 3112 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
3290 3113
@@ -3364,8 +3187,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
3364 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; 3187 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
3365 tm_iocb.entry_count = 1; 3188 tm_iocb.entry_count = 1;
3366 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); 3189 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3367 tm_iocb.handle_hi = 0; 3190 tm_iocb.reserved_0 = 0;
3368 tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3369 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); 3191 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
3370 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); 3192 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
3371 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { 3193 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 6cd7072cc0ff..e529dfaeb854 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -22,13 +22,16 @@ struct cmd_type_7_fx00 {
22 uint8_t entry_status; /* Entry Status. */ 22 uint8_t entry_status; /* Entry Status. */
23 23
24 uint32_t handle; /* System handle. */ 24 uint32_t handle; /* System handle. */
25 uint32_t handle_hi; 25 uint8_t reserved_0;
26 uint8_t port_path_ctrl;
27 uint16_t reserved_1;
26 28
27 __le16 tgt_idx; /* Target Idx. */ 29 __le16 tgt_idx; /* Target Idx. */
28 uint16_t timeout; /* Command timeout. */ 30 uint16_t timeout; /* Command timeout. */
29 31
30 __le16 dseg_count; /* Data segment count. */ 32 __le16 dseg_count; /* Data segment count. */
31 uint16_t scsi_rsp_dsd_len; 33 uint8_t scsi_rsp_dsd_len;
34 uint8_t reserved_2;
32 35
33 struct scsi_lun lun; /* LUN (LE). */ 36 struct scsi_lun lun; /* LUN (LE). */
34 37
@@ -47,30 +50,6 @@ struct cmd_type_7_fx00 {
47 uint32_t dseg_0_len; /* Data segment 0 length. */ 50 uint32_t dseg_0_len; /* Data segment 0 length. */
48}; 51};
49 52
50/*
51 * ISP queue - marker entry structure definition.
52 */
53struct mrk_entry_fx00 {
54 uint8_t entry_type; /* Entry type. */
55 uint8_t entry_count; /* Entry count. */
56 uint8_t handle_count; /* Handle count. */
57 uint8_t entry_status; /* Entry Status. */
58
59 uint32_t handle; /* System handle. */
60 uint32_t handle_hi; /* System handle. */
61
62 uint16_t tgt_id; /* Target ID. */
63
64 uint8_t modifier; /* Modifier (7-0). */
65 uint8_t reserved_1;
66
67 uint8_t reserved_2[5];
68
69 uint8_t lun[8]; /* FCP LUN (BE). */
70 uint8_t reserved_3[36];
71};
72
73
74#define STATUS_TYPE_FX00 0x01 /* Status entry. */ 53#define STATUS_TYPE_FX00 0x01 /* Status entry. */
75struct sts_entry_fx00 { 54struct sts_entry_fx00 {
76 uint8_t entry_type; /* Entry type. */ 55 uint8_t entry_type; /* Entry type. */
@@ -79,7 +58,7 @@ struct sts_entry_fx00 {
79 uint8_t entry_status; /* Entry Status. */ 58 uint8_t entry_status; /* Entry Status. */
80 59
81 uint32_t handle; /* System handle. */ 60 uint32_t handle; /* System handle. */
82 uint32_t handle_hi; /* System handle. */ 61 uint32_t reserved_3; /* System handle. */
83 62
84 __le16 comp_status; /* Completion status. */ 63 __le16 comp_status; /* Completion status. */
85 uint16_t reserved_0; /* OX_ID used by the firmware. */ 64 uint16_t reserved_0; /* OX_ID used by the firmware. */
@@ -102,7 +81,7 @@ struct sts_entry_fx00 {
102 81
103struct multi_sts_entry_fx00 { 82struct multi_sts_entry_fx00 {
104 uint8_t entry_type; /* Entry type. */ 83 uint8_t entry_type; /* Entry type. */
105 uint8_t sys_define; /* System defined. */ 84 uint8_t entry_count; /* Entry count. */
106 uint8_t handle_count; 85 uint8_t handle_count;
107 uint8_t entry_status; 86 uint8_t entry_status;
108 87
@@ -118,15 +97,13 @@ struct tsk_mgmt_entry_fx00 {
118 97
119 __le32 handle; /* System handle. */ 98 __le32 handle; /* System handle. */
120 99
121 uint32_t handle_hi; /* System handle. */ 100 uint32_t reserved_0;
122 101
123 __le16 tgt_id; /* Target Idx. */ 102 __le16 tgt_id; /* Target Idx. */
124 103
125 uint16_t reserved_1; 104 uint16_t reserved_1;
126 105 uint16_t reserved_3;
127 uint16_t delay; /* Activity delay in seconds. */ 106 uint16_t reserved_4;
128
129 __le16 timeout; /* Command timeout. */
130 107
131 struct scsi_lun lun; /* LUN (LE). */ 108 struct scsi_lun lun; /* LUN (LE). */
132 109
@@ -144,13 +121,13 @@ struct abort_iocb_entry_fx00 {
144 uint8_t entry_status; /* Entry Status. */ 121 uint8_t entry_status; /* Entry Status. */
145 122
146 __le32 handle; /* System handle. */ 123 __le32 handle; /* System handle. */
147 __le32 handle_hi; /* System handle. */ 124 __le32 reserved_0;
148 125
149 __le16 tgt_id_sts; /* Completion status. */ 126 __le16 tgt_id_sts; /* Completion status. */
150 __le16 options; 127 __le16 options;
151 128
152 __le32 abort_handle; /* System handle. */ 129 __le32 abort_handle; /* System handle. */
153 __le32 abort_handle_hi; /* System handle. */ 130 __le32 reserved_2;
154 131
155 __le16 req_que_no; 132 __le16 req_que_no;
156 uint8_t reserved_1[38]; 133 uint8_t reserved_1[38];
@@ -171,8 +148,7 @@ struct ioctl_iocb_entry_fx00 {
171 148
172 __le32 dataword_r; /* Data word returned */ 149 __le32 dataword_r; /* Data word returned */
173 uint32_t adapid; /* Adapter ID */ 150 uint32_t adapid; /* Adapter ID */
174 uint32_t adapid_hi; /* Adapter ID high */ 151 uint32_t dataword_r_extra;
175 uint32_t reserved_1;
176 152
177 __le32 seq_no; 153 __le32 seq_no;
178 uint8_t reserved_2[20]; 154 uint8_t reserved_2[20];
@@ -360,11 +336,7 @@ struct config_info_data {
360 336
361#define QLAFX00_INTR_MB_CMPLT 0x1 337#define QLAFX00_INTR_MB_CMPLT 0x1
362#define QLAFX00_INTR_RSP_CMPLT 0x2 338#define QLAFX00_INTR_RSP_CMPLT 0x2
363#define QLAFX00_INTR_MB_RSP_CMPLT 0x3
364#define QLAFX00_INTR_ASYNC_CMPLT 0x4 339#define QLAFX00_INTR_ASYNC_CMPLT 0x4
365#define QLAFX00_INTR_MB_ASYNC_CMPLT 0x5
366#define QLAFX00_INTR_RSP_ASYNC_CMPLT 0x6
367#define QLAFX00_INTR_ALL_CMPLT 0x7
368 340
369#define QLAFX00_MBA_SYSTEM_ERR 0x8002 341#define QLAFX00_MBA_SYSTEM_ERR 0x8002
370#define QLAFX00_MBA_TEMP_OVER 0x8005 342#define QLAFX00_MBA_TEMP_OVER 0x8005
@@ -548,4 +520,7 @@ struct mr_data_fx00 {
548/* Max conncurrent IOs that can be queued */ 520/* Max conncurrent IOs that can be queued */
549#define QLAFX00_MAX_CANQUEUE 1024 521#define QLAFX00_MAX_CANQUEUE 1024
550 522
523/* IOCTL IOCB abort success */
524#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS 0x68
525
551#endif 526#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1e6ba4a369e2..5511e24b1f11 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1664,10 +1664,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
1664 /* Mapping of IO base pointer */ 1664 /* Mapping of IO base pointer */
1665 if (IS_QLA8044(ha)) { 1665 if (IS_QLA8044(ha)) {
1666 ha->iobase = 1666 ha->iobase =
1667 (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase); 1667 (device_reg_t *)((uint8_t *)ha->nx_pcibase);
1668 } else if (IS_QLA82XX(ha)) { 1668 } else if (IS_QLA82XX(ha)) {
1669 ha->iobase = 1669 ha->iobase =
1670 (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + 1670 (device_reg_t *)((uint8_t *)ha->nx_pcibase +
1671 0xbc000 + (ha->pdev->devfn << 11)); 1671 0xbc000 + (ha->pdev->devfn << 11));
1672 } 1672 }
1673 1673
@@ -4502,3 +4502,20 @@ exit:
4502 qla82xx_idc_unlock(ha); 4502 qla82xx_idc_unlock(ha);
4503 return rval; 4503 return rval;
4504} 4504}
4505
4506void
4507qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
4508{
4509 struct qla_hw_data *ha = vha->hw;
4510
4511 if (!ha->allow_cna_fw_dump)
4512 return;
4513
4514 scsi_block_requests(vha->host);
4515 ha->flags.isp82xx_no_md_cap = 1;
4516 qla82xx_idc_lock(ha);
4517 qla82xx_set_reset_owner(vha);
4518 qla82xx_idc_unlock(ha);
4519 qla2x00_wait_for_chip_reset(vha);
4520 scsi_unblock_requests(vha->host);
4521}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index f60989d729a8..86cf10815db0 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1578,8 +1578,8 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
1578 do { 1578 do {
1579 if (time_after_eq(jiffies, dev_init_timeout)) { 1579 if (time_after_eq(jiffies, dev_init_timeout)) {
1580 ql_log(ql_log_info, vha, 0xb0c4, 1580 ql_log(ql_log_info, vha, 0xb0c4,
1581 "%s: Non Reset owner DEV INIT " 1581 "%s: Non Reset owner: Reset Ack Timeout!\n",
1582 "TIMEOUT!\n", __func__); 1582 __func__);
1583 break; 1583 break;
1584 } 1584 }
1585 1585
@@ -2014,8 +2014,6 @@ qla8044_watchdog(struct scsi_qla_host *vha)
2014 2014
2015 /* don't poll if reset is going on or FW hang in quiescent state */ 2015 /* don't poll if reset is going on or FW hang in quiescent state */
2016 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 2016 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2017 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2018 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
2019 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { 2017 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2020 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); 2018 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2021 2019
@@ -3715,3 +3713,19 @@ exit_isp_reset:
3715 return rval; 3713 return rval;
3716} 3714}
3717 3715
3716void
3717qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
3718{
3719 struct qla_hw_data *ha = vha->hw;
3720
3721 if (!ha->allow_cna_fw_dump)
3722 return;
3723
3724 scsi_block_requests(vha->host);
3725 ha->flags.isp82xx_no_md_cap = 1;
3726 qla8044_idc_lock(ha);
3727 qla82xx_set_reset_owner(vha);
3728 qla8044_idc_unlock(ha);
3729 qla2x00_wait_for_chip_reset(vha);
3730 scsi_unblock_requests(vha->host);
3731}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 89a53002b585..19e99cc33724 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,15 +120,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
120int ql2xenabledif = 2; 120int ql2xenabledif = 2;
121module_param(ql2xenabledif, int, S_IRUGO); 121module_param(ql2xenabledif, int, S_IRUGO);
122MODULE_PARM_DESC(ql2xenabledif, 122MODULE_PARM_DESC(ql2xenabledif,
123 " Enable T10-CRC-DIF " 123 " Enable T10-CRC-DIF:\n"
124 " Default is 0 - No DIF Support. 1 - Enable it" 124 " Default is 2.\n"
125 ", 2 - Enable DIF for all types, except Type 0."); 125 " 0 -- No DIF Support\n"
126 " 1 -- Enable DIF for all types\n"
127 " 2 -- Enable DIF for all types, except Type 0.\n");
126 128
127int ql2xenablehba_err_chk = 2; 129int ql2xenablehba_err_chk = 2;
128module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR); 130module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
129MODULE_PARM_DESC(ql2xenablehba_err_chk, 131MODULE_PARM_DESC(ql2xenablehba_err_chk,
130 " Enable T10-CRC-DIF Error isolation by HBA:\n" 132 " Enable T10-CRC-DIF Error isolation by HBA:\n"
131 " Default is 1.\n" 133 " Default is 2.\n"
132 " 0 -- Error isolation disabled\n" 134 " 0 -- Error isolation disabled\n"
133 " 1 -- Error isolation enabled only for DIX Type 0\n" 135 " 1 -- Error isolation enabled only for DIX Type 0\n"
134 " 2 -- Error isolation enabled for all Types\n"); 136 " 2 -- Error isolation enabled for all Types\n");
@@ -1975,7 +1977,7 @@ static struct isp_operations qla82xx_isp_ops = {
1975 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 1977 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1976 .read_nvram = qla24xx_read_nvram_data, 1978 .read_nvram = qla24xx_read_nvram_data,
1977 .write_nvram = qla24xx_write_nvram_data, 1979 .write_nvram = qla24xx_write_nvram_data,
1978 .fw_dump = qla24xx_fw_dump, 1980 .fw_dump = qla82xx_fw_dump,
1979 .beacon_on = qla82xx_beacon_on, 1981 .beacon_on = qla82xx_beacon_on,
1980 .beacon_off = qla82xx_beacon_off, 1982 .beacon_off = qla82xx_beacon_off,
1981 .beacon_blink = NULL, 1983 .beacon_blink = NULL,
@@ -2013,11 +2015,11 @@ static struct isp_operations qla8044_isp_ops = {
2013 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb, 2015 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2014 .read_nvram = NULL, 2016 .read_nvram = NULL,
2015 .write_nvram = NULL, 2017 .write_nvram = NULL,
2016 .fw_dump = qla24xx_fw_dump, 2018 .fw_dump = qla8044_fw_dump,
2017 .beacon_on = qla82xx_beacon_on, 2019 .beacon_on = qla82xx_beacon_on,
2018 .beacon_off = qla82xx_beacon_off, 2020 .beacon_off = qla82xx_beacon_off,
2019 .beacon_blink = NULL, 2021 .beacon_blink = NULL,
2020 .read_optrom = qla82xx_read_optrom_data, 2022 .read_optrom = qla8044_read_optrom_data,
2021 .write_optrom = qla8044_write_optrom_data, 2023 .write_optrom = qla8044_write_optrom_data,
2022 .get_flash_version = qla82xx_get_flash_version, 2024 .get_flash_version = qla82xx_get_flash_version,
2023 .start_scsi = qla82xx_start_scsi, 2025 .start_scsi = qla82xx_start_scsi,
@@ -2078,7 +2080,7 @@ static struct isp_operations qlafx00_isp_ops = {
2078 .intr_handler = qlafx00_intr_handler, 2080 .intr_handler = qlafx00_intr_handler,
2079 .enable_intrs = qlafx00_enable_intrs, 2081 .enable_intrs = qlafx00_enable_intrs,
2080 .disable_intrs = qlafx00_disable_intrs, 2082 .disable_intrs = qlafx00_disable_intrs,
2081 .abort_command = qlafx00_abort_command, 2083 .abort_command = qla24xx_async_abort_command,
2082 .target_reset = qlafx00_abort_target, 2084 .target_reset = qlafx00_abort_target,
2083 .lun_reset = qlafx00_lun_reset, 2085 .lun_reset = qlafx00_lun_reset,
2084 .fabric_login = NULL, 2086 .fabric_login = NULL,
@@ -2102,6 +2104,44 @@ static struct isp_operations qlafx00_isp_ops = {
2102 .initialize_adapter = qlafx00_initialize_adapter, 2104 .initialize_adapter = qlafx00_initialize_adapter,
2103}; 2105};
2104 2106
2107static struct isp_operations qla27xx_isp_ops = {
2108 .pci_config = qla25xx_pci_config,
2109 .reset_chip = qla24xx_reset_chip,
2110 .chip_diag = qla24xx_chip_diag,
2111 .config_rings = qla24xx_config_rings,
2112 .reset_adapter = qla24xx_reset_adapter,
2113 .nvram_config = qla81xx_nvram_config,
2114 .update_fw_options = qla81xx_update_fw_options,
2115 .load_risc = qla81xx_load_risc,
2116 .pci_info_str = qla24xx_pci_info_str,
2117 .fw_version_str = qla24xx_fw_version_str,
2118 .intr_handler = qla24xx_intr_handler,
2119 .enable_intrs = qla24xx_enable_intrs,
2120 .disable_intrs = qla24xx_disable_intrs,
2121 .abort_command = qla24xx_abort_command,
2122 .target_reset = qla24xx_abort_target,
2123 .lun_reset = qla24xx_lun_reset,
2124 .fabric_login = qla24xx_login_fabric,
2125 .fabric_logout = qla24xx_fabric_logout,
2126 .calc_req_entries = NULL,
2127 .build_iocbs = NULL,
2128 .prep_ms_iocb = qla24xx_prep_ms_iocb,
2129 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
2130 .read_nvram = NULL,
2131 .write_nvram = NULL,
2132 .fw_dump = qla27xx_fwdump,
2133 .beacon_on = qla24xx_beacon_on,
2134 .beacon_off = qla24xx_beacon_off,
2135 .beacon_blink = qla83xx_beacon_blink,
2136 .read_optrom = qla25xx_read_optrom_data,
2137 .write_optrom = qla24xx_write_optrom_data,
2138 .get_flash_version = qla24xx_get_flash_version,
2139 .start_scsi = qla24xx_dif_start_scsi,
2140 .abort_isp = qla2x00_abort_isp,
2141 .iospace_config = qla83xx_iospace_config,
2142 .initialize_adapter = qla2x00_initialize_adapter,
2143};
2144
2105static inline void 2145static inline void
2106qla2x00_set_isp_flags(struct qla_hw_data *ha) 2146qla2x00_set_isp_flags(struct qla_hw_data *ha)
2107{ 2147{
@@ -2223,21 +2263,29 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
2223 case PCI_DEVICE_ID_QLOGIC_ISPF001: 2263 case PCI_DEVICE_ID_QLOGIC_ISPF001:
2224 ha->device_type |= DT_ISPFX00; 2264 ha->device_type |= DT_ISPFX00;
2225 break; 2265 break;
2266 case PCI_DEVICE_ID_QLOGIC_ISP2071:
2267 ha->device_type |= DT_ISP2071;
2268 ha->device_type |= DT_ZIO_SUPPORTED;
2269 ha->device_type |= DT_FWI2;
2270 ha->device_type |= DT_IIDMA;
2271 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2272 break;
2226 } 2273 }
2227 2274
2228 if (IS_QLA82XX(ha)) 2275 if (IS_QLA82XX(ha))
2229 ha->port_no = !(ha->portnum & 1); 2276 ha->port_no = ha->portnum & 1;
2230 else 2277 else {
2231 /* Get adapter physical port no from interrupt pin register. */ 2278 /* Get adapter physical port no from interrupt pin register. */
2232 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); 2279 pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2280 if (IS_QLA27XX(ha))
2281 ha->port_no--;
2282 else
2283 ha->port_no = !(ha->port_no & 1);
2284 }
2233 2285
2234 if (ha->port_no & 1)
2235 ha->flags.port0 = 1;
2236 else
2237 ha->flags.port0 = 0;
2238 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b, 2286 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
2239 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", 2287 "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
2240 ha->device_type, ha->flags.port0, ha->fw_srisc_address); 2288 ha->device_type, ha->port_no, ha->fw_srisc_address);
2241} 2289}
2242 2290
2243static void 2291static void
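
The probe hunk above retires the boolean port0 flag in favour of a numeric ha->port_no: ISP27xx parts map the PCI interrupt pin (1..4) straight to ports 0..3 by decrementing it, while older parts keep the two-port odd/even rule. A hedged sketch of just that mapping, with hypothetical names rather than the driver's fields:

#include <stdbool.h>
#include <stdio.h>

/* Model only: derive a zero-based port number from the PCI interrupt
 * pin register (INTA#=1 .. INTD#=4).  four_port selects the ISP27xx
 * rule from the hunk; otherwise the legacy odd/even rule applies. */
static unsigned port_no_from_pin(unsigned pin, bool four_port)
{
	if (four_port)
		return pin - 1;          /* 1..4 -> 0..3 */
	return !(pin & 1);               /* INTA -> port 0, INTB -> port 1 */
}

int main(void)
{
	for (unsigned pin = 1; pin <= 4; pin++)
		printf("pin %u: 27xx port %u, legacy port %u\n",
		       pin, port_no_from_pin(pin, true),
		       port_no_from_pin(pin, false));
	return 0;
}
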
@@ -2297,7 +2345,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2297 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 || 2345 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2298 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || 2346 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2299 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2347 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2300 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) { 2348 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2349 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
2301 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2350 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2302 mem_only = 1; 2351 mem_only = 1;
2303 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2352 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2334,13 +2383,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2334 spin_lock_init(&ha->hardware_lock); 2383 spin_lock_init(&ha->hardware_lock);
2335 spin_lock_init(&ha->vport_slock); 2384 spin_lock_init(&ha->vport_slock);
2336 mutex_init(&ha->selflogin_lock); 2385 mutex_init(&ha->selflogin_lock);
2386 mutex_init(&ha->optrom_mutex);
2337 2387
2338 /* Set ISP-type information. */ 2388 /* Set ISP-type information. */
2339 qla2x00_set_isp_flags(ha); 2389 qla2x00_set_isp_flags(ha);
2340 2390
2341 /* Set EEH reset type to fundamental if required by hba */ 2391 /* Set EEH reset type to fundamental if required by hba */
2342 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || 2392 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2343 IS_QLA83XX(ha)) 2393 IS_QLA83XX(ha) || IS_QLA27XX(ha))
2344 pdev->needs_freset = 1; 2394 pdev->needs_freset = 1;
2345 2395
2346 ha->prev_topology = 0; 2396 ha->prev_topology = 0;
@@ -2488,7 +2538,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2488 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00; 2538 ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
2489 req_length = REQUEST_ENTRY_CNT_FX00; 2539 req_length = REQUEST_ENTRY_CNT_FX00;
2490 rsp_length = RESPONSE_ENTRY_CNT_FX00; 2540 rsp_length = RESPONSE_ENTRY_CNT_FX00;
2491 ha->init_cb_size = sizeof(struct init_cb_fx);
2492 ha->isp_ops = &qlafx00_isp_ops; 2541 ha->isp_ops = &qlafx00_isp_ops;
2493 ha->port_down_retry_count = 30; /* default value */ 2542 ha->port_down_retry_count = 30; /* default value */
2494 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL; 2543 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
@@ -2497,6 +2546,22 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2497 ha->mr.fw_hbt_en = 1; 2546 ha->mr.fw_hbt_en = 1;
2498 ha->mr.host_info_resend = false; 2547 ha->mr.host_info_resend = false;
2499 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; 2548 ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
2549 } else if (IS_QLA27XX(ha)) {
2550 ha->portnum = PCI_FUNC(ha->pdev->devfn);
2551 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2552 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2553 req_length = REQUEST_ENTRY_CNT_24XX;
2554 rsp_length = RESPONSE_ENTRY_CNT_2300;
2555 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2556 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2557 ha->gid_list_info_size = 8;
2558 ha->optrom_size = OPTROM_SIZE_83XX;
2559 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2560 ha->isp_ops = &qla27xx_isp_ops;
2561 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2562 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2563 ha->nvram_conf_off = ~0;
2564 ha->nvram_data_off = ~0;
2500 } 2565 }
2501 2566
2502 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 2567 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2536,7 +2601,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2536 ha->flags.enable_64bit_addressing ? "enable" : 2601 ha->flags.enable_64bit_addressing ? "enable" :
2537 "disable"); 2602 "disable");
2538 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); 2603 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
2539 if (!ret) { 2604 if (ret) {
2540 ql_log_pci(ql_log_fatal, pdev, 0x0031, 2605 ql_log_pci(ql_log_fatal, pdev, 0x0031,
2541 "Failed to allocate memory for adapter, aborting.\n"); 2606 "Failed to allocate memory for adapter, aborting.\n");
2542 2607
@@ -2561,10 +2626,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2561 2626
2562 host = base_vha->host; 2627 host = base_vha->host;
2563 base_vha->req = req; 2628 base_vha->req = req;
2564 if (IS_QLAFX00(ha))
2565 host->can_queue = QLAFX00_MAX_CANQUEUE;
2566 else
2567 host->can_queue = req->length + 128;
2568 if (IS_QLA2XXX_MIDTYPE(ha)) 2629 if (IS_QLA2XXX_MIDTYPE(ha))
2569 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; 2630 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
2570 else 2631 else
@@ -2587,11 +2648,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2587 if (!IS_QLA82XX(ha)) 2648 if (!IS_QLA82XX(ha))
2588 host->sg_tablesize = QLA_SG_ALL; 2649 host->sg_tablesize = QLA_SG_ALL;
2589 } 2650 }
2590 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2591 "can_queue=%d, req=%p, "
2592 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2593 host->can_queue, base_vha->req,
2594 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2595 host->max_id = ha->max_fibre_devices; 2651 host->max_id = ha->max_fibre_devices;
2596 host->cmd_per_lun = 3; 2652 host->cmd_per_lun = 3;
2597 host->unique_id = host->host_no; 2653 host->unique_id = host->host_no;
@@ -2646,7 +2702,7 @@ que_init:
2646 req->req_q_out = &ha->iobase->isp24.req_q_out; 2702 req->req_q_out = &ha->iobase->isp24.req_q_out;
2647 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 2703 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2648 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 2704 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
2649 if (ha->mqenable || IS_QLA83XX(ha)) { 2705 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2650 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 2706 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2651 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 2707 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2652 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 2708 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2707,6 +2763,16 @@ que_init:
2707 goto probe_failed; 2763 goto probe_failed;
2708 } 2764 }
2709 2765
2766 if (IS_QLAFX00(ha))
2767 host->can_queue = QLAFX00_MAX_CANQUEUE;
2768 else
2769 host->can_queue = req->num_outstanding_cmds - 10;
2770
2771 ql_dbg(ql_dbg_init, base_vha, 0x0032,
2772 "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2773 host->can_queue, base_vha->req,
2774 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2775
2710 if (ha->mqenable) { 2776 if (ha->mqenable) {
2711 if (qla25xx_setup_mode(base_vha)) { 2777 if (qla25xx_setup_mode(base_vha)) {
2712 ql_log(ql_log_warn, base_vha, 0x00ec, 2778 ql_log(ql_log_warn, base_vha, 0x00ec,
@@ -2887,9 +2953,9 @@ probe_hw_failed:
2887iospace_config_failed: 2953iospace_config_failed:
2888 if (IS_P3P_TYPE(ha)) { 2954 if (IS_P3P_TYPE(ha)) {
2889 if (!ha->nx_pcibase) 2955 if (!ha->nx_pcibase)
2890 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 2956 iounmap((device_reg_t *)ha->nx_pcibase);
2891 if (!ql2xdbwr) 2957 if (!ql2xdbwr)
2892 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); 2958 iounmap((device_reg_t *)ha->nxdb_wr_ptr);
2893 } else { 2959 } else {
2894 if (ha->iobase) 2960 if (ha->iobase)
2895 iounmap(ha->iobase); 2961 iounmap(ha->iobase);
@@ -3020,9 +3086,9 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
3020{ 3086{
3021 if (IS_QLA82XX(ha)) { 3087 if (IS_QLA82XX(ha)) {
3022 3088
3023 iounmap((device_reg_t __iomem *)ha->nx_pcibase); 3089 iounmap((device_reg_t *)ha->nx_pcibase);
3024 if (!ql2xdbwr) 3090 if (!ql2xdbwr)
3025 iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); 3091 iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3026 } else { 3092 } else {
3027 if (ha->iobase) 3093 if (ha->iobase)
3028 iounmap(ha->iobase); 3094 iounmap(ha->iobase);
@@ -3033,7 +3099,7 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
3033 if (ha->mqiobase) 3099 if (ha->mqiobase)
3034 iounmap(ha->mqiobase); 3100 iounmap(ha->mqiobase);
3035 3101
3036 if (IS_QLA83XX(ha) && ha->msixbase) 3102 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
3037 iounmap(ha->msixbase); 3103 iounmap(ha->msixbase);
3038 } 3104 }
3039} 3105}
@@ -3447,7 +3513,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3447 ha->npiv_info = NULL; 3513 ha->npiv_info = NULL;
3448 3514
3449 /* Get consistent memory allocated for EX-INIT-CB. */ 3515 /* Get consistent memory allocated for EX-INIT-CB. */
3450 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) { 3516 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
3451 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3517 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
3452 &ha->ex_init_cb_dma); 3518 &ha->ex_init_cb_dma);
3453 if (!ha->ex_init_cb) 3519 if (!ha->ex_init_cb)
@@ -3478,10 +3544,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
3478 else { 3544 else {
3479 qla2x00_set_reserved_loop_ids(ha); 3545 qla2x00_set_reserved_loop_ids(ha);
3480 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, 3546 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
3481 "loop_id_map=%p. \n", ha->loop_id_map); 3547 "loop_id_map=%p.\n", ha->loop_id_map);
3482 } 3548 }
3483 3549
3484 return 1; 3550 return 0;
3485 3551
3486fail_async_pd: 3552fail_async_pd:
3487 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); 3553 dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
@@ -3562,22 +3628,28 @@ static void
3562qla2x00_free_fw_dump(struct qla_hw_data *ha) 3628qla2x00_free_fw_dump(struct qla_hw_data *ha)
3563{ 3629{
3564 if (ha->fce) 3630 if (ha->fce)
3565 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 3631 dma_free_coherent(&ha->pdev->dev,
3566 ha->fce_dma); 3632 FCE_SIZE, ha->fce, ha->fce_dma);
3567 3633
3568 if (ha->fw_dump) { 3634 if (ha->eft)
3569 if (ha->eft) 3635 dma_free_coherent(&ha->pdev->dev,
3570 dma_free_coherent(&ha->pdev->dev, 3636 EFT_SIZE, ha->eft, ha->eft_dma);
3571 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 3637
3638 if (ha->fw_dump)
3572 vfree(ha->fw_dump); 3639 vfree(ha->fw_dump);
3573 } 3640 if (ha->fw_dump_template)
3641 vfree(ha->fw_dump_template);
3642
3574 ha->fce = NULL; 3643 ha->fce = NULL;
3575 ha->fce_dma = 0; 3644 ha->fce_dma = 0;
3576 ha->eft = NULL; 3645 ha->eft = NULL;
3577 ha->eft_dma = 0; 3646 ha->eft_dma = 0;
3578 ha->fw_dump = NULL;
3579 ha->fw_dumped = 0; 3647 ha->fw_dumped = 0;
3580 ha->fw_dump_reading = 0; 3648 ha->fw_dump_reading = 0;
3649 ha->fw_dump = NULL;
3650 ha->fw_dump_len = 0;
3651 ha->fw_dump_template = NULL;
3652 ha->fw_dump_template_len = 0;
3581} 3653}
3582 3654
3583/* 3655/*
@@ -5242,7 +5314,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5242 5314
5243/* Firmware interface routines. */ 5315/* Firmware interface routines. */
5244 5316
5245#define FW_BLOBS 10 5317#define FW_BLOBS 11
5246#define FW_ISP21XX 0 5318#define FW_ISP21XX 0
5247#define FW_ISP22XX 1 5319#define FW_ISP22XX 1
5248#define FW_ISP2300 2 5320#define FW_ISP2300 2
@@ -5253,6 +5325,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
5253#define FW_ISP82XX 7 5325#define FW_ISP82XX 7
5254#define FW_ISP2031 8 5326#define FW_ISP2031 8
5255#define FW_ISP8031 9 5327#define FW_ISP8031 9
5328#define FW_ISP2071 10
5256 5329
5257#define FW_FILE_ISP21XX "ql2100_fw.bin" 5330#define FW_FILE_ISP21XX "ql2100_fw.bin"
5258#define FW_FILE_ISP22XX "ql2200_fw.bin" 5331#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -5264,6 +5337,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
5264#define FW_FILE_ISP82XX "ql8200_fw.bin" 5337#define FW_FILE_ISP82XX "ql8200_fw.bin"
5265#define FW_FILE_ISP2031 "ql2600_fw.bin" 5338#define FW_FILE_ISP2031 "ql2600_fw.bin"
5266#define FW_FILE_ISP8031 "ql8300_fw.bin" 5339#define FW_FILE_ISP8031 "ql8300_fw.bin"
5340#define FW_FILE_ISP2071 "ql2700_fw.bin"
5341
5267 5342
5268static DEFINE_MUTEX(qla_fw_lock); 5343static DEFINE_MUTEX(qla_fw_lock);
5269 5344
@@ -5278,6 +5353,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
5278 { .name = FW_FILE_ISP82XX, }, 5353 { .name = FW_FILE_ISP82XX, },
5279 { .name = FW_FILE_ISP2031, }, 5354 { .name = FW_FILE_ISP2031, },
5280 { .name = FW_FILE_ISP8031, }, 5355 { .name = FW_FILE_ISP8031, },
5356 { .name = FW_FILE_ISP2071, },
5281}; 5357};
5282 5358
5283struct fw_blob * 5359struct fw_blob *
@@ -5306,6 +5382,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
5306 blob = &qla_fw_blobs[FW_ISP2031]; 5382 blob = &qla_fw_blobs[FW_ISP2031];
5307 } else if (IS_QLA8031(ha)) { 5383 } else if (IS_QLA8031(ha)) {
5308 blob = &qla_fw_blobs[FW_ISP8031]; 5384 blob = &qla_fw_blobs[FW_ISP8031];
5385 } else if (IS_QLA2071(ha)) {
5386 blob = &qla_fw_blobs[FW_ISP2071];
5309 } else { 5387 } else {
5310 return NULL; 5388 return NULL;
5311 } 5389 }
@@ -5635,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
5635 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) }, 5713 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
5636 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, 5714 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
5637 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 5715 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
5716 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
5638 { 0 }, 5717 { 0 },
5639}; 5718};
5640MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5719MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bd56cde795fc..f28123e8ed65 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,7 +568,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
568 else if (IS_P3P_TYPE(ha)) { 568 else if (IS_P3P_TYPE(ha)) {
569 *start = FA_FLASH_LAYOUT_ADDR_82; 569 *start = FA_FLASH_LAYOUT_ADDR_82;
570 goto end; 570 goto end;
571 } else if (IS_QLA83XX(ha)) { 571 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
572 *start = FA_FLASH_LAYOUT_ADDR_83; 572 *start = FA_FLASH_LAYOUT_ADDR_83;
573 goto end; 573 goto end;
574 } 574 }
@@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
682 /* Assign FCP prio region since older adapters may not have FLT, or 682 /* Assign FCP prio region since older adapters may not have FLT, or
683 FCP prio region in it's FLT. 683 FCP prio region in it's FLT.
684 */ 684 */
685 ha->flt_region_fcp_prio = ha->flags.port0 ? 685 ha->flt_region_fcp_prio = (ha->port_no == 0) ?
686 fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; 686 fcp_prio_cfg0[def] : fcp_prio_cfg1[def];
687 687
688 ha->flt_region_flt = flt_addr; 688 ha->flt_region_flt = flt_addr;
@@ -743,47 +743,71 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
743 ha->flt_region_vpd_nvram = start; 743 ha->flt_region_vpd_nvram = start;
744 if (IS_P3P_TYPE(ha)) 744 if (IS_P3P_TYPE(ha))
745 break; 745 break;
746 if (ha->flags.port0) 746 if (ha->port_no == 0)
747 ha->flt_region_vpd = start; 747 ha->flt_region_vpd = start;
748 break; 748 break;
749 case FLT_REG_VPD_1: 749 case FLT_REG_VPD_1:
750 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) 750 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
751 break; 751 break;
752 if (!ha->flags.port0) 752 if (ha->port_no == 1)
753 ha->flt_region_vpd = start;
754 break;
755 case FLT_REG_VPD_2:
756 if (!IS_QLA27XX(ha))
757 break;
758 if (ha->port_no == 2)
759 ha->flt_region_vpd = start;
760 break;
761 case FLT_REG_VPD_3:
762 if (!IS_QLA27XX(ha))
763 break;
764 if (ha->port_no == 3)
753 ha->flt_region_vpd = start; 765 ha->flt_region_vpd = start;
754 break; 766 break;
755 case FLT_REG_NVRAM_0: 767 case FLT_REG_NVRAM_0:
756 if (IS_QLA8031(ha)) 768 if (IS_QLA8031(ha))
757 break; 769 break;
758 if (ha->flags.port0) 770 if (ha->port_no == 0)
759 ha->flt_region_nvram = start; 771 ha->flt_region_nvram = start;
760 break; 772 break;
761 case FLT_REG_NVRAM_1: 773 case FLT_REG_NVRAM_1:
762 if (IS_QLA8031(ha)) 774 if (IS_QLA8031(ha))
763 break; 775 break;
764 if (!ha->flags.port0) 776 if (ha->port_no == 1)
777 ha->flt_region_nvram = start;
778 break;
779 case FLT_REG_NVRAM_2:
780 if (!IS_QLA27XX(ha))
781 break;
782 if (ha->port_no == 2)
783 ha->flt_region_nvram = start;
784 break;
785 case FLT_REG_NVRAM_3:
786 if (!IS_QLA27XX(ha))
787 break;
788 if (ha->port_no == 3)
765 ha->flt_region_nvram = start; 789 ha->flt_region_nvram = start;
766 break; 790 break;
767 case FLT_REG_FDT: 791 case FLT_REG_FDT:
768 ha->flt_region_fdt = start; 792 ha->flt_region_fdt = start;
769 break; 793 break;
770 case FLT_REG_NPIV_CONF_0: 794 case FLT_REG_NPIV_CONF_0:
771 if (ha->flags.port0) 795 if (ha->port_no == 0)
772 ha->flt_region_npiv_conf = start; 796 ha->flt_region_npiv_conf = start;
773 break; 797 break;
774 case FLT_REG_NPIV_CONF_1: 798 case FLT_REG_NPIV_CONF_1:
775 if (!ha->flags.port0) 799 if (ha->port_no == 1)
776 ha->flt_region_npiv_conf = start; 800 ha->flt_region_npiv_conf = start;
777 break; 801 break;
778 case FLT_REG_GOLD_FW: 802 case FLT_REG_GOLD_FW:
779 ha->flt_region_gold_fw = start; 803 ha->flt_region_gold_fw = start;
780 break; 804 break;
781 case FLT_REG_FCP_PRIO_0: 805 case FLT_REG_FCP_PRIO_0:
782 if (ha->flags.port0) 806 if (ha->port_no == 0)
783 ha->flt_region_fcp_prio = start; 807 ha->flt_region_fcp_prio = start;
784 break; 808 break;
785 case FLT_REG_FCP_PRIO_1: 809 case FLT_REG_FCP_PRIO_1:
786 if (!ha->flags.port0) 810 if (ha->port_no == 1)
787 ha->flt_region_fcp_prio = start; 811 ha->flt_region_fcp_prio = start;
788 break; 812 break;
789 case FLT_REG_BOOT_CODE_82XX: 813 case FLT_REG_BOOT_CODE_82XX:
@@ -813,13 +837,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
813 case FLT_REG_FCOE_NVRAM_0: 837 case FLT_REG_FCOE_NVRAM_0:
814 if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) 838 if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
815 break; 839 break;
816 if (ha->flags.port0) 840 if (ha->port_no == 0)
817 ha->flt_region_nvram = start; 841 ha->flt_region_nvram = start;
818 break; 842 break;
819 case FLT_REG_FCOE_NVRAM_1: 843 case FLT_REG_FCOE_NVRAM_1:
820 if (!(IS_QLA8031(ha) || IS_QLA8044(ha))) 844 if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
821 break; 845 break;
822 if (!ha->flags.port0) 846 if (ha->port_no == 1)
823 ha->flt_region_nvram = start; 847 ha->flt_region_nvram = start;
824 break; 848 break;
825 } 849 }
@@ -832,12 +856,12 @@ no_flash_data:
832 ha->flt_region_fw = def_fw[def]; 856 ha->flt_region_fw = def_fw[def];
833 ha->flt_region_boot = def_boot[def]; 857 ha->flt_region_boot = def_boot[def];
834 ha->flt_region_vpd_nvram = def_vpd_nvram[def]; 858 ha->flt_region_vpd_nvram = def_vpd_nvram[def];
835 ha->flt_region_vpd = ha->flags.port0 ? 859 ha->flt_region_vpd = (ha->port_no == 0) ?
836 def_vpd0[def] : def_vpd1[def]; 860 def_vpd0[def] : def_vpd1[def];
837 ha->flt_region_nvram = ha->flags.port0 ? 861 ha->flt_region_nvram = (ha->port_no == 0) ?
838 def_nvram0[def] : def_nvram1[def]; 862 def_nvram0[def] : def_nvram1[def];
839 ha->flt_region_fdt = def_fdt[def]; 863 ha->flt_region_fdt = def_fdt[def];
840 ha->flt_region_npiv_conf = ha->flags.port0 ? 864 ha->flt_region_npiv_conf = (ha->port_no == 0) ?
841 def_npiv_conf0[def] : def_npiv_conf1[def]; 865 def_npiv_conf0[def] : def_npiv_conf1[def];
842done: 866done:
843 ql_dbg(ql_dbg_init, vha, 0x004a, 867 ql_dbg(ql_dbg_init, vha, 0x004a,
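
The flash-layout hunk above extends per-port region selection from the old two-port port0 flag to a four-port ha->port_no, adding VPD/NVRAM slots 2 and 3 that only ISP27xx adapters claim. A compact model of that selection logic, using invented region codes and an arbitrary start address rather than the real FLT_REG_* values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical region codes, one VPD slot per physical port, mirroring
 * the FLT_REG_VPD_0..FLT_REG_VPD_3 cases added in the hunk above. */
enum { REG_VPD_0, REG_VPD_1, REG_VPD_2, REG_VPD_3 };

struct layout { uint32_t vpd_start; };

/* Claim a region only if its index matches this function's port number;
 * ports 2 and 3 exist only on four-port (27xx-class) adapters. */
static void parse_region(struct layout *l, int code, uint32_t start,
			 unsigned port_no, int four_port)
{
	switch (code) {
	case REG_VPD_0: if (port_no == 0) l->vpd_start = start; break;
	case REG_VPD_1: if (port_no == 1) l->vpd_start = start; break;
	case REG_VPD_2: if (four_port && port_no == 2) l->vpd_start = start; break;
	case REG_VPD_3: if (four_port && port_no == 3) l->vpd_start = start; break;
	}
}

int main(void)
{
	struct layout l = { 0 };

	parse_region(&l, REG_VPD_2, 0x1000, 2, 1);
	printf("vpd region @ 0x%x\n", (unsigned)l.vpd_start);
	return 0;
}
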
@@ -989,7 +1013,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
989 struct qla_hw_data *ha = vha->hw; 1013 struct qla_hw_data *ha = vha->hw;
990 1014
991 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && 1015 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
992 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) 1016 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
993 return QLA_SUCCESS; 1017 return QLA_SUCCESS;
994 1018
995 ret = qla2xxx_find_flt_start(vha, &flt_addr); 1019 ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -1192,7 +1216,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1192 struct qla_hw_data *ha = vha->hw; 1216 struct qla_hw_data *ha = vha->hw;
1193 1217
1194 /* Prepare burst-capable write on supported ISPs. */ 1218 /* Prepare burst-capable write on supported ISPs. */
1195 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) && 1219 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1220 IS_QLA27XX(ha)) &&
1196 !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { 1221 !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
1197 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1222 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1198 &optrom_dma, GFP_KERNEL); 1223 &optrom_dma, GFP_KERNEL);
@@ -1675,7 +1700,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1675 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha))
1676 goto out; 1701 goto out;
1677 1702
1678 if (ha->flags.port0) 1703 if (ha->port_no == 0)
1679 led_select_value = QLA83XX_LED_PORT0; 1704 led_select_value = QLA83XX_LED_PORT0;
1680 else 1705 else
1681 led_select_value = QLA83XX_LED_PORT1; 1706 led_select_value = QLA83XX_LED_PORT1;
@@ -2332,7 +2357,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2332 */ 2357 */
2333 rest_addr = 0xffff; 2358 rest_addr = 0xffff;
2334 sec_mask = 0x10000; 2359 sec_mask = 0x10000;
2335 break; 2360 break;
2336 } 2361 }
2337 /* 2362 /*
2338 * ST m29w010b part - 16kb sector size 2363 * ST m29w010b part - 16kb sector size
@@ -2558,7 +2583,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2558 uint32_t faddr, left, burst; 2583 uint32_t faddr, left, burst;
2559 struct qla_hw_data *ha = vha->hw; 2584 struct qla_hw_data *ha = vha->hw;
2560 2585
2561 if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) 2586 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
2562 goto try_fast; 2587 goto try_fast;
2563 if (offset & 0xfff) 2588 if (offset & 0xfff)
2564 goto slow_read; 2589 goto slow_read;
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
new file mode 100644
index 000000000000..a804e9b744bb
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -0,0 +1,909 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8#include "qla_tmpl.h"
9
10/* note default template is in big endian */
11static const uint32_t ql27xx_fwdt_default_template[] = {
12 0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
13 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
14 0x00000000, 0x00000000, 0x00000000, 0x00000000,
15 0x00000000, 0x00000000, 0x00000000, 0x00000000,
16 0x00000000, 0x00000000, 0x00000000, 0x00000000,
17 0x00000000, 0x00000000, 0x00000000, 0x00000000,
18 0x00000000, 0x00000000, 0x00000000, 0x00000000,
19 0x00000000, 0x00000000, 0x00000000, 0x00000000,
20 0x00000000, 0x00000000, 0x00000000, 0x00000000,
21 0x00000000, 0x00000000, 0x00000000, 0x00000000,
22 0x00000000, 0x04010000, 0x14000000, 0x00000000,
23 0x02000000, 0x44000000, 0x09010000, 0x10000000,
24 0x00000000, 0x02000000, 0x01010000, 0x1c000000,
25 0x00000000, 0x02000000, 0x00600000, 0x00000000,
26 0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
27 0x02000000, 0x00600000, 0x00000000, 0xcc000000,
28 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
29 0x10600000, 0x00000000, 0xd4000000, 0x01010000,
30 0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
31 0x00000060, 0xf0000000, 0x00010000, 0x18000000,
32 0x00000000, 0x02000000, 0x00700000, 0x041000c0,
33 0x00010000, 0x18000000, 0x00000000, 0x02000000,
34 0x10700000, 0x041000c0, 0x00010000, 0x18000000,
35 0x00000000, 0x02000000, 0x40700000, 0x041000c0,
36 0x01010000, 0x1c000000, 0x00000000, 0x02000000,
37 0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
38 0x18000000, 0x00000000, 0x02000000, 0x007c0000,
39 0x040300c4, 0x00010000, 0x18000000, 0x00000000,
40 0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
41 0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
42 0x00000000, 0xc0000000, 0x00010000, 0x18000000,
43 0x00000000, 0x02000000, 0x007c0000, 0x04200000,
44 0x0b010000, 0x18000000, 0x00000000, 0x02000000,
45 0x0c000000, 0x00000000, 0x02010000, 0x20000000,
46 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
47 0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
48 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
49 0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
50 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
51 0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
52 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
53 0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
54 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
55 0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
56 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
57 0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
58 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
59 0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
60 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
61 0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
62 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
63 0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
64 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
65 0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
66 0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
67 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
68 0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
69 0x00010000, 0x18000000, 0x00000000, 0x02000000,
70 0x0a000000, 0x04200080, 0x00010000, 0x18000000,
71 0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
72 0x00010000, 0x18000000, 0x00000000, 0x02000000,
73 0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
74 0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
75 0x00010000, 0x18000000, 0x00000000, 0x02000000,
76 0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
77 0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
78 0x00010000, 0x18000000, 0x00000000, 0x02000000,
79 0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
80 0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
81 0x00010000, 0x18000000, 0x00000000, 0x02000000,
82 0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
83 0x00000000, 0x02000000, 0x00300000, 0x041000c0,
84 0x00010000, 0x18000000, 0x00000000, 0x02000000,
85 0x10300000, 0x041000c0, 0x00010000, 0x18000000,
86 0x00000000, 0x02000000, 0x20300000, 0x041000c0,
87 0x00010000, 0x18000000, 0x00000000, 0x02000000,
88 0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
89 0x00000000, 0x02000000, 0x06010000, 0x1c000000,
90 0x00000000, 0x02000000, 0x01000000, 0x00000200,
91 0xff230200, 0x06010000, 0x1c000000, 0x00000000,
92 0x02000000, 0x02000000, 0x00001000, 0x00000000,
93 0x07010000, 0x18000000, 0x00000000, 0x02000000,
94 0x00000000, 0x01000000, 0x07010000, 0x18000000,
95 0x00000000, 0x02000000, 0x00000000, 0x02000000,
96 0x07010000, 0x18000000, 0x00000000, 0x02000000,
97 0x00000000, 0x03000000, 0x0d010000, 0x14000000,
98 0x00000000, 0x02000000, 0x00000000, 0xff000000,
99 0x10000000, 0x00000000, 0x00000080,
100};
101
102static inline void __iomem *
103qla27xx_isp_reg(struct scsi_qla_host *vha)
104{
105 return &vha->hw->iobase->isp24;
106}
107
108static inline void
109qla27xx_insert16(uint16_t value, void *buf, ulong *len)
110{
111 if (buf) {
112 buf += *len;
113 *(__le16 *)buf = cpu_to_le16(value);
114 }
115 *len += sizeof(value);
116}
117
118static inline void
119qla27xx_insert32(uint32_t value, void *buf, ulong *len)
120{
121 if (buf) {
122 buf += *len;
123 *(__le32 *)buf = cpu_to_le32(value);
124 }
125 *len += sizeof(value);
126}
127
128static inline void
129qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
130{
131 ulong cnt = size;
132
133 if (buf && mem) {
134 buf += *len;
135 while (cnt >= sizeof(uint32_t)) {
136 *(__le32 *)buf = cpu_to_le32p(mem);
137 buf += sizeof(uint32_t);
138 mem += sizeof(uint32_t);
139 cnt -= sizeof(uint32_t);
140 }
141 if (cnt)
142 memcpy(buf, mem, cnt);
143 }
144 *len += size;
145}
146
147static inline void
148qla27xx_read8(void *window, void *buf, ulong *len)
149{
150 uint8_t value = ~0;
151
152 if (buf) {
153 value = RD_REG_BYTE((__iomem void *)window);
154 ql_dbg(ql_dbg_misc, NULL, 0xd011,
155 "%s: -> %x\n", __func__, value);
156 }
157 qla27xx_insert32(value, buf, len);
158}
159
160static inline void
161qla27xx_read16(void *window, void *buf, ulong *len)
162{
163 uint16_t value = ~0;
164
165 if (buf) {
166 value = RD_REG_WORD((__iomem void *)window);
167 ql_dbg(ql_dbg_misc, NULL, 0xd012,
168 "%s: -> %x\n", __func__, value);
169 }
170 qla27xx_insert32(value, buf, len);
171}
172
173static inline void
174qla27xx_read32(void *window, void *buf, ulong *len)
175{
176 uint32_t value = ~0;
177
178 if (buf) {
179 value = RD_REG_DWORD((__iomem void *)window);
180 ql_dbg(ql_dbg_misc, NULL, 0xd013,
181 "%s: -> %x\n", __func__, value);
182 }
183 qla27xx_insert32(value, buf, len);
184}
185
186static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
187{
188 return
189 (width == 1) ? qla27xx_read8 :
190 (width == 2) ? qla27xx_read16 :
191 qla27xx_read32;
192}
193
194static inline void
195qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
196 uint offset, void *buf, ulong *len)
197{
198 void *window = (void *)reg + offset;
199
200 if (buf) {
201 ql_dbg(ql_dbg_misc, NULL, 0xd014,
202 "%s: @%x\n", __func__, offset);
203 }
204 qla27xx_insert32(offset, buf, len);
205 qla27xx_read32(window, buf, len);
206}
207
208static inline void
209qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
210 uint offset, uint32_t data, void *buf)
211{
212 __iomem void *window = reg + offset;
213
214 if (buf) {
215 ql_dbg(ql_dbg_misc, NULL, 0xd015,
216 "%s: @%x <- %x\n", __func__, offset, data);
217 WRT_REG_DWORD(window, data);
218 }
219}
220
221static inline void
222qla27xx_read_window(__iomem struct device_reg_24xx *reg,
223 uint32_t base, uint offset, uint count, uint width, void *buf,
224 ulong *len)
225{
226 void *window = (void *)reg + offset;
227 void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
228
229 if (buf) {
230 ql_dbg(ql_dbg_misc, NULL, 0xd016,
231 "%s: base=%x offset=%x count=%x width=%x\n",
232 __func__, base, offset, count, width);
233 }
234 qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
235 while (count--) {
236 qla27xx_insert32(base, buf, len);
237 readn(window, buf, len);
238 window += width;
239 base += width;
240 }
241}
242
243static inline void
244qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
245{
246 if (buf)
247 ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
248}
249
250static int
251qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
252 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
253{
254 ql_dbg(ql_dbg_misc, vha, 0xd100,
255 "%s: nop [%lx]\n", __func__, *len);
256 qla27xx_skip_entry(ent, buf);
257
258 return false;
259}
260
261static int
262qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
263 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
264{
265 ql_dbg(ql_dbg_misc, vha, 0xd1ff,
266 "%s: end [%lx]\n", __func__, *len);
267 qla27xx_skip_entry(ent, buf);
268
269 /* terminate */
270 return true;
271}
272
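
The entry handlers in this file follow one contract: return false to keep walking the template, return true (the type-255 terminator above) to stop, and flag anything unsupported with a skip bit rather than aborting the dump. A toy model of that walk, with a hypothetical entry layout and flag value rather than the driver's qla27xx_fwdt_entry definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_SKIP 0x80u

struct entry {
	uint32_t type;
	uint32_t flags;
};

/* Handlers return true only for the terminator entry (255 here). */
static bool handle(struct entry *e)
{
	switch (e->type) {
	case 0:                       /* nop: mark skipped, keep going */
		e->flags |= FLAG_SKIP;
		return false;
	case 255:                     /* end-of-template */
		return true;
	default:                      /* unknown: skip it, keep going */
		e->flags |= FLAG_SKIP;
		return false;
	}
}

static void walk(struct entry *tmpl, unsigned count)
{
	for (unsigned i = 0; i < count; i++)
		if (handle(&tmpl[i]))
			break;        /* terminator reached */
}

int main(void)
{
	struct entry t[3] = { { 0, 0 }, { 42, 0 }, { 255, 0 } };

	walk(t, 3);
	printf("flags: %x %x %x\n", (unsigned)t[0].flags,
	       (unsigned)t[1].flags, (unsigned)t[2].flags);
	return 0;
}

Note that the real walker advances by each entry's size field through a byte buffer rather than indexing an array; the loop above only models the return-value contract.
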
273static int
274qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
275 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
276{
277 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
278
279 ql_dbg(ql_dbg_misc, vha, 0xd200,
280 "%s: rdio t1 [%lx]\n", __func__, *len);
281 qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
282 ent->t256.reg_count, ent->t256.reg_width, buf, len);
283
284 return false;
285}
286
287static int
288qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
289 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
290{
291 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
292
293 ql_dbg(ql_dbg_misc, vha, 0xd201,
294 "%s: wrio t1 [%lx]\n", __func__, *len);
295 qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
296 qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
297
298 return false;
299}
300
301static int
302qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
303 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
304{
305 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
306
307 ql_dbg(ql_dbg_misc, vha, 0xd202,
308 "%s: rdio t2 [%lx]\n", __func__, *len);
309 qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
310 qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
311 ent->t258.reg_count, ent->t258.reg_width, buf, len);
312
313 return false;
314}
315
316static int
317qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
318 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
319{
320 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
321
322 ql_dbg(ql_dbg_misc, vha, 0xd203,
323 "%s: wrio t2 [%lx]\n", __func__, *len);
324 qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
325 qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
326 qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
327
328 return false;
329}
330
331static int
332qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
333 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
334{
335 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
336
337 ql_dbg(ql_dbg_misc, vha, 0xd204,
338 "%s: rdpci [%lx]\n", __func__, *len);
339 qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
340
341 return false;
342}
343
344static int
345qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
346 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
347{
348 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
349
350 ql_dbg(ql_dbg_misc, vha, 0xd205,
351 "%s: wrpci [%lx]\n", __func__, *len);
352 qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
353
354 return false;
355}
356
357static int
358qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
359 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
360{
361 ulong dwords;
362 ulong start;
363 ulong end;
364
365 ql_dbg(ql_dbg_misc, vha, 0xd206,
366 "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
367 start = ent->t262.start_addr;
368 end = ent->t262.end_addr;
369
370 if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
371 ;
372 } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
373 end = vha->hw->fw_memory_size;
374 if (buf)
375 ent->t262.end_addr = end;
376 } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
377 start = vha->hw->fw_shared_ram_start;
378 end = vha->hw->fw_shared_ram_end;
379 if (buf) {
380 ent->t262.start_addr = start;
381 ent->t262.end_addr = end;
382 }
383 } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
384 ql_dbg(ql_dbg_misc, vha, 0xd021,
385 "%s: unsupported ddr ram\n", __func__);
386 qla27xx_skip_entry(ent, buf);
387 goto done;
388 } else {
389 ql_dbg(ql_dbg_misc, vha, 0xd022,
390 "%s: unknown area %u\n", __func__, ent->t262.ram_area);
391 qla27xx_skip_entry(ent, buf);
392 goto done;
393 }
394
395 if (end < start) {
396 ql_dbg(ql_dbg_misc, vha, 0xd023,
397 "%s: bad range (start=%x end=%x)\n", __func__,
398 ent->t262.end_addr, ent->t262.start_addr);
399 qla27xx_skip_entry(ent, buf);
400 goto done;
401 }
402
403 dwords = end - start + 1;
404 if (buf) {
405 ql_dbg(ql_dbg_misc, vha, 0xd024,
406 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
407 buf += *len;
408 qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
409 }
410 *len += dwords * sizeof(uint32_t);
411done:
412 return false;
413}
414
415static int
416qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
417 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
418{
419 uint count = 0;
420 uint i;
421 uint length;
422
423 ql_dbg(ql_dbg_misc, vha, 0xd207,
424 "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
425 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
426 for (i = 0; i < vha->hw->max_req_queues; i++) {
427 struct req_que *req = vha->hw->req_q_map[i];
428 if (req || !buf) {
429 length = req ?
430 req->length : REQUEST_ENTRY_CNT_24XX;
431 qla27xx_insert16(i, buf, len);
432 qla27xx_insert16(length, buf, len);
433 qla27xx_insertbuf(req ? req->ring : NULL,
434 length * sizeof(*req->ring), buf, len);
435 count++;
436 }
437 }
438 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
439 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
440 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
441 if (rsp || !buf) {
442 length = rsp ?
443 rsp->length : RESPONSE_ENTRY_CNT_MQ;
444 qla27xx_insert16(i, buf, len);
445 qla27xx_insert16(length, buf, len);
446 qla27xx_insertbuf(rsp ? rsp->ring : NULL,
447 length * sizeof(*rsp->ring), buf, len);
448 count++;
449 }
450 }
451 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
452 ql_dbg(ql_dbg_misc, vha, 0xd025,
453 "%s: unsupported atio queue\n", __func__);
454 qla27xx_skip_entry(ent, buf);
455 goto done;
456 } else {
457 ql_dbg(ql_dbg_misc, vha, 0xd026,
458 "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
459 qla27xx_skip_entry(ent, buf);
460 goto done;
461 }
462
463 if (buf)
464 ent->t263.num_queues = count;
465done:
466 return false;
467}
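/*
 * On the sizing pass (buf == NULL) every slot up to max_req_queues /
 * max_rsp_queues is counted at its default ring length, so the space
 * reserved here is an upper bound on what the capture pass can emit.
 */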
468
469static int
470qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
471 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
472{
473 ql_dbg(ql_dbg_misc, vha, 0xd208,
474 "%s: getfce [%lx]\n", __func__, *len);
475 if (vha->hw->fce) {
476 if (buf) {
477 ent->t264.fce_trace_size = FCE_SIZE;
478 ent->t264.write_pointer = vha->hw->fce_wr;
479 ent->t264.base_pointer = vha->hw->fce_dma;
480 ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
481 ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
482 ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
483 ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
484 ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
485 ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
486 }
487 qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
488 } else {
489 ql_dbg(ql_dbg_misc, vha, 0xd027,
490 "%s: missing fce\n", __func__);
491 qla27xx_skip_entry(ent, buf);
492 }
493
494 return false;
495}
496
497static int
498qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
499 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
500{
501 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
502
503 ql_dbg(ql_dbg_misc, vha, 0xd209,
504 "%s: pause risc [%lx]\n", __func__, *len);
505 if (buf)
506 qla24xx_pause_risc(reg);
507
508 return false;
509}
510
511static int
512qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
513 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
514{
515 ql_dbg(ql_dbg_misc, vha, 0xd20a,
516 "%s: reset risc [%lx]\n", __func__, *len);
517 if (buf)
518 qla24xx_soft_reset(vha->hw);
519
520 return false;
521}
522
523static int
524qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
525 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
526{
527 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
528
529 ql_dbg(ql_dbg_misc, vha, 0xd20b,
530 "%s: dis intr [%lx]\n", __func__, *len);
531 qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
532
533 return false;
534}
535
536static int
537qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
538 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
539{
540 ql_dbg(ql_dbg_misc, vha, 0xd20c,
541 "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
542 if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
543 if (vha->hw->eft) {
544 if (buf) {
545 ent->t268.buf_size = EFT_SIZE;
546 ent->t268.start_addr = vha->hw->eft_dma;
547 }
548 qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
549 } else {
550 ql_dbg(ql_dbg_misc, vha, 0xd028,
551 "%s: missing eft\n", __func__);
552 qla27xx_skip_entry(ent, buf);
553 }
554 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
555 ql_dbg(ql_dbg_misc, vha, 0xd029,
556 "%s: unsupported exchange offload buffer\n", __func__);
557 qla27xx_skip_entry(ent, buf);
558 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
559 ql_dbg(ql_dbg_misc, vha, 0xd02a,
560 "%s: unsupported extended login buffer\n", __func__);
561 qla27xx_skip_entry(ent, buf);
562 } else {
563 ql_dbg(ql_dbg_misc, vha, 0xd02b,
564 "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
565 qla27xx_skip_entry(ent, buf);
566 }
567
568 return false;
569}
570
571static int
572qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
573 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
574{
575 ql_dbg(ql_dbg_misc, vha, 0xd20d,
576 "%s: scratch [%lx]\n", __func__, *len);
577 qla27xx_insert32(0xaaaaaaaa, buf, len);
578 qla27xx_insert32(0xbbbbbbbb, buf, len);
579 qla27xx_insert32(0xcccccccc, buf, len);
580 qla27xx_insert32(0xdddddddd, buf, len);
581 qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
582 if (buf)
583 ent->t269.scratch_size = 5 * sizeof(uint32_t);
584
585 return false;
586}
587
588static int
589qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
590 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
591{
592 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
593 void *window = (void *)reg + 0xc4;
594 ulong dwords = ent->t270.count;
595 ulong addr = ent->t270.addr;
596
597 ql_dbg(ql_dbg_misc, vha, 0xd20e,
598 "%s: rdremreg [%lx]\n", __func__, *len);
599 qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
600 while (dwords--) {
601 qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
602 qla27xx_read_reg(reg, 0xc4, buf, len);
603 qla27xx_insert32(addr, buf, len);
604 qla27xx_read32(window, buf, len);
605 addr++;
606 }
607
608 return false;
609}
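/*
 * The handler selects the 0x40 I/O base page once, then for each of
 * ent->t270.count dwords writes addr | 0x80000000 to offset 0xc0 and
 * records the 0xc4 readback, the remote address, and the data read
 * through the 0xc4 window.
 */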
610
611static int
612qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
613 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
614{
615 struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
616 ulong addr = ent->t271.addr;
617
618 ql_dbg(ql_dbg_misc, vha, 0xd20f,
619 "%s: wrremreg [%lx]\n", __func__, *len);
620 qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
621 qla27xx_read_reg(reg, 0xc4, buf, len);
622 qla27xx_insert32(addr, buf, len);
623 qla27xx_write_reg(reg, 0xc0, addr, buf);
624
625 return false;
626}
627
628static int
629qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
630 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
631{
632 ulong dwords = ent->t272.count;
633 ulong start = ent->t272.addr;
634
635 ql_dbg(ql_dbg_misc, vha, 0xd210,
636 "%s: rdremram [%lx]\n", __func__, *len);
637 if (buf) {
638 ql_dbg(ql_dbg_misc, vha, 0xd02c,
639 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
640 buf += *len;
641 qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
642 }
643 *len += dwords * sizeof(uint32_t);
644
645 return false;
646}
647
648static int
649qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
650 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
651{
652 ulong dwords = ent->t273.count;
653 ulong addr = ent->t273.addr;
654 uint32_t value;
655
656 ql_dbg(ql_dbg_misc, vha, 0xd211,
657 "%s: pcicfg [%lx]\n", __func__, *len);
658 while (dwords--) {
659 value = ~0;
660 if (pci_read_config_dword(vha->hw->pdev, addr, &value))
661 ql_dbg(ql_dbg_misc, vha, 0xd02d,
662 "%s: failed pcicfg read at %lx\n", __func__, addr);
663 qla27xx_insert32(addr, buf, len);
664 qla27xx_insert32(value, buf, len);
665 addr += 4;
666 }
667
668 return false;
669}
670
671static int
672qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
673 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
674{
675 ql_dbg(ql_dbg_misc, vha, 0xd2ff,
676 "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
677 qla27xx_skip_entry(ent, buf);
678
679 return false;
680}
681
682struct qla27xx_fwdt_entry_call {
683 int type;
684 int (*call)(
685 struct scsi_qla_host *,
686 struct qla27xx_fwdt_entry *,
687 void *,
688 ulong *);
689};
690
691static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
692 { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } ,
693 { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } ,
694 { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } ,
695 { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } ,
696 { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } ,
697 { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } ,
698 { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } ,
699 { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } ,
700 { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } ,
701 { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } ,
702 { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } ,
703 { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } ,
704 { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } ,
705 { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } ,
706 { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } ,
707 { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } ,
708 { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } ,
709 { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } ,
710 { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
711 { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
712 { -1 , qla27xx_fwdt_entry_other }
713};
714
715static inline int (*qla27xx_find_entry(int type))
716 (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
717{
718 struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
719
720 while (list->type != -1 && list->type != type)
721 list++;
722
723 return list->call;
724}
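/*
 * The lookup is a linear scan: an exact entry_type hit returns its
 * handler, and anything not in the table stops at the
 * { -1, qla27xx_fwdt_entry_other } sentinel, so unknown entry types
 * are skipped rather than rejected.
 */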
725
726static inline void *
727qla27xx_next_entry(void *p)
728{
729 struct qla27xx_fwdt_entry *ent = p;
730
731 return p + ent->hdr.entry_size;
732}
733
734static void
735qla27xx_walk_template(struct scsi_qla_host *vha,
736 struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
737{
738 struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
739 ulong count = tmp->entry_count;
740
741 ql_dbg(ql_dbg_misc, vha, 0xd01a,
742 "%s: entry count %lx\n", __func__, count);
743 while (count--) {
744 if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
745 break;
746 ent = qla27xx_next_entry(ent);
747 }
748 ql_dbg(ql_dbg_misc, vha, 0xd01b,
749 "%s: len=%lx\n", __func__, *len);
750}
751
752static void
753qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
754{
755 tmp->capture_timestamp = jiffies;
756}
757
758static void
759qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
760{
761 uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
762 int rval = 0;
763
764 rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
765 v+0, v+1, v+2, v+3, v+4, v+5);
766
767 tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
768 tmp->driver_info[1] = v[5] << 8 | v[4];
769 tmp->driver_info[2] = 0x12345678;
770}
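/*
 * Worked example, assuming qla2x00_version_str carries the bare
 * QLA2XXX_VERSION string "8.07.00.02-k": the sscanf() above assigns
 * only the four numeric fields (the "-k" suffix stops the scan), so
 * v = { 8, 7, 0, 2, 0, 0 } and
 *   driver_info[0] = 2 << 24 | 0 << 16 | 7 << 8 | 8 = 0x02000708
 *   driver_info[1] = 0 << 8 | 0                     = 0x00000000
 */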
771
772static void
773qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
774 struct scsi_qla_host *vha)
775{
776 tmp->firmware_version[0] = vha->hw->fw_major_version;
777 tmp->firmware_version[1] = vha->hw->fw_minor_version;
778 tmp->firmware_version[2] = vha->hw->fw_subminor_version;
779 tmp->firmware_version[3] =
780 vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
781 tmp->firmware_version[4] =
782 vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
783}
784
785static void
786ql27xx_edit_template(struct scsi_qla_host *vha,
787 struct qla27xx_fwdt_template *tmp)
788{
789 qla27xx_time_stamp(tmp);
790 qla27xx_driver_info(tmp);
791 qla27xx_firmware_info(tmp, vha);
792}
793
794static inline uint32_t
795qla27xx_template_checksum(void *p, ulong size)
796{
797 uint32_t *buf = p;
798 uint64_t sum = 0;
799
800 size /= sizeof(*buf);
801
802 while (size--)
803 sum += *buf++;
804
805 sum = (sum & 0xffffffff) + (sum >> 32);
806
807 return ~sum;
808}
809
810static inline int
811qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
812{
813 return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
814}
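/*
 * The sum deliberately includes the template_checksum field itself: a
 * valid template is expected to be generated so that the end-around-carry
 * sum of all of its dwords folds to 0xffffffff, which makes ~sum in
 * qla27xx_template_checksum() come out to zero here.
 */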
815
816static inline int
817qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
818{
819 return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
820}
821
822static void
823qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
824{
825 struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
826 ulong len;
827
828 if (qla27xx_fwdt_template_valid(tmp)) {
829 len = tmp->template_size;
830 tmp = memcpy(vha->hw->fw_dump, tmp, len);
831 ql27xx_edit_template(vha, tmp);
832 qla27xx_walk_template(vha, tmp, tmp, &len);
833 vha->hw->fw_dump_len = len;
834 vha->hw->fw_dumped = 1;
835 }
836}
837
838ulong
839qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
840{
841 struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
842 ulong len = 0;
843
844 if (qla27xx_fwdt_template_valid(tmp)) {
845 len = tmp->template_size;
846 qla27xx_walk_template(vha, tmp, NULL, &len);
847 }
848
849 return len;
850}
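A minimal sketch of how a caller is expected to tie these pieces together, sizing the dump with a NULL buffer before allocating and capturing. The real allocation lives in qla_init.c (per the diffstat); the helper below is illustrative only, not driver code:

static int
qla27xx_alloc_and_dump_sketch(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	ulong size = qla27xx_fwdt_calculate_dump_size(vha);

	if (!size)
		return -EINVAL;

	/* The sizing pass already ran with buf == NULL inside
	 * qla27xx_fwdt_calculate_dump_size(); now allocate that much. */
	ha->fw_dump = vmalloc(size);
	if (!ha->fw_dump)
		return -ENOMEM;
	ha->fw_dump_len = size;

	/* Capture pass: walks the template again and fills ha->fw_dump. */
	qla27xx_fwdump(vha, 0);
	return 0;
}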
851
852ulong
853qla27xx_fwdt_template_size(void *p)
854{
855 struct qla27xx_fwdt_template *tmp = p;
856
857 return tmp->template_size;
858}
859
860ulong
861qla27xx_fwdt_template_default_size(void)
862{
863 return sizeof(ql27xx_fwdt_default_template);
864}
865
866const void *
867qla27xx_fwdt_template_default(void)
868{
869 return ql27xx_fwdt_default_template;
870}
871
872int
873qla27xx_fwdt_template_valid(void *p)
874{
875 struct qla27xx_fwdt_template *tmp = p;
876
877 if (!qla27xx_verify_template_header(tmp)) {
878 ql_log(ql_log_warn, NULL, 0xd01c,
879 "%s: template type %x\n", __func__, tmp->template_type);
880 return false;
881 }
882
883 if (!qla27xx_verify_template_checksum(tmp)) {
884 ql_log(ql_log_warn, NULL, 0xd01d,
885 "%s: failed template checksum\n", __func__);
886 return false;
887 }
888
889 return true;
890}
891
892void
893qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
894{
895 ulong flags = 0;
896
897 if (!hardware_locked)
898 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
899
900 if (!vha->hw->fw_dump)
901 ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
902 else if (!vha->hw->fw_dump_template)
903 ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
904 else
905 qla27xx_execute_fwdt_template(vha);
906
907 if (!hardware_locked)
908 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
909}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
new file mode 100644
index 000000000000..c9d2fff4d964
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -0,0 +1,205 @@
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8#ifndef __QLA_DMP27_H__
9#define __QLA_DMP27_H__
10
11#define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr)
12
13struct __packed qla27xx_fwdt_template {
14 uint32_t template_type;
15 uint32_t entry_offset;
16 uint32_t template_size;
17 uint32_t reserved_1;
18
19 uint32_t entry_count;
20 uint32_t template_version;
21 uint32_t capture_timestamp;
22 uint32_t template_checksum;
23
24 uint32_t reserved_2;
25 uint32_t driver_info[3];
26
27 uint32_t saved_state[16];
28
29 uint32_t reserved_3[8];
30 uint32_t firmware_version[5];
31};
32
33#define TEMPLATE_TYPE_FWDUMP 99
34
35#define ENTRY_TYPE_NOP 0
36#define ENTRY_TYPE_TMP_END 255
37#define ENTRY_TYPE_RD_IOB_T1 256
38#define ENTRY_TYPE_WR_IOB_T1 257
39#define ENTRY_TYPE_RD_IOB_T2 258
40#define ENTRY_TYPE_WR_IOB_T2 259
41#define ENTRY_TYPE_RD_PCI 260
42#define ENTRY_TYPE_WR_PCI 261
43#define ENTRY_TYPE_RD_RAM 262
44#define ENTRY_TYPE_GET_QUEUE 263
45#define ENTRY_TYPE_GET_FCE 264
46#define ENTRY_TYPE_PSE_RISC 265
47#define ENTRY_TYPE_RST_RISC 266
48#define ENTRY_TYPE_DIS_INTR 267
49#define ENTRY_TYPE_GET_HBUF 268
50#define ENTRY_TYPE_SCRATCH 269
51#define ENTRY_TYPE_RDREMREG 270
52#define ENTRY_TYPE_WRREMREG 271
53#define ENTRY_TYPE_RDREMRAM 272
54#define ENTRY_TYPE_PCICFG 273
55
56#define CAPTURE_FLAG_PHYS_ONLY BIT_0
57#define CAPTURE_FLAG_PHYS_VIRT BIT_1
58
59#define DRIVER_FLAG_SKIP_ENTRY BIT_7
60
61struct __packed qla27xx_fwdt_entry {
62 struct __packed {
63 uint32_t entry_type;
64 uint32_t entry_size;
65 uint32_t reserved_1;
66
67 uint8_t capture_flags;
68 uint8_t reserved_2[2];
69 uint8_t driver_flags;
70 } hdr;
71 union __packed {
72 struct __packed {
73 } t0;
74
75 struct __packed {
76 } t255;
77
78 struct __packed {
79 uint32_t base_addr;
80 uint8_t reg_width;
81 uint16_t reg_count;
82 uint8_t pci_offset;
83 } t256;
84
85 struct __packed {
86 uint32_t base_addr;
87 uint32_t write_data;
88 uint8_t pci_offset;
89 uint8_t reserved[3];
90 } t257;
91
92 struct __packed {
93 uint32_t base_addr;
94 uint8_t reg_width;
95 uint16_t reg_count;
96 uint8_t pci_offset;
97 uint8_t banksel_offset;
98 uint8_t reserved[3];
99 uint32_t bank;
100 } t258;
101
102 struct __packed {
103 uint32_t base_addr;
104 uint32_t write_data;
105 uint8_t reserved[2];
106 uint8_t pci_offset;
107 uint8_t banksel_offset;
108 uint32_t bank;
109 } t259;
110
111 struct __packed {
112 uint8_t pci_addr;
113 uint8_t reserved[3];
114 } t260;
115
116 struct __packed {
117 uint8_t pci_addr;
118 uint8_t reserved[3];
119 uint32_t write_data;
120 } t261;
121
122 struct __packed {
123 uint8_t ram_area;
124 uint8_t reserved[3];
125 uint32_t start_addr;
126 uint32_t end_addr;
127 } t262;
128
129 struct __packed {
130 uint32_t num_queues;
131 uint8_t queue_type;
132 uint8_t reserved[3];
133 } t263;
134
135 struct __packed {
136 uint32_t fce_trace_size;
137 uint64_t write_pointer;
138 uint64_t base_pointer;
139 uint32_t fce_enable_mb0;
140 uint32_t fce_enable_mb2;
141 uint32_t fce_enable_mb3;
142 uint32_t fce_enable_mb4;
143 uint32_t fce_enable_mb5;
144 uint32_t fce_enable_mb6;
145 } t264;
146
147 struct __packed {
148 } t265;
149
150 struct __packed {
151 } t266;
152
153 struct __packed {
154 uint8_t pci_offset;
155 uint8_t reserved[3];
156 uint32_t data;
157 } t267;
158
159 struct __packed {
160 uint8_t buf_type;
161 uint8_t reserved[3];
162 uint32_t buf_size;
163 uint64_t start_addr;
164 } t268;
165
166 struct __packed {
167 uint32_t scratch_size;
168 } t269;
169
170 struct __packed {
171 uint32_t addr;
172 uint32_t count;
173 } t270;
174
175 struct __packed {
176 uint32_t addr;
177 uint32_t data;
178 } t271;
179
180 struct __packed {
181 uint32_t addr;
182 uint32_t count;
183 } t272;
184
185 struct __packed {
186 uint32_t addr;
187 uint32_t count;
188 } t273;
189 };
190};
191
192#define T262_RAM_AREA_CRITICAL_RAM 1
193#define T262_RAM_AREA_EXTERNAL_RAM 2
194#define T262_RAM_AREA_SHARED_RAM 3
195#define T262_RAM_AREA_DDR_RAM 4
196
197#define T263_QUEUE_TYPE_REQ 1
198#define T263_QUEUE_TYPE_RSP 2
199#define T263_QUEUE_TYPE_ATIO 3
200
201#define T268_BUF_TYPE_EXTD_TRACE 1
202#define T268_BUF_TYPE_EXCH_BUFOFF 2
203#define T268_BUF_TYPE_EXTD_LOGIN 3
204
205#endif
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 31d19535b015..e36b94712544 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.06.00.12-k" 10#define QLA2XXX_VERSION "8.07.00.02-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 6 13#define QLA_DRIVER_MINOR_VER 7
14#define QLA_DRIVER_PATCH_VER 0 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0