Diffstat (limited to 'drivers/scsi/qla2xxx')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c      328
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c        96
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h        15
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h       584
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c        21
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h         14
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h        56
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c        481
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c     1251
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h     45
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c      358
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c       840
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c       886
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c       516
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c       1471
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c       554
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h     4
17 files changed, 4427 insertions, 3093 deletions
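
The pattern repeated throughout the hunks below is the split of the old monolithic scsi_qla_host_t into a per-(virtual-)port scsi_qla_host ("vha") plus a shared struct qla_hw_data ("ha") reached through vha->hw: port-level state such as dpc_flags, loop_state and the vp_fcports list stays on the vha, while adapter-wide state (NVRAM, option ROM, firmware dump, isp_ops) moves behind the hw pointer. The standalone C sketch here only illustrates that indirection; apart from the identifiers visible in the hunks (scsi_qla_host, qla_hw_data, hw, host_no, fw_dump_reading, zio_timer), the fields and the toy main() are simplified stand-ins, not the driver's real definitions.

/* Standalone sketch (plain C, not the actual driver code) of the
 * refactoring this diff applies: per-port state reaches shared
 * adapter state through the new ->hw back-pointer.
 */
#include <stdio.h>

struct qla_hw_data {              /* shared across all ports of one HBA */
	int fw_dump_reading;
	int zio_timer;
	/* ... NVRAM, option ROM, isp_ops, etc. in the real driver ... */
};

struct scsi_qla_host {            /* one instance per (virtual) port */
	unsigned long host_no;
	struct qla_hw_data *hw;   /* back-pointer added by this patch */
};

static void show_state(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;  /* the idiom the hunks introduce */

	printf("host %lu: fw_dump_reading=%d zio_timer=%d\n",
	       vha->host_no, ha->fw_dump_reading, ha->zio_timer);
}

int main(void)
{
	struct qla_hw_data hw = { .fw_dump_reading = 0, .zio_timer = 10 };
	struct scsi_qla_host vp0 = { .host_no = 0, .hw = &hw };
	struct scsi_qla_host vp1 = { .host_no = 1, .hw = &hw }; /* NPIV vport */

	show_state(&vp0);           /* both ports see the same hardware data */
	show_state(&vp1);
	return 0;
}
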
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ed731968f15f..cd53627cc761 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -19,8 +19,9 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
19 struct bin_attribute *bin_attr, 19 struct bin_attribute *bin_attr,
20 char *buf, loff_t off, size_t count) 20 char *buf, loff_t off, size_t count)
21{ 21{
22 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 22 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
23 struct device, kobj))); 23 struct device, kobj)));
24 struct qla_hw_data *ha = vha->hw;
24 25
25 if (ha->fw_dump_reading == 0) 26 if (ha->fw_dump_reading == 0)
26 return 0; 27 return 0;
@@ -34,8 +35,9 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
34 struct bin_attribute *bin_attr, 35 struct bin_attribute *bin_attr,
35 char *buf, loff_t off, size_t count) 36 char *buf, loff_t off, size_t count)
36{ 37{
37 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 38 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
38 struct device, kobj))); 39 struct device, kobj)));
40 struct qla_hw_data *ha = vha->hw;
39 int reading; 41 int reading;
40 42
41 if (off != 0) 43 if (off != 0)
@@ -48,7 +50,7 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
48 break; 50 break;
49 51
50 qla_printk(KERN_INFO, ha, 52 qla_printk(KERN_INFO, ha,
51 "Firmware dump cleared on (%ld).\n", ha->host_no); 53 "Firmware dump cleared on (%ld).\n", vha->host_no);
52 54
53 ha->fw_dump_reading = 0; 55 ha->fw_dump_reading = 0;
54 ha->fw_dumped = 0; 56 ha->fw_dumped = 0;
@@ -59,14 +61,14 @@ qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
59 61
60 qla_printk(KERN_INFO, ha, 62 qla_printk(KERN_INFO, ha,
61 "Raw firmware dump ready for read on (%ld).\n", 63 "Raw firmware dump ready for read on (%ld).\n",
62 ha->host_no); 64 vha->host_no);
63 } 65 }
64 break; 66 break;
65 case 2: 67 case 2:
66 qla2x00_alloc_fw_dump(ha); 68 qla2x00_alloc_fw_dump(vha);
67 break; 69 break;
68 case 3: 70 case 3:
69 qla2x00_system_error(ha); 71 qla2x00_system_error(vha);
70 break; 72 break;
71 } 73 }
72 return (count); 74 return (count);
@@ -87,8 +89,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
87 struct bin_attribute *bin_attr, 89 struct bin_attribute *bin_attr,
88 char *buf, loff_t off, size_t count) 90 char *buf, loff_t off, size_t count)
89{ 91{
90 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 92 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
91 struct device, kobj))); 93 struct device, kobj)));
94 struct qla_hw_data *ha = vha->hw;
92 95
93 if (!capable(CAP_SYS_ADMIN)) 96 if (!capable(CAP_SYS_ADMIN))
94 return 0; 97 return 0;
@@ -103,8 +106,9 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
103 struct bin_attribute *bin_attr, 106 struct bin_attribute *bin_attr,
104 char *buf, loff_t off, size_t count) 107 char *buf, loff_t off, size_t count)
105{ 108{
106 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 109 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
107 struct device, kobj))); 110 struct device, kobj)));
111 struct qla_hw_data *ha = vha->hw;
108 uint16_t cnt; 112 uint16_t cnt;
109 113
110 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size) 114 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
@@ -134,11 +138,11 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
134 } 138 }
135 139
136 /* Write NVRAM. */ 140 /* Write NVRAM. */
137 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->nvram_base, count); 141 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
138 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->nvram, ha->nvram_base, 142 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
139 count); 143 count);
140 144
141 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 145 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
142 146
143 return (count); 147 return (count);
144} 148}
@@ -158,8 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
158 struct bin_attribute *bin_attr, 162 struct bin_attribute *bin_attr,
159 char *buf, loff_t off, size_t count) 163 char *buf, loff_t off, size_t count)
160{ 164{
161 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 165 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
162 struct device, kobj))); 166 struct device, kobj)));
167 struct qla_hw_data *ha = vha->hw;
163 168
164 if (ha->optrom_state != QLA_SREADING) 169 if (ha->optrom_state != QLA_SREADING)
165 return 0; 170 return 0;
@@ -173,8 +178,9 @@ qla2x00_sysfs_write_optrom(struct kobject *kobj,
173 struct bin_attribute *bin_attr, 178 struct bin_attribute *bin_attr,
174 char *buf, loff_t off, size_t count) 179 char *buf, loff_t off, size_t count)
175{ 180{
176 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 181 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
177 struct device, kobj))); 182 struct device, kobj)));
183 struct qla_hw_data *ha = vha->hw;
178 184
179 if (ha->optrom_state != QLA_SWRITING) 185 if (ha->optrom_state != QLA_SWRITING)
180 return -EINVAL; 186 return -EINVAL;
@@ -203,8 +209,10 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
203 struct bin_attribute *bin_attr, 209 struct bin_attribute *bin_attr,
204 char *buf, loff_t off, size_t count) 210 char *buf, loff_t off, size_t count)
205{ 211{
206 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 212 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
207 struct device, kobj))); 213 struct device, kobj)));
214 struct qla_hw_data *ha = vha->hw;
215
208 uint32_t start = 0; 216 uint32_t start = 0;
209 uint32_t size = ha->optrom_size; 217 uint32_t size = ha->optrom_size;
210 int val, valid; 218 int val, valid;
@@ -262,7 +270,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
262 ha->optrom_region_start, ha->optrom_region_size)); 270 ha->optrom_region_start, ha->optrom_region_size));
263 271
264 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 272 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
265 ha->isp_ops->read_optrom(ha, ha->optrom_buffer, 273 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
266 ha->optrom_region_start, ha->optrom_region_size); 274 ha->optrom_region_start, ha->optrom_region_size);
267 break; 275 break;
268 case 2: 276 case 2:
@@ -333,7 +341,7 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
333 "Writing flash region -- 0x%x/0x%x.\n", 341 "Writing flash region -- 0x%x/0x%x.\n",
334 ha->optrom_region_start, ha->optrom_region_size)); 342 ha->optrom_region_start, ha->optrom_region_size));
335 343
336 ha->isp_ops->write_optrom(ha, ha->optrom_buffer, 344 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
337 ha->optrom_region_start, ha->optrom_region_size); 345 ha->optrom_region_start, ha->optrom_region_size);
338 break; 346 break;
339 default: 347 default:
@@ -356,8 +364,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
356 struct bin_attribute *bin_attr, 364 struct bin_attribute *bin_attr,
357 char *buf, loff_t off, size_t count) 365 char *buf, loff_t off, size_t count)
358{ 366{
359 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 367 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
360 struct device, kobj))); 368 struct device, kobj)));
369 struct qla_hw_data *ha = vha->hw;
361 370
362 if (!capable(CAP_SYS_ADMIN)) 371 if (!capable(CAP_SYS_ADMIN))
363 return 0; 372 return 0;
@@ -371,15 +380,16 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
371 struct bin_attribute *bin_attr, 380 struct bin_attribute *bin_attr,
372 char *buf, loff_t off, size_t count) 381 char *buf, loff_t off, size_t count)
373{ 382{
374 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 383 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
375 struct device, kobj))); 384 struct device, kobj)));
385 struct qla_hw_data *ha = vha->hw;
376 386
377 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size) 387 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
378 return 0; 388 return 0;
379 389
380 /* Write NVRAM. */ 390 /* Write NVRAM. */
381 ha->isp_ops->write_nvram(ha, (uint8_t *)buf, ha->vpd_base, count); 391 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
382 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, ha->vpd_base, count); 392 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
383 393
384 return count; 394 return count;
385} 395}
@@ -399,8 +409,9 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
399 struct bin_attribute *bin_attr, 409 struct bin_attribute *bin_attr,
400 char *buf, loff_t off, size_t count) 410 char *buf, loff_t off, size_t count)
401{ 411{
402 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 412 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
403 struct device, kobj))); 413 struct device, kobj)));
414 struct qla_hw_data *ha = vha->hw;
404 uint16_t iter, addr, offset; 415 uint16_t iter, addr, offset;
405 int rval; 416 int rval;
406 417
@@ -429,7 +440,7 @@ do_read:
429 offset = 0; 440 offset = 0;
430 } 441 }
431 442
432 rval = qla2x00_read_sfp(ha, ha->sfp_data_dma, addr, offset, 443 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
433 SFP_BLOCK_SIZE); 444 SFP_BLOCK_SIZE);
434 if (rval != QLA_SUCCESS) { 445 if (rval != QLA_SUCCESS) {
435 qla_printk(KERN_WARNING, ha, 446 qla_printk(KERN_WARNING, ha,
@@ -469,30 +480,31 @@ static struct sysfs_entry {
469}; 480};
470 481
471void 482void
472qla2x00_alloc_sysfs_attr(scsi_qla_host_t *ha) 483qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
473{ 484{
474 struct Scsi_Host *host = ha->host; 485 struct Scsi_Host *host = vha->host;
475 struct sysfs_entry *iter; 486 struct sysfs_entry *iter;
476 int ret; 487 int ret;
477 488
478 for (iter = bin_file_entries; iter->name; iter++) { 489 for (iter = bin_file_entries; iter->name; iter++) {
479 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 490 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
480 continue; 491 continue;
481 492
482 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 493 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
483 iter->attr); 494 iter->attr);
484 if (ret) 495 if (ret)
485 qla_printk(KERN_INFO, ha, 496 qla_printk(KERN_INFO, vha->hw,
486 "Unable to create sysfs %s binary attribute " 497 "Unable to create sysfs %s binary attribute "
487 "(%d).\n", iter->name, ret); 498 "(%d).\n", iter->name, ret);
488 } 499 }
489} 500}
490 501
491void 502void
492qla2x00_free_sysfs_attr(scsi_qla_host_t *ha) 503qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
493{ 504{
494 struct Scsi_Host *host = ha->host; 505 struct Scsi_Host *host = vha->host;
495 struct sysfs_entry *iter; 506 struct sysfs_entry *iter;
507 struct qla_hw_data *ha = vha->hw;
496 508
497 for (iter = bin_file_entries; iter->name; iter++) { 509 for (iter = bin_file_entries; iter->name; iter++) {
498 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) 510 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
@@ -503,7 +515,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *ha)
503 } 515 }
504 516
505 if (ha->beacon_blink_led == 1) 517 if (ha->beacon_blink_led == 1)
506 ha->isp_ops->beacon_off(ha); 518 ha->isp_ops->beacon_off(vha);
507} 519}
508 520
509/* Scsi_Host attributes. */ 521/* Scsi_Host attributes. */
@@ -519,22 +531,24 @@ static ssize_t
519qla2x00_fw_version_show(struct device *dev, 531qla2x00_fw_version_show(struct device *dev,
520 struct device_attribute *attr, char *buf) 532 struct device_attribute *attr, char *buf)
521{ 533{
522 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 534 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
523 char fw_str[30]; 535 struct qla_hw_data *ha = vha->hw;
536 char fw_str[128];
524 537
525 return snprintf(buf, PAGE_SIZE, "%s\n", 538 return snprintf(buf, PAGE_SIZE, "%s\n",
526 ha->isp_ops->fw_version_str(ha, fw_str)); 539 ha->isp_ops->fw_version_str(vha, fw_str));
527} 540}
528 541
529static ssize_t 542static ssize_t
530qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr, 543qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
531 char *buf) 544 char *buf)
532{ 545{
533 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 546 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
547 struct qla_hw_data *ha = vha->hw;
534 uint32_t sn; 548 uint32_t sn;
535 549
536 if (IS_FWI2_CAPABLE(ha)) { 550 if (IS_FWI2_CAPABLE(ha)) {
537 qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE); 551 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
538 return snprintf(buf, PAGE_SIZE, "%s\n", buf); 552 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
539 } 553 }
540 554
@@ -547,15 +561,16 @@ static ssize_t
547qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr, 561qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
548 char *buf) 562 char *buf)
549{ 563{
550 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 564 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
551 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", ha->pdev->device); 565 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
552} 566}
553 567
554static ssize_t 568static ssize_t
555qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr, 569qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
556 char *buf) 570 char *buf)
557{ 571{
558 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 572 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
573 struct qla_hw_data *ha = vha->hw;
559 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", 574 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
560 ha->product_id[0], ha->product_id[1], ha->product_id[2], 575 ha->product_id[0], ha->product_id[1], ha->product_id[2],
561 ha->product_id[3]); 576 ha->product_id[3]);
@@ -565,43 +580,44 @@ static ssize_t
565qla2x00_model_name_show(struct device *dev, struct device_attribute *attr, 580qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
566 char *buf) 581 char *buf)
567{ 582{
568 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 583 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
569 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_number); 584 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
570} 585}
571 586
572static ssize_t 587static ssize_t
573qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr, 588qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
574 char *buf) 589 char *buf)
575{ 590{
576 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 591 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
577 return snprintf(buf, PAGE_SIZE, "%s\n", 592 return snprintf(buf, PAGE_SIZE, "%s\n",
578 ha->model_desc ? ha->model_desc: ""); 593 vha->hw->model_desc ? vha->hw->model_desc : "");
579} 594}
580 595
581static ssize_t 596static ssize_t
582qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, 597qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
583 char *buf) 598 char *buf)
584{ 599{
585 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 600 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
586 char pci_info[30]; 601 char pci_info[30];
587 602
588 return snprintf(buf, PAGE_SIZE, "%s\n", 603 return snprintf(buf, PAGE_SIZE, "%s\n",
589 ha->isp_ops->pci_info_str(ha, pci_info)); 604 vha->hw->isp_ops->pci_info_str(vha, pci_info));
590} 605}
591 606
592static ssize_t 607static ssize_t
593qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, 608qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
594 char *buf) 609 char *buf)
595{ 610{
596 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 611 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
612 struct qla_hw_data *ha = vha->hw;
597 int len = 0; 613 int len = 0;
598 614
599 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 615 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
600 atomic_read(&ha->loop_state) == LOOP_DEAD) 616 atomic_read(&vha->loop_state) == LOOP_DEAD)
601 len = snprintf(buf, PAGE_SIZE, "Link Down\n"); 617 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
602 else if (atomic_read(&ha->loop_state) != LOOP_READY || 618 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
603 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) || 619 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
604 test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) 620 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
605 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); 621 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
606 else { 622 else {
607 len = snprintf(buf, PAGE_SIZE, "Link Up - "); 623 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -632,10 +648,10 @@ static ssize_t
632qla2x00_zio_show(struct device *dev, struct device_attribute *attr, 648qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
633 char *buf) 649 char *buf)
634{ 650{
635 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 651 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
636 int len = 0; 652 int len = 0;
637 653
638 switch (ha->zio_mode) { 654 switch (vha->hw->zio_mode) {
639 case QLA_ZIO_MODE_6: 655 case QLA_ZIO_MODE_6:
640 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); 656 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
641 break; 657 break;
@@ -650,7 +666,8 @@ static ssize_t
650qla2x00_zio_store(struct device *dev, struct device_attribute *attr, 666qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
651 const char *buf, size_t count) 667 const char *buf, size_t count)
652{ 668{
653 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 669 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
670 struct qla_hw_data *ha = vha->hw;
654 int val = 0; 671 int val = 0;
655 uint16_t zio_mode; 672 uint16_t zio_mode;
656 673
@@ -668,7 +685,7 @@ qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
668 /* Update per-hba values and queue a reset. */ 685 /* Update per-hba values and queue a reset. */
669 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) { 686 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
670 ha->zio_mode = zio_mode; 687 ha->zio_mode = zio_mode;
671 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 688 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
672 } 689 }
673 return strlen(buf); 690 return strlen(buf);
674} 691}
@@ -677,16 +694,16 @@ static ssize_t
677qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr, 694qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
678 char *buf) 695 char *buf)
679{ 696{
680 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 697 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
681 698
682 return snprintf(buf, PAGE_SIZE, "%d us\n", ha->zio_timer * 100); 699 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
683} 700}
684 701
685static ssize_t 702static ssize_t
686qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr, 703qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
687 const char *buf, size_t count) 704 const char *buf, size_t count)
688{ 705{
689 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 706 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
690 int val = 0; 707 int val = 0;
691 uint16_t zio_timer; 708 uint16_t zio_timer;
692 709
@@ -696,7 +713,7 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
696 return -ERANGE; 713 return -ERANGE;
697 714
698 zio_timer = (uint16_t)(val / 100); 715 zio_timer = (uint16_t)(val / 100);
699 ha->zio_timer = zio_timer; 716 vha->hw->zio_timer = zio_timer;
700 717
701 return strlen(buf); 718 return strlen(buf);
702} 719}
@@ -705,10 +722,10 @@ static ssize_t
705qla2x00_beacon_show(struct device *dev, struct device_attribute *attr, 722qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
706 char *buf) 723 char *buf)
707{ 724{
708 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 725 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
709 int len = 0; 726 int len = 0;
710 727
711 if (ha->beacon_blink_led) 728 if (vha->hw->beacon_blink_led)
712 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); 729 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
713 else 730 else
714 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); 731 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
@@ -719,14 +736,15 @@ static ssize_t
719qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, 736qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
720 const char *buf, size_t count) 737 const char *buf, size_t count)
721{ 738{
722 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 739 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
740 struct qla_hw_data *ha = vha->hw;
723 int val = 0; 741 int val = 0;
724 int rval; 742 int rval;
725 743
726 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 744 if (IS_QLA2100(ha) || IS_QLA2200(ha))
727 return -EPERM; 745 return -EPERM;
728 746
729 if (test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) { 747 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
730 qla_printk(KERN_WARNING, ha, 748 qla_printk(KERN_WARNING, ha,
731 "Abort ISP active -- ignoring beacon request.\n"); 749 "Abort ISP active -- ignoring beacon request.\n");
732 return -EBUSY; 750 return -EBUSY;
@@ -736,9 +754,9 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
736 return -EINVAL; 754 return -EINVAL;
737 755
738 if (val) 756 if (val)
739 rval = ha->isp_ops->beacon_on(ha); 757 rval = ha->isp_ops->beacon_on(vha);
740 else 758 else
741 rval = ha->isp_ops->beacon_off(ha); 759 rval = ha->isp_ops->beacon_off(vha);
742 760
743 if (rval != QLA_SUCCESS) 761 if (rval != QLA_SUCCESS)
744 count = 0; 762 count = 0;
@@ -750,8 +768,8 @@ static ssize_t
750qla2x00_optrom_bios_version_show(struct device *dev, 768qla2x00_optrom_bios_version_show(struct device *dev,
751 struct device_attribute *attr, char *buf) 769 struct device_attribute *attr, char *buf)
752{ 770{
753 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 771 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
754 772 struct qla_hw_data *ha = vha->hw;
755 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], 773 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
756 ha->bios_revision[0]); 774 ha->bios_revision[0]);
757} 775}
@@ -760,8 +778,8 @@ static ssize_t
760qla2x00_optrom_efi_version_show(struct device *dev, 778qla2x00_optrom_efi_version_show(struct device *dev,
761 struct device_attribute *attr, char *buf) 779 struct device_attribute *attr, char *buf)
762{ 780{
763 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 781 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
764 782 struct qla_hw_data *ha = vha->hw;
765 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], 783 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
766 ha->efi_revision[0]); 784 ha->efi_revision[0]);
767} 785}
@@ -770,8 +788,8 @@ static ssize_t
770qla2x00_optrom_fcode_version_show(struct device *dev, 788qla2x00_optrom_fcode_version_show(struct device *dev,
771 struct device_attribute *attr, char *buf) 789 struct device_attribute *attr, char *buf)
772{ 790{
773 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 791 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
774 792 struct qla_hw_data *ha = vha->hw;
775 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], 793 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
776 ha->fcode_revision[0]); 794 ha->fcode_revision[0]);
777} 795}
@@ -780,8 +798,8 @@ static ssize_t
780qla2x00_optrom_fw_version_show(struct device *dev, 798qla2x00_optrom_fw_version_show(struct device *dev,
781 struct device_attribute *attr, char *buf) 799 struct device_attribute *attr, char *buf)
782{ 800{
783 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 801 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
784 802 struct qla_hw_data *ha = vha->hw;
785 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", 803 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
786 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], 804 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
787 ha->fw_revision[3]); 805 ha->fw_revision[3]);
@@ -791,8 +809,8 @@ static ssize_t
791qla2x00_total_isp_aborts_show(struct device *dev, 809qla2x00_total_isp_aborts_show(struct device *dev,
792 struct device_attribute *attr, char *buf) 810 struct device_attribute *attr, char *buf)
793{ 811{
794 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 812 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
795 813 struct qla_hw_data *ha = vha->hw;
796 return snprintf(buf, PAGE_SIZE, "%d\n", 814 return snprintf(buf, PAGE_SIZE, "%d\n",
797 ha->qla_stats.total_isp_aborts); 815 ha->qla_stats.total_isp_aborts);
798} 816}
@@ -848,16 +866,17 @@ struct device_attribute *qla2x00_host_attrs[] = {
848static void 866static void
849qla2x00_get_host_port_id(struct Scsi_Host *shost) 867qla2x00_get_host_port_id(struct Scsi_Host *shost)
850{ 868{
851 scsi_qla_host_t *ha = shost_priv(shost); 869 scsi_qla_host_t *vha = shost_priv(shost);
852 870
853 fc_host_port_id(shost) = ha->d_id.b.domain << 16 | 871 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
854 ha->d_id.b.area << 8 | ha->d_id.b.al_pa; 872 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
855} 873}
856 874
857static void 875static void
858qla2x00_get_host_speed(struct Scsi_Host *shost) 876qla2x00_get_host_speed(struct Scsi_Host *shost)
859{ 877{
860 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 878 struct qla_hw_data *ha = ((struct scsi_qla_host *)
879 (shost_priv(shost)))->hw;
861 u32 speed = FC_PORTSPEED_UNKNOWN; 880 u32 speed = FC_PORTSPEED_UNKNOWN;
862 881
863 switch (ha->link_data_rate) { 882 switch (ha->link_data_rate) {
@@ -880,14 +899,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
880static void 899static void
881qla2x00_get_host_port_type(struct Scsi_Host *shost) 900qla2x00_get_host_port_type(struct Scsi_Host *shost)
882{ 901{
883 scsi_qla_host_t *ha = shost_priv(shost); 902 scsi_qla_host_t *vha = shost_priv(shost);
884 uint32_t port_type = FC_PORTTYPE_UNKNOWN; 903 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
885 904
886 if (ha->parent) { 905 if (vha->vp_idx) {
887 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 906 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
888 return; 907 return;
889 } 908 }
890 switch (ha->current_topology) { 909 switch (vha->hw->current_topology) {
891 case ISP_CFG_NL: 910 case ISP_CFG_NL:
892 port_type = FC_PORTTYPE_LPORT; 911 port_type = FC_PORTTYPE_LPORT;
893 break; 912 break;
@@ -908,11 +927,11 @@ static void
908qla2x00_get_starget_node_name(struct scsi_target *starget) 927qla2x00_get_starget_node_name(struct scsi_target *starget)
909{ 928{
910 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 929 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
911 scsi_qla_host_t *ha = shost_priv(host); 930 scsi_qla_host_t *vha = shost_priv(host);
912 fc_port_t *fcport; 931 fc_port_t *fcport;
913 u64 node_name = 0; 932 u64 node_name = 0;
914 933
915 list_for_each_entry(fcport, &ha->fcports, list) { 934 list_for_each_entry(fcport, &vha->vp_fcports, list) {
916 if (fcport->rport && 935 if (fcport->rport &&
917 starget->id == fcport->rport->scsi_target_id) { 936 starget->id == fcport->rport->scsi_target_id) {
918 node_name = wwn_to_u64(fcport->node_name); 937 node_name = wwn_to_u64(fcport->node_name);
@@ -927,11 +946,11 @@ static void
927qla2x00_get_starget_port_name(struct scsi_target *starget) 946qla2x00_get_starget_port_name(struct scsi_target *starget)
928{ 947{
929 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 948 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
930 scsi_qla_host_t *ha = shost_priv(host); 949 scsi_qla_host_t *vha = shost_priv(host);
931 fc_port_t *fcport; 950 fc_port_t *fcport;
932 u64 port_name = 0; 951 u64 port_name = 0;
933 952
934 list_for_each_entry(fcport, &ha->fcports, list) { 953 list_for_each_entry(fcport, &vha->vp_fcports, list) {
935 if (fcport->rport && 954 if (fcport->rport &&
936 starget->id == fcport->rport->scsi_target_id) { 955 starget->id == fcport->rport->scsi_target_id) {
937 port_name = wwn_to_u64(fcport->port_name); 956 port_name = wwn_to_u64(fcport->port_name);
@@ -946,11 +965,11 @@ static void
946qla2x00_get_starget_port_id(struct scsi_target *starget) 965qla2x00_get_starget_port_id(struct scsi_target *starget)
947{ 966{
948 struct Scsi_Host *host = dev_to_shost(starget->dev.parent); 967 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
949 scsi_qla_host_t *ha = shost_priv(host); 968 scsi_qla_host_t *vha = shost_priv(host);
950 fc_port_t *fcport; 969 fc_port_t *fcport;
951 uint32_t port_id = ~0U; 970 uint32_t port_id = ~0U;
952 971
953 list_for_each_entry(fcport, &ha->fcports, list) { 972 list_for_each_entry(fcport, &vha->vp_fcports, list) {
954 if (fcport->rport && 973 if (fcport->rport &&
955 starget->id == fcport->rport->scsi_target_id) { 974 starget->id == fcport->rport->scsi_target_id) {
956 port_id = fcport->d_id.b.domain << 16 | 975 port_id = fcport->d_id.b.domain << 16 |
@@ -999,9 +1018,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
999 * final cleanup of firmware resources (PCBs and XCBs). 1018 * final cleanup of firmware resources (PCBs and XCBs).
1000 */ 1019 */
1001 if (fcport->loop_id != FC_NO_LOOP_ID) { 1020 if (fcport->loop_id != FC_NO_LOOP_ID) {
1002 fcport->ha->isp_ops->fabric_logout(fcport->ha, fcport->loop_id, 1021 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1003 fcport->d_id.b.domain, fcport->d_id.b.area, 1022 fcport->loop_id, fcport->d_id.b.domain,
1004 fcport->d_id.b.al_pa); 1023 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1005 fcport->loop_id = FC_NO_LOOP_ID; 1024 fcport->loop_id = FC_NO_LOOP_ID;
1006 } 1025 }
1007 1026
@@ -1011,16 +1030,18 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1011static int 1030static int
1012qla2x00_issue_lip(struct Scsi_Host *shost) 1031qla2x00_issue_lip(struct Scsi_Host *shost)
1013{ 1032{
1014 scsi_qla_host_t *ha = shost_priv(shost); 1033 scsi_qla_host_t *vha = shost_priv(shost);
1015 1034
1016 qla2x00_loop_reset(ha); 1035 qla2x00_loop_reset(vha);
1017 return 0; 1036 return 0;
1018} 1037}
1019 1038
1020static struct fc_host_statistics * 1039static struct fc_host_statistics *
1021qla2x00_get_fc_host_stats(struct Scsi_Host *shost) 1040qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1022{ 1041{
1023 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1042 scsi_qla_host_t *vha = shost_priv(shost);
1043 struct qla_hw_data *ha = vha->hw;
1044 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1024 int rval; 1045 int rval;
1025 struct link_statistics *stats; 1046 struct link_statistics *stats;
1026 dma_addr_t stats_dma; 1047 dma_addr_t stats_dma;
@@ -1032,21 +1053,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1032 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma); 1053 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1033 if (stats == NULL) { 1054 if (stats == NULL) {
1034 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n", 1055 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
1035 __func__, ha->host_no)); 1056 __func__, base_vha->host_no));
1036 goto done; 1057 goto done;
1037 } 1058 }
1038 memset(stats, 0, DMA_POOL_SIZE); 1059 memset(stats, 0, DMA_POOL_SIZE);
1039 1060
1040 rval = QLA_FUNCTION_FAILED; 1061 rval = QLA_FUNCTION_FAILED;
1041 if (IS_FWI2_CAPABLE(ha)) { 1062 if (IS_FWI2_CAPABLE(ha)) {
1042 rval = qla24xx_get_isp_stats(ha, stats, stats_dma); 1063 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1043 } else if (atomic_read(&ha->loop_state) == LOOP_READY && 1064 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1044 !test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) && 1065 !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1045 !test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) && 1066 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1046 !ha->dpc_active) { 1067 !ha->dpc_active) {
1047 /* Must be in a 'READY' state for statistics retrieval. */ 1068 /* Must be in a 'READY' state for statistics retrieval. */
1048 rval = qla2x00_get_link_status(ha, ha->loop_id, stats, 1069 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1049 stats_dma); 1070 stats, stats_dma);
1050 } 1071 }
1051 1072
1052 if (rval != QLA_SUCCESS) 1073 if (rval != QLA_SUCCESS)
@@ -1077,29 +1098,29 @@ done:
1077static void 1098static void
1078qla2x00_get_host_symbolic_name(struct Scsi_Host *shost) 1099qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1079{ 1100{
1080 scsi_qla_host_t *ha = shost_priv(shost); 1101 scsi_qla_host_t *vha = shost_priv(shost);
1081 1102
1082 qla2x00_get_sym_node_name(ha, fc_host_symbolic_name(shost)); 1103 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1083} 1104}
1084 1105
1085static void 1106static void
1086qla2x00_set_host_system_hostname(struct Scsi_Host *shost) 1107qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1087{ 1108{
1088 scsi_qla_host_t *ha = shost_priv(shost); 1109 scsi_qla_host_t *vha = shost_priv(shost);
1089 1110
1090 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 1111 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1091} 1112}
1092 1113
1093static void 1114static void
1094qla2x00_get_host_fabric_name(struct Scsi_Host *shost) 1115qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1095{ 1116{
1096 scsi_qla_host_t *ha = shost_priv(shost); 1117 scsi_qla_host_t *vha = shost_priv(shost);
1097 u64 node_name; 1118 u64 node_name;
1098 1119
1099 if (ha->device_flags & SWITCH_FOUND) 1120 if (vha->device_flags & SWITCH_FOUND)
1100 node_name = wwn_to_u64(ha->fabric_node_name); 1121 node_name = wwn_to_u64(vha->fabric_node_name);
1101 else 1122 else
1102 node_name = wwn_to_u64(ha->node_name); 1123 node_name = wwn_to_u64(vha->node_name);
1103 1124
1104 fc_host_fabric_name(shost) = node_name; 1125 fc_host_fabric_name(shost) = node_name;
1105} 1126}
@@ -1107,11 +1128,12 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1107static void 1128static void
1108qla2x00_get_host_port_state(struct Scsi_Host *shost) 1129qla2x00_get_host_port_state(struct Scsi_Host *shost)
1109{ 1130{
1110 scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost)); 1131 scsi_qla_host_t *vha = shost_priv(shost);
1132 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1111 1133
1112 if (!ha->flags.online) 1134 if (!base_vha->flags.online)
1113 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1135 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1114 else if (atomic_read(&ha->loop_state) == LOOP_TIMEOUT) 1136 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1115 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 1137 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1116 else 1138 else
1117 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 1139 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
@@ -1121,8 +1143,11 @@ static int
1121qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) 1143qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1122{ 1144{
1123 int ret = 0; 1145 int ret = 0;
1124 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 1146 int cnt = 0;
1125 scsi_qla_host_t *vha; 1147 uint8_t qos = QLA_DEFAULT_QUE_QOS;
1148 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1149 scsi_qla_host_t *vha = NULL;
1150 struct qla_hw_data *ha = base_vha->hw;
1126 1151
1127 ret = qla24xx_vport_create_req_sanity_check(fc_vport); 1152 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1128 if (ret) { 1153 if (ret) {
@@ -1144,18 +1169,19 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1144 atomic_set(&vha->vp_state, VP_FAILED); 1169 atomic_set(&vha->vp_state, VP_FAILED);
1145 1170
1146 /* ready to create vport */ 1171 /* ready to create vport */
1147 qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx); 1172 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1173 vha->vp_idx);
1148 1174
1149 /* initialized vport states */ 1175 /* initialized vport states */
1150 atomic_set(&vha->loop_state, LOOP_DOWN); 1176 atomic_set(&vha->loop_state, LOOP_DOWN);
1151 vha->vp_err_state= VP_ERR_PORTDWN; 1177 vha->vp_err_state= VP_ERR_PORTDWN;
1152 vha->vp_prev_err_state= VP_ERR_UNKWN; 1178 vha->vp_prev_err_state= VP_ERR_UNKWN;
1153 /* Check if physical ha port is Up */ 1179 /* Check if physical ha port is Up */
1154 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 1180 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1155 atomic_read(&ha->loop_state) == LOOP_DEAD) { 1181 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1156 /* Don't retry or attempt login of this virtual port */ 1182 /* Don't retry or attempt login of this virtual port */
1157 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n", 1183 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
1158 vha->host_no)); 1184 base_vha->host_no));
1159 atomic_set(&vha->loop_state, LOOP_DEAD); 1185 atomic_set(&vha->loop_state, LOOP_DEAD);
1160 if (!disable) 1186 if (!disable)
1161 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN); 1187 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@@ -1171,18 +1197,32 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1171 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1197 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1172 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1198 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1173 fc_host_supported_classes(vha->host) = 1199 fc_host_supported_classes(vha->host) =
1174 fc_host_supported_classes(ha->host); 1200 fc_host_supported_classes(base_vha->host);
1175 fc_host_supported_speeds(vha->host) = 1201 fc_host_supported_speeds(vha->host) =
1176 fc_host_supported_speeds(ha->host); 1202 fc_host_supported_speeds(base_vha->host);
1177 1203
1178 qla24xx_vport_disable(fc_vport, disable); 1204 qla24xx_vport_disable(fc_vport, disable);
1179 1205
1206 /* Create a queue pair for the vport */
1207 if (ha->mqenable) {
1208 if (ha->npiv_info) {
1209 for (; cnt < ha->nvram_npiv_size; cnt++) {
1210 if (ha->npiv_info[cnt].port_name ==
1211 vha->port_name &&
1212 ha->npiv_info[cnt].node_name ==
1213 vha->node_name) {
1214 qos = ha->npiv_info[cnt].q_qos;
1215 break;
1216 }
1217 }
1218 }
1219 qla25xx_create_queues(vha, qos);
1220 }
1221
1180 return 0; 1222 return 0;
1181vport_create_failed_2: 1223vport_create_failed_2:
1182 qla24xx_disable_vp(vha); 1224 qla24xx_disable_vp(vha);
1183 qla24xx_deallocate_vp_id(vha); 1225 qla24xx_deallocate_vp_id(vha);
1184 kfree(vha->port_name);
1185 kfree(vha->node_name);
1186 scsi_host_put(vha->host); 1226 scsi_host_put(vha->host);
1187 return FC_VPORT_FAILED; 1227 return FC_VPORT_FAILED;
1188} 1228}
@@ -1191,17 +1231,34 @@ static int
1191qla24xx_vport_delete(struct fc_vport *fc_vport) 1231qla24xx_vport_delete(struct fc_vport *fc_vport)
1192{ 1232{
1193 scsi_qla_host_t *vha = fc_vport->dd_data; 1233 scsi_qla_host_t *vha = fc_vport->dd_data;
1194 scsi_qla_host_t *pha = to_qla_parent(vha); 1234 fc_port_t *fcport, *tfcport;
1235 struct qla_hw_data *ha = vha->hw;
1236 uint16_t id = vha->vp_idx;
1195 1237
1196 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || 1238 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1197 test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags)) 1239 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1198 msleep(1000); 1240 msleep(1000);
1199 1241
1242 if (ha->mqenable) {
1243 if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
1244 qla_printk(KERN_WARNING, ha,
1245 "Queue delete failed.\n");
1246 vha->req_ques[0] = ha->req_q_map[0]->id;
1247 }
1248
1200 qla24xx_disable_vp(vha); 1249 qla24xx_disable_vp(vha);
1201 qla24xx_deallocate_vp_id(vha);
1202 1250
1203 kfree(vha->node_name); 1251 fc_remove_host(vha->host);
1204 kfree(vha->port_name); 1252
1253 scsi_remove_host(vha->host);
1254
1255 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1256 list_del(&fcport->list);
1257 kfree(fcport);
1258 fcport = NULL;
1259 }
1260
1261 qla24xx_deallocate_vp_id(vha);
1205 1262
1206 if (vha->timer_active) { 1263 if (vha->timer_active) {
1207 qla2x00_vp_stop_timer(vha); 1264 qla2x00_vp_stop_timer(vha);
@@ -1210,12 +1267,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
1210 vha->host_no, vha->vp_idx, vha)); 1267 vha->host_no, vha->vp_idx, vha));
1211 } 1268 }
1212 1269
1213 fc_remove_host(vha->host);
1214
1215 scsi_remove_host(vha->host);
1216
1217 scsi_host_put(vha->host); 1270 scsi_host_put(vha->host);
1218 1271 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
1219 return 0; 1272 return 0;
1220} 1273}
1221 1274
@@ -1318,15 +1371,16 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1318}; 1371};
1319 1372
1320void 1373void
1321qla2x00_init_host_attr(scsi_qla_host_t *ha) 1374qla2x00_init_host_attr(scsi_qla_host_t *vha)
1322{ 1375{
1376 struct qla_hw_data *ha = vha->hw;
1323 u32 speed = FC_PORTSPEED_UNKNOWN; 1377 u32 speed = FC_PORTSPEED_UNKNOWN;
1324 1378
1325 fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name); 1379 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1326 fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name); 1380 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1327 fc_host_supported_classes(ha->host) = FC_COS_CLASS3; 1381 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
1328 fc_host_max_npiv_vports(ha->host) = ha->max_npiv_vports;; 1382 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1329 fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count; 1383 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1330 1384
1331 if (IS_QLA25XX(ha)) 1385 if (IS_QLA25XX(ha))
1332 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1386 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
@@ -1338,5 +1392,5 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
1338 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1392 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
1339 else 1393 else
1340 speed = FC_PORTSPEED_1GBIT; 1394 speed = FC_PORTSPEED_1GBIT;
1341 fc_host_supported_speeds(ha->host) = speed; 1395 fc_host_supported_speeds(vha->host) = speed;
1342} 1396}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 510ba64bc286..1cf77772623b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -9,7 +9,7 @@
9#include <linux/delay.h> 9#include <linux/delay.h>
10 10
11static inline void 11static inline void
12qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump) 12qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
13{ 13{
14 fw_dump->fw_major_version = htonl(ha->fw_major_version); 14 fw_dump->fw_major_version = htonl(ha->fw_major_version);
15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version); 15 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
@@ -23,22 +23,24 @@ qla2xxx_prep_dump(scsi_qla_host_t *ha, struct qla2xxx_fw_dump *fw_dump)
23} 23}
24 24
25static inline void * 25static inline void *
26qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr) 26qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
27{ 27{
28 struct req_que *req = ha->req_q_map[0];
29 struct rsp_que *rsp = ha->rsp_q_map[0];
28 /* Request queue. */ 30 /* Request queue. */
29 memcpy(ptr, ha->request_ring, ha->request_q_length * 31 memcpy(ptr, req->ring, req->length *
30 sizeof(request_t)); 32 sizeof(request_t));
31 33
32 /* Response queue. */ 34 /* Response queue. */
33 ptr += ha->request_q_length * sizeof(request_t); 35 ptr += req->length * sizeof(request_t);
34 memcpy(ptr, ha->response_ring, ha->response_q_length * 36 memcpy(ptr, rsp->ring, rsp->length *
35 sizeof(response_t)); 37 sizeof(response_t));
36 38
37 return ptr + (ha->response_q_length * sizeof(response_t)); 39 return ptr + (rsp->length * sizeof(response_t));
38} 40}
39 41
40static int 42static int
41qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram, 43qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
42 uint32_t ram_dwords, void **nxt) 44 uint32_t ram_dwords, void **nxt)
43{ 45{
44 int rval; 46 int rval;
@@ -112,7 +114,7 @@ qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
112} 114}
113 115
114static int 116static int
115qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram, 117qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
116 uint32_t cram_size, void **nxt) 118 uint32_t cram_size, void **nxt)
117{ 119{
118 int rval; 120 int rval;
@@ -163,7 +165,7 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
163} 165}
164 166
165static int 167static int
166qla24xx_soft_reset(scsi_qla_host_t *ha) 168qla24xx_soft_reset(struct qla_hw_data *ha)
167{ 169{
168 int rval = QLA_SUCCESS; 170 int rval = QLA_SUCCESS;
169 uint32_t cnt; 171 uint32_t cnt;
@@ -215,8 +217,8 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
215} 217}
216 218
217static int 219static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram, 220qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
219 uint32_t ram_words, void **nxt) 221 uint16_t ram_words, void **nxt)
220{ 222{
221 int rval; 223 int rval;
222 uint32_t cnt, stat, timer, words, idx; 224 uint32_t cnt, stat, timer, words, idx;
@@ -314,16 +316,17 @@ qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
314 * @hardware_locked: Called with the hardware_lock 316 * @hardware_locked: Called with the hardware_lock
315 */ 317 */
316void 318void
317qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 319qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
318{ 320{
319 int rval; 321 int rval;
320 uint32_t cnt; 322 uint32_t cnt;
321 323 struct qla_hw_data *ha = vha->hw;
322 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 324 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
323 uint16_t __iomem *dmp_reg; 325 uint16_t __iomem *dmp_reg;
324 unsigned long flags; 326 unsigned long flags;
325 struct qla2300_fw_dump *fw; 327 struct qla2300_fw_dump *fw;
326 void *nxt; 328 void *nxt;
329 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
327 330
328 flags = 0; 331 flags = 0;
329 332
@@ -468,7 +471,7 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
468 } else { 471 } else {
469 qla_printk(KERN_INFO, ha, 472 qla_printk(KERN_INFO, ha,
470 "Firmware dump saved to temp buffer (%ld/%p).\n", 473 "Firmware dump saved to temp buffer (%ld/%p).\n",
471 ha->host_no, ha->fw_dump); 474 base_vha->host_no, ha->fw_dump);
472 ha->fw_dumped = 1; 475 ha->fw_dumped = 1;
473 } 476 }
474 477
@@ -483,16 +486,18 @@ qla2300_fw_dump_failed:
483 * @hardware_locked: Called with the hardware_lock 486 * @hardware_locked: Called with the hardware_lock
484 */ 487 */
485void 488void
486qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 489qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
487{ 490{
488 int rval; 491 int rval;
489 uint32_t cnt, timer; 492 uint32_t cnt, timer;
490 uint16_t risc_address; 493 uint16_t risc_address;
491 uint16_t mb0, mb2; 494 uint16_t mb0, mb2;
495 struct qla_hw_data *ha = vha->hw;
492 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 496 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
493 uint16_t __iomem *dmp_reg; 497 uint16_t __iomem *dmp_reg;
494 unsigned long flags; 498 unsigned long flags;
495 struct qla2100_fw_dump *fw; 499 struct qla2100_fw_dump *fw;
500 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
496 501
497 risc_address = 0; 502 risc_address = 0;
498 mb0 = mb2 = 0; 503 mb0 = mb2 = 0;
@@ -673,7 +678,7 @@ qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
673 } else { 678 } else {
674 qla_printk(KERN_INFO, ha, 679 qla_printk(KERN_INFO, ha,
675 "Firmware dump saved to temp buffer (%ld/%p).\n", 680 "Firmware dump saved to temp buffer (%ld/%p).\n",
676 ha->host_no, ha->fw_dump); 681 base_vha->host_no, ha->fw_dump);
677 ha->fw_dumped = 1; 682 ha->fw_dumped = 1;
678 } 683 }
679 684
@@ -683,12 +688,12 @@ qla2100_fw_dump_failed:
683} 688}
684 689
685void 690void
686qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 691qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
687{ 692{
688 int rval; 693 int rval;
689 uint32_t cnt; 694 uint32_t cnt;
690 uint32_t risc_address; 695 uint32_t risc_address;
691 696 struct qla_hw_data *ha = vha->hw;
692 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 697 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
693 uint32_t __iomem *dmp_reg; 698 uint32_t __iomem *dmp_reg;
694 uint32_t *iter_reg; 699 uint32_t *iter_reg;
@@ -697,6 +702,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
697 struct qla24xx_fw_dump *fw; 702 struct qla24xx_fw_dump *fw;
698 uint32_t ext_mem_cnt; 703 uint32_t ext_mem_cnt;
699 void *nxt; 704 void *nxt;
705 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
700 706
701 risc_address = ext_mem_cnt = 0; 707 risc_address = ext_mem_cnt = 0;
702 flags = 0; 708 flags = 0;
@@ -919,7 +925,7 @@ qla24xx_fw_dump_failed_0:
919 } else { 925 } else {
920 qla_printk(KERN_INFO, ha, 926 qla_printk(KERN_INFO, ha,
921 "Firmware dump saved to temp buffer (%ld/%p).\n", 927 "Firmware dump saved to temp buffer (%ld/%p).\n",
922 ha->host_no, ha->fw_dump); 928 base_vha->host_no, ha->fw_dump);
923 ha->fw_dumped = 1; 929 ha->fw_dumped = 1;
924 } 930 }
925 931
@@ -929,13 +935,14 @@ qla24xx_fw_dump_failed:
929} 935}
930 936
931void 937void
932qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked) 938qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
933{ 939{
934 int rval; 940 int rval;
935 uint32_t cnt; 941 uint32_t cnt;
936 uint32_t risc_address; 942 uint32_t risc_address;
937 943 struct qla_hw_data *ha = vha->hw;
938 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 944 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
945 struct device_reg_25xxmq __iomem *reg25;
939 uint32_t __iomem *dmp_reg; 946 uint32_t __iomem *dmp_reg;
940 uint32_t *iter_reg; 947 uint32_t *iter_reg;
941 uint16_t __iomem *mbx_reg; 948 uint16_t __iomem *mbx_reg;
@@ -944,6 +951,11 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
944 uint32_t ext_mem_cnt; 951 uint32_t ext_mem_cnt;
945 void *nxt; 952 void *nxt;
946 struct qla2xxx_fce_chain *fcec; 953 struct qla2xxx_fce_chain *fcec;
954 struct qla2xxx_mq_chain *mq = NULL;
955 uint32_t qreg_size;
956 uint8_t req_cnt, rsp_cnt, que_cnt;
957 uint32_t que_idx;
958 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
947 959
948 risc_address = ext_mem_cnt = 0; 960 risc_address = ext_mem_cnt = 0;
949 flags = 0; 961 flags = 0;
@@ -988,6 +1000,29 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
988 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++)); 1000 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
989 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); 1001 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
990 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); 1002 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1003
1004 /* Multi queue registers */
1005 if (ha->mqenable) {
1006 qreg_size = sizeof(struct qla2xxx_mq_chain);
1007 mq = kzalloc(qreg_size, GFP_KERNEL);
1008 if (!mq)
1009 goto qla25xx_fw_dump_failed_0;
1010 req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
1011 rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
1012 que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
1013 mq->count = htonl(que_cnt);
1014 mq->chain_size = htonl(qreg_size);
1015 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
1016 for (cnt = 0; cnt < que_cnt; cnt++) {
1017 reg25 = (struct device_reg_25xxmq *) ((void *)
1018 ha->mqiobase + cnt * QLA_QUE_PAGE);
1019 que_idx = cnt * 4;
1020 mq->qregs[que_idx] = htonl(reg25->req_q_in);
1021 mq->qregs[que_idx+1] = htonl(reg25->req_q_out);
1022 mq->qregs[que_idx+2] = htonl(reg25->rsp_q_in);
1023 mq->qregs[que_idx+3] = htonl(reg25->rsp_q_out);
1024 }
1025 }
991 WRT_REG_DWORD(&reg->iobase_window, 0x00); 1026 WRT_REG_DWORD(&reg->iobase_window, 0x00);
992 RD_REG_DWORD(&reg->iobase_window); 1027 RD_REG_DWORD(&reg->iobase_window);
993 1028
@@ -1225,7 +1260,14 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
1225 1260
1226 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1261 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1227 1262
1228 fcec = nxt + ntohl(ha->fw_dump->eft_size); 1263 if (ha->mqenable) {
1264 nxt = nxt + ntohl(ha->fw_dump->eft_size);
1265 memcpy(nxt, mq, qreg_size);
1266 kfree(mq);
1267 fcec = nxt + qreg_size;
1268 } else {
1269 fcec = nxt + ntohl(ha->fw_dump->eft_size);
1270 }
1229 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST); 1271 fcec->type = __constant_htonl(DUMP_CHAIN_FCE | DUMP_CHAIN_LAST);
1230 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) + 1272 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
1231 fce_calc_size(ha->fce_bufs)); 1273 fce_calc_size(ha->fce_bufs));
@@ -1248,7 +1290,7 @@ qla25xx_fw_dump_failed_0:
1248 } else { 1290 } else {
1249 qla_printk(KERN_INFO, ha, 1291 qla_printk(KERN_INFO, ha,
1250 "Firmware dump saved to temp buffer (%ld/%p).\n", 1292 "Firmware dump saved to temp buffer (%ld/%p).\n",
1251 ha->host_no, ha->fw_dump); 1293 base_vha->host_no, ha->fw_dump);
1252 ha->fw_dumped = 1; 1294 ha->fw_dumped = 1;
1253 } 1295 }
1254 1296
@@ -1256,15 +1298,15 @@ qla25xx_fw_dump_failed:
1256 if (!hardware_locked) 1298 if (!hardware_locked)
1257 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1258} 1300}
1259
1260/****************************************************************************/ 1301/****************************************************************************/
1261/* Driver Debug Functions. */ 1302/* Driver Debug Functions. */
1262/****************************************************************************/ 1303/****************************************************************************/
1263 1304
1264void 1305void
1265qla2x00_dump_regs(scsi_qla_host_t *ha) 1306qla2x00_dump_regs(scsi_qla_host_t *vha)
1266{ 1307{
1267 int i; 1308 int i;
1309 struct qla_hw_data *ha = vha->hw;
1268 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1310 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1269 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1311 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1270 uint16_t __iomem *mbx_reg; 1312 uint16_t __iomem *mbx_reg;
@@ -1274,7 +1316,7 @@ qla2x00_dump_regs(scsi_qla_host_t *ha)
1274 1316
1275 printk("Mailbox registers:\n"); 1317 printk("Mailbox registers:\n");
1276 for (i = 0; i < 6; i++) 1318 for (i = 0; i < 6; i++)
1277 printk("scsi(%ld): mbox %d 0x%04x \n", ha->host_no, i, 1319 printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
1278 RD_REG_WORD(mbx_reg++)); 1320 RD_REG_WORD(mbx_reg++));
1279} 1321}
1280 1322
@@ -1302,3 +1344,5 @@ qla2x00_dump_buffer(uint8_t * b, uint32_t size)
1302 if (cnt % 16) 1344 if (cnt % 16)
1303 printk("\n"); 1345 printk("\n");
1304} 1346}
1347
1348
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2e9c0c097f5e..c1794a70a45f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -4,6 +4,9 @@
4 * 4 *
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7
8#include "qla_def.h"
9
7/* 10/*
8 * Driver debug definitions. 11 * Driver debug definitions.
9 */ 12 */
@@ -23,6 +26,7 @@
23/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 26/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
24/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */ 27/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
25/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */ 28/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
29/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
26 30
27/* 31/*
28* Macros use for debugging the driver. 32* Macros use for debugging the driver.
@@ -43,6 +47,7 @@
43#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0) 47#define DEBUG2_11(x) do { if (ql2xextended_error_logging) { x; } } while (0)
44#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0) 48#define DEBUG2_13(x) do { if (ql2xextended_error_logging) { x; } } while (0)
45#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0) 49#define DEBUG2_16(x) do { if (ql2xextended_error_logging) { x; } } while (0)
50#define DEBUG2_17(x) do { if (ql2xextended_error_logging) { x; } } while (0)
46 51
47#if defined(QL_DEBUG_LEVEL_3) 52#if defined(QL_DEBUG_LEVEL_3)
48#define DEBUG3(x) do {x;} while (0) 53#define DEBUG3(x) do {x;} while (0)
@@ -127,7 +132,6 @@
127#else 132#else
128#define DEBUG16(x) do {} while (0) 133#define DEBUG16(x) do {} while (0)
129#endif 134#endif
130
131/* 135/*
132 * Firmware Dump structure definition 136 * Firmware Dump structure definition
133 */ 137 */
@@ -266,8 +270,17 @@ struct qla2xxx_fce_chain {
266 uint32_t eregs[8]; 270 uint32_t eregs[8];
267}; 271};
268 272
273struct qla2xxx_mq_chain {
274 uint32_t type;
275 uint32_t chain_size;
276
277 uint32_t count;
278 uint32_t qregs[4 * QLA_MQ_SIZE];
279};
280
269#define DUMP_CHAIN_VARIANT 0x80000000 281#define DUMP_CHAIN_VARIANT 0x80000000
270#define DUMP_CHAIN_FCE 0x7FFFFAF0 282#define DUMP_CHAIN_FCE 0x7FFFFAF0
283#define DUMP_CHAIN_MQ 0x7FFFFAF1
271#define DUMP_CHAIN_LAST 0x80000000 284#define DUMP_CHAIN_LAST 0x80000000
272 285
273struct qla2xxx_fw_dump { 286struct qla2xxx_fw_dump {
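The new qla2xxx_mq_chain carries four 32-bit values (request in/out and response in/out pointers) for up to QLA_MQ_SIZE queues, tagged with DUMP_CHAIN_MQ so dump readers can identify and skip it. Below is a stand-alone sketch of how such a chain entry might be sized and tagged; the fill helper and its arguments are illustrative, and only the structure layout and constants mirror the hunk above.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htonl() */

    #define QLA_MQ_SIZE     32
    #define DUMP_CHAIN_MQ   0x7FFFFAF1

    struct qla2xxx_mq_chain {
        uint32_t type;
        uint32_t chain_size;
        uint32_t count;
        uint32_t qregs[4 * QLA_MQ_SIZE];  /* req in/out + rsp in/out per queue */
    };

    /* Illustrative only: tag the entry and record how many queues it holds. */
    static void fill_mq_chain(struct qla2xxx_mq_chain *mq, uint32_t nqueues)
    {
        memset(mq, 0, sizeof(*mq));
        mq->type = htonl(DUMP_CHAIN_MQ);
        mq->chain_size = htonl(sizeof(*mq));
        mq->count = htonl(nqueues);
        /* qregs[] would be read from each queue's in/out-pointer registers */
    }

    int main(void)
    {
        struct qla2xxx_mq_chain mq;
        fill_mq_chain(&mq, 2);
        printf("MQ chain entry is %zu bytes\n", sizeof(mq));
        return 0;
    }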
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b97194096d8e..a29c95204975 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -181,11 +181,14 @@
181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 181#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 182#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
183 183
184struct req_que;
185
184/* 186/*
185 * SCSI Request Block 187 * SCSI Request Block
186 */ 188 */
187typedef struct srb { 189typedef struct srb {
188 struct scsi_qla_host *ha; /* HA the SP is queued on */ 190 struct scsi_qla_host *vha; /* HA the SP is queued on */
191 struct req_que *que;
189 struct fc_port *fcport; 192 struct fc_port *fcport;
190 193
191 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ 194 struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
@@ -369,9 +372,17 @@ struct device_reg_2xxx {
369 } u_end; 372 } u_end;
370}; 373};
371 374
375struct device_reg_25xxmq {
376 volatile uint32_t req_q_in;
377 volatile uint32_t req_q_out;
378 volatile uint32_t rsp_q_in;
379 volatile uint32_t rsp_q_out;
380};
381
372typedef union { 382typedef union {
373 struct device_reg_2xxx isp; 383 struct device_reg_2xxx isp;
374 struct device_reg_24xx isp24; 384 struct device_reg_24xx isp24;
385 struct device_reg_25xxmq isp25mq;
375} device_reg_t; 386} device_reg_t;
376 387
377#define ISP_REQ_Q_IN(ha, reg) \ 388#define ISP_REQ_Q_IN(ha, reg) \
@@ -1524,7 +1535,7 @@ typedef struct {
1524 */ 1535 */
1525typedef struct fc_port { 1536typedef struct fc_port {
1526 struct list_head list; 1537 struct list_head list;
1527 struct scsi_qla_host *ha; 1538 struct scsi_qla_host *vha;
1528 1539
1529 uint8_t node_name[WWN_SIZE]; 1540 uint8_t node_name[WWN_SIZE];
1530 uint8_t port_name[WWN_SIZE]; 1541 uint8_t port_name[WWN_SIZE];
@@ -1550,7 +1561,6 @@ typedef struct fc_port {
1550 unsigned long last_queue_full; 1561 unsigned long last_queue_full;
1551 unsigned long last_ramp_up; 1562 unsigned long last_ramp_up;
1552 1563
1553 struct list_head vp_fcport;
1554 uint16_t vp_idx; 1564 uint16_t vp_idx;
1555} fc_port_t; 1565} fc_port_t;
1556 1566
@@ -2037,6 +2047,8 @@ typedef struct vport_params {
2037#define VP_RET_CODE_NO_MEM 5 2047#define VP_RET_CODE_NO_MEM 5
2038#define VP_RET_CODE_NOT_FOUND 6 2048#define VP_RET_CODE_NOT_FOUND 6
2039 2049
2050struct qla_hw_data;
2051
2040/* 2052/*
2041 * ISP operations 2053 * ISP operations
2042 */ 2054 */
@@ -2055,10 +2067,11 @@ struct isp_operations {
2055 char * (*fw_version_str) (struct scsi_qla_host *, char *); 2067 char * (*fw_version_str) (struct scsi_qla_host *, char *);
2056 2068
2057 irq_handler_t intr_handler; 2069 irq_handler_t intr_handler;
2058 void (*enable_intrs) (struct scsi_qla_host *); 2070 void (*enable_intrs) (struct qla_hw_data *);
2059 void (*disable_intrs) (struct scsi_qla_host *); 2071 void (*disable_intrs) (struct qla_hw_data *);
2060 2072
2061 int (*abort_command) (struct scsi_qla_host *, srb_t *); 2073 int (*abort_command) (struct scsi_qla_host *, srb_t *,
2074 struct req_que *);
2062 int (*target_reset) (struct fc_port *, unsigned int); 2075 int (*target_reset) (struct fc_port *, unsigned int);
2063 int (*lun_reset) (struct fc_port *, unsigned int); 2076 int (*lun_reset) (struct fc_port *, unsigned int);
2064 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, 2077 int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2089,6 +2102,10 @@ struct isp_operations {
2089 uint32_t); 2102 uint32_t);
2090 2103
2091 int (*get_flash_version) (struct scsi_qla_host *, void *); 2104 int (*get_flash_version) (struct scsi_qla_host *, void *);
2105 int (*start_scsi) (srb_t *);
2106 void (*wrt_req_reg) (struct qla_hw_data *, uint16_t, uint16_t);
2107 void (*wrt_rsp_reg) (struct qla_hw_data *, uint16_t, uint16_t);
2108 uint16_t (*rd_req_reg) (struct qla_hw_data *, uint16_t);
2092}; 2109};
2093 2110
2094/* MSI-X Support *************************************************************/ 2111/* MSI-X Support *************************************************************/
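The extra isp_operations hooks (start_scsi, wrt_req_reg, wrt_rsp_reg, rd_req_reg) let the probe path pick chip-specific queue-register accessors once, so the I/O path can call them without caring whether the chip exposes a single in-pointer register or per-queue pages. A small stand-alone sketch of that dispatch pattern is below; the two writer functions and the opaque hw type are stand-ins, not driver code.

    #include <stdio.h>
    #include <stdint.h>

    struct hw;   /* opaque adapter state for the sketch */

    struct ops {
        void (*wrt_req_reg)(struct hw *, uint16_t id, uint16_t index);
    };

    static void wrt_req_reg_legacy(struct hw *hw, uint16_t id, uint16_t index)
    {
        (void)hw; (void)id;
        printf("legacy: single request in-pointer <- %d\n", index);
    }

    static void wrt_req_reg_mq(struct hw *hw, uint16_t id, uint16_t index)
    {
        (void)hw;
        printf("MQ: queue %d register page, req_q_in <- %d\n", id, index);
    }

    int main(void)
    {
        struct ops ops = { .wrt_req_reg = wrt_req_reg_mq };  /* chosen at probe */
        ops.wrt_req_reg(NULL, 3, 42);   /* caller no longer cares which chip */
        return 0;
    }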
@@ -2100,16 +2117,18 @@ struct isp_operations {
2100#define QLA_MSIX_DEFAULT 0x00 2117#define QLA_MSIX_DEFAULT 0x00
2101#define QLA_MSIX_RSP_Q 0x01 2118#define QLA_MSIX_RSP_Q 0x01
2102 2119
2103#define QLA_MSIX_ENTRIES 2
2104#define QLA_MIDX_DEFAULT 0 2120#define QLA_MIDX_DEFAULT 0
2105#define QLA_MIDX_RSP_Q 1 2121#define QLA_MIDX_RSP_Q 1
2122#define QLA_PCI_MSIX_CONTROL 0xa2
2106 2123
2107struct scsi_qla_host; 2124struct scsi_qla_host;
2125struct rsp_que;
2108 2126
2109struct qla_msix_entry { 2127struct qla_msix_entry {
2110 int have_irq; 2128 int have_irq;
2111 uint32_t msix_vector; 2129 uint32_t vector;
2112 uint16_t msix_entry; 2130 uint16_t entry;
2131 struct rsp_que *rsp;
2113}; 2132};
2114 2133
2115#define WATCH_INTERVAL 1 /* number of seconds */ 2134#define WATCH_INTERVAL 1 /* number of seconds */
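qla_msix_entry is trimmed to vector/entry and gains a struct rsp_que *rsp back-pointer, so each MSI-X vector is bound to the response queue it services and the interrupt path can recover its queue from the per-vector cookie instead of a single global ring. A stand-alone sketch of that idea, with stand-in names, follows.

    #include <stdio.h>
    #include <stdint.h>

    struct rsp_que_sk { uint16_t id; };

    struct msix_entry_sk {
        int have_irq;
        uint32_t vector;           /* was msix_vector */
        uint16_t entry;            /* was msix_entry  */
        struct rsp_que_sk *rsp;    /* new: queue bound to this vector */
    };

    /* What a per-vector handler sees: the cookie is the queue itself. */
    static void handle_vector(void *cookie)
    {
        struct rsp_que_sk *rsp = cookie;
        printf("servicing response queue %d\n", rsp->id);
    }

    int main(void)
    {
        struct rsp_que_sk q = { .id = 1 };
        struct msix_entry_sk e = { .have_irq = 1, .vector = 41,
                                   .entry = 1, .rsp = &q };
        handle_vector(e.rsp);
        return 0;
    }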
@@ -2160,208 +2179,137 @@ struct qla_statistics {
2160 uint64_t output_bytes; 2179 uint64_t output_bytes;
2161}; 2180};
2162 2181
2163/* 2182/* Multi queue support */
2164 * Linux Host Adapter structure 2183#define MBC_INITIALIZE_MULTIQ 0x1f
2165 */ 2184#define QLA_QUE_PAGE 0X1000
2166typedef struct scsi_qla_host { 2185#define QLA_MQ_SIZE 32
2167 struct list_head list; 2186#define QLA_MAX_HOST_QUES 16
2187#define QLA_MAX_QUEUES 256
2188#define ISP_QUE_REG(ha, id) \
2189 ((ha->mqenable) ? \
2190 ((void *)(ha->mqiobase) +\
2191 (QLA_QUE_PAGE * id)) :\
2192 ((void *)(ha->iobase)))
2193#define QLA_REQ_QUE_ID(tag) \
2194 ((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
2195#define QLA_DEFAULT_QUE_QOS 5
2196#define QLA_PRECONFIG_VPORTS 32
2197#define QLA_MAX_VPORTS_QLA24XX 128
2198#define QLA_MAX_VPORTS_QLA25XX 256
2199/* Response queue data structure */
2200struct rsp_que {
2201 dma_addr_t dma;
2202 response_t *ring;
2203 response_t *ring_ptr;
2204 uint16_t ring_index;
2205 uint16_t out_ptr;
2206 uint16_t length;
2207 uint16_t options;
2208 uint16_t rid;
2209 uint16_t id;
2210 uint16_t vp_idx;
2211 struct qla_hw_data *hw;
2212 struct qla_msix_entry *msix;
2213 struct req_que *req;
2214};
2168 2215
2169 /* Commonly used flags and state information. */ 2216/* Request queue data structure */
2170 struct Scsi_Host *host; 2217struct req_que {
2171 struct pci_dev *pdev; 2218 dma_addr_t dma;
2219 request_t *ring;
2220 request_t *ring_ptr;
2221 uint16_t ring_index;
2222 uint16_t in_ptr;
2223 uint16_t cnt;
2224 uint16_t length;
2225 uint16_t options;
2226 uint16_t rid;
2227 uint16_t id;
2228 uint16_t qos;
2229 uint16_t vp_idx;
2230 struct rsp_que *rsp;
2231 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
2232 uint32_t current_outstanding_cmd;
2233 int max_q_depth;
2234};
2172 2235
2173 unsigned long host_no; 2236/*
2237 * Qlogic host adapter specific data structure.
2238*/
2239struct qla_hw_data {
2240 struct pci_dev *pdev;
2241 /* SRB cache. */
2242#define SRB_MIN_REQ 128
2243 mempool_t *srb_mempool;
2174 2244
2175 volatile struct { 2245 volatile struct {
2176 uint32_t init_done :1;
2177 uint32_t online :1;
2178 uint32_t mbox_int :1; 2246 uint32_t mbox_int :1;
2179 uint32_t mbox_busy :1; 2247 uint32_t mbox_busy :1;
2180 uint32_t rscn_queue_overflow :1;
2181 uint32_t reset_active :1;
2182
2183 uint32_t management_server_logged_in :1;
2184 uint32_t process_response_queue :1;
2185 2248
2186 uint32_t disable_risc_code_load :1; 2249 uint32_t disable_risc_code_load :1;
2187 uint32_t enable_64bit_addressing :1; 2250 uint32_t enable_64bit_addressing :1;
2188 uint32_t enable_lip_reset :1; 2251 uint32_t enable_lip_reset :1;
2189 uint32_t enable_lip_full_login :1;
2190 uint32_t enable_target_reset :1; 2252 uint32_t enable_target_reset :1;
2253 uint32_t enable_lip_full_login :1;
2191 uint32_t enable_led_scheme :1; 2254 uint32_t enable_led_scheme :1;
2192 uint32_t inta_enabled :1; 2255 uint32_t inta_enabled :1;
2193 uint32_t msi_enabled :1; 2256 uint32_t msi_enabled :1;
2194 uint32_t msix_enabled :1; 2257 uint32_t msix_enabled :1;
2195 uint32_t disable_serdes :1; 2258 uint32_t disable_serdes :1;
2196 uint32_t gpsc_supported :1; 2259 uint32_t gpsc_supported :1;
2197 uint32_t vsan_enabled :1; 2260 uint32_t vsan_enabled :1;
2198 uint32_t npiv_supported :1; 2261 uint32_t npiv_supported :1;
2199 uint32_t fce_enabled :1; 2262 uint32_t fce_enabled :1;
2200 uint32_t hw_event_marker_found :1; 2263 uint32_t hw_event_marker_found:1;
2201 } flags; 2264 } flags;
2202 2265
2203 atomic_t loop_state;
2204#define LOOP_TIMEOUT 1
2205#define LOOP_DOWN 2
2206#define LOOP_UP 3
2207#define LOOP_UPDATE 4
2208#define LOOP_READY 5
2209#define LOOP_DEAD 6
2210
2211 unsigned long dpc_flags;
2212#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
2213#define RESET_ACTIVE 1
2214#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
2215#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
2216#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
2217#define LOOP_RESYNC_ACTIVE 5
2218#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
2219#define RSCN_UPDATE 7 /* Perform an RSCN update. */
2220#define MAILBOX_RETRY 8
2221#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
2222#define FAILOVER_EVENT_NEEDED 10
2223#define FAILOVER_EVENT 11
2224#define FAILOVER_NEEDED 12
2225#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
2226#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
2227#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
2228#define ABORT_QUEUES_NEEDED 16
2229#define RELOGIN_NEEDED 17
2230#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
2231#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
2232#define ISP_ABORT_RETRY 20 /* ISP aborted. */
2233#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
2234#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
2235#define IOCTL_ERROR_RECOVERY 23
2236#define LOOP_RESET_NEEDED 24
2237#define BEACON_BLINK_NEEDED 25
2238#define REGISTER_FDMI_NEEDED 26
2239#define FCPORT_UPDATE_NEEDED 27
2240#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2241#define UNLOADING 29
2242#define NPIV_CONFIG_NEEDED 30
2243
2244 uint32_t device_flags;
2245#define DFLG_LOCAL_DEVICES BIT_0
2246#define DFLG_RETRY_LOCAL_DEVICES BIT_1
2247#define DFLG_FABRIC_DEVICES BIT_2
2248#define SWITCH_FOUND BIT_3
2249#define DFLG_NO_CABLE BIT_4
2250
2251#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2252#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2253 uint32_t device_type;
2254#define DT_ISP2100 BIT_0
2255#define DT_ISP2200 BIT_1
2256#define DT_ISP2300 BIT_2
2257#define DT_ISP2312 BIT_3
2258#define DT_ISP2322 BIT_4
2259#define DT_ISP6312 BIT_5
2260#define DT_ISP6322 BIT_6
2261#define DT_ISP2422 BIT_7
2262#define DT_ISP2432 BIT_8
2263#define DT_ISP5422 BIT_9
2264#define DT_ISP5432 BIT_10
2265#define DT_ISP2532 BIT_11
2266#define DT_ISP8432 BIT_12
2267#define DT_ISP_LAST (DT_ISP8432 << 1)
2268
2269#define DT_IIDMA BIT_26
2270#define DT_FWI2 BIT_27
2271#define DT_ZIO_SUPPORTED BIT_28
2272#define DT_OEM_001 BIT_29
2273#define DT_ISP2200A BIT_30
2274#define DT_EXTENDED_IDS BIT_31
2275
2276#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
2277#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
2278#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
2279#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
2280#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
2281#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
2282#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
2283#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
2284#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
2285#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2286#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2287#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2288#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2289#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2290
2291#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2292 IS_QLA6312(ha) || IS_QLA6322(ha))
2293#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2294#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2295#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2296#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2297#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2298 IS_QLA84XX(ha))
2299
2300#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2301#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2302#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2303#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2304#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2305
2306 /* SRB cache. */
2307#define SRB_MIN_REQ 128
2308 mempool_t *srb_mempool;
2309
2310 /* This spinlock is used to protect "io transactions", you must 2266 /* This spinlock is used to protect "io transactions", you must
2311 * acquire it before doing any IO to the card, eg with RD_REG*() and 2267 * acquire it before doing any IO to the card, eg with RD_REG*() and
2312 * WRT_REG*() for the duration of your entire commandtransaction. 2268 * WRT_REG*() for the duration of your entire commandtransaction.
2313 * 2269 *
2314 * This spinlock is of lower priority than the io request lock. 2270 * This spinlock is of lower priority than the io request lock.
2315 */ 2271 */
2316
2317 spinlock_t hardware_lock ____cacheline_aligned;
2318 2272
2273 spinlock_t hardware_lock ____cacheline_aligned;
2319 int bars; 2274 int bars;
2320 int mem_only; 2275 int mem_only;
2321 device_reg_t __iomem *iobase; /* Base I/O address */ 2276 device_reg_t __iomem *iobase; /* Base I/O address */
2322 resource_size_t pio_address; 2277 resource_size_t pio_address;
2323#define MIN_IOBASE_LEN 0x100
2324
2325 /* ISP ring lock, rings, and indexes */
2326 dma_addr_t request_dma; /* Physical address. */
2327 request_t *request_ring; /* Base virtual address */
2328 request_t *request_ring_ptr; /* Current address. */
2329 uint16_t req_ring_index; /* Current index. */
2330 uint16_t req_q_cnt; /* Number of available entries. */
2331 uint16_t request_q_length;
2332
2333 dma_addr_t response_dma; /* Physical address. */
2334 response_t *response_ring; /* Base virtual address */
2335 response_t *response_ring_ptr; /* Current address. */
2336 uint16_t rsp_ring_index; /* Current index. */
2337 uint16_t response_q_length;
2338
2339 struct isp_operations *isp_ops;
2340 2278
2341 /* Outstandings ISP commands. */ 2279#define MIN_IOBASE_LEN 0x100
2342 srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS]; 2280/* Multi queue data structs */
2343 uint32_t current_outstanding_cmd; 2281 device_reg_t *mqiobase;
2344 srb_t *status_srb; /* Status continuation entry. */ 2282 uint16_t msix_count;
2283 uint8_t mqenable;
2284 struct req_que **req_q_map;
2285 struct rsp_que **rsp_q_map;
2286 unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2287 unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
2288 uint16_t max_queues;
2289 struct qla_npiv_entry *npiv_info;
2290 uint16_t nvram_npiv_size;
2291
2292 uint16_t switch_cap;
2293#define FLOGI_SEQ_DEL BIT_8
2294#define FLOGI_MID_SUPPORT BIT_10
2295#define FLOGI_VSAN_SUPPORT BIT_12
2296#define FLOGI_SP_SUPPORT BIT_13
2297 /* Timeout timers. */
2298 uint8_t loop_down_abort_time; /* port down timer */
2299 atomic_t loop_down_timer; /* loop down timer */
2300 uint8_t link_down_timeout; /* link down timeout */
2301 uint16_t max_loop_id;
2345 2302
2346 /* ISP configuration data. */
2347 uint16_t loop_id; /* Host adapter loop id */
2348 uint16_t switch_cap;
2349#define FLOGI_SEQ_DEL BIT_8
2350#define FLOGI_MID_SUPPORT BIT_10
2351#define FLOGI_VSAN_SUPPORT BIT_12
2352#define FLOGI_SP_SUPPORT BIT_13
2353 uint16_t fb_rev; 2303 uint16_t fb_rev;
2354
2355 port_id_t d_id; /* Host adapter port id */
2356 uint16_t max_public_loop_ids; 2304 uint16_t max_public_loop_ids;
2357 uint16_t min_external_loopid; /* First external loop Id */ 2305 uint16_t min_external_loopid; /* First external loop Id */
2358 2306
2359#define PORT_SPEED_UNKNOWN 0xFFFF 2307#define PORT_SPEED_UNKNOWN 0xFFFF
2360#define PORT_SPEED_1GB 0x00 2308#define PORT_SPEED_1GB 0x00
2361#define PORT_SPEED_2GB 0x01 2309#define PORT_SPEED_2GB 0x01
2362#define PORT_SPEED_4GB 0x03 2310#define PORT_SPEED_4GB 0x03
2363#define PORT_SPEED_8GB 0x04 2311#define PORT_SPEED_8GB 0x04
2364 uint16_t link_data_rate; /* F/W operating speed */ 2312 uint16_t link_data_rate; /* F/W operating speed */
2365 2313
2366 uint8_t current_topology; 2314 uint8_t current_topology;
2367 uint8_t prev_topology; 2315 uint8_t prev_topology;
@@ -2370,15 +2318,69 @@ typedef struct scsi_qla_host {
2370#define ISP_CFG_FL 4 2318#define ISP_CFG_FL 4
2371#define ISP_CFG_F 8 2319#define ISP_CFG_F 8
2372 2320
2373 uint8_t operating_mode; /* F/W operating mode */ 2321 uint8_t operating_mode; /* F/W operating mode */
2374#define LOOP 0 2322#define LOOP 0
2375#define P2P 1 2323#define P2P 1
2376#define LOOP_P2P 2 2324#define LOOP_P2P 2
2377#define P2P_LOOP 3 2325#define P2P_LOOP 3
2378
2379 uint8_t marker_needed;
2380
2381 uint8_t interrupts_on; 2326 uint8_t interrupts_on;
2327 uint32_t isp_abort_cnt;
2328
2329#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2330#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2331 uint32_t device_type;
2332#define DT_ISP2100 BIT_0
2333#define DT_ISP2200 BIT_1
2334#define DT_ISP2300 BIT_2
2335#define DT_ISP2312 BIT_3
2336#define DT_ISP2322 BIT_4
2337#define DT_ISP6312 BIT_5
2338#define DT_ISP6322 BIT_6
2339#define DT_ISP2422 BIT_7
2340#define DT_ISP2432 BIT_8
2341#define DT_ISP5422 BIT_9
2342#define DT_ISP5432 BIT_10
2343#define DT_ISP2532 BIT_11
2344#define DT_ISP8432 BIT_12
2345#define DT_ISP_LAST (DT_ISP8432 << 1)
2346
2347#define DT_IIDMA BIT_26
2348#define DT_FWI2 BIT_27
2349#define DT_ZIO_SUPPORTED BIT_28
2350#define DT_OEM_001 BIT_29
2351#define DT_ISP2200A BIT_30
2352#define DT_EXTENDED_IDS BIT_31
2353#define DT_MASK(ha) ((ha)->device_type & (DT_ISP_LAST - 1))
2354#define IS_QLA2100(ha) (DT_MASK(ha) & DT_ISP2100)
2355#define IS_QLA2200(ha) (DT_MASK(ha) & DT_ISP2200)
2356#define IS_QLA2300(ha) (DT_MASK(ha) & DT_ISP2300)
2357#define IS_QLA2312(ha) (DT_MASK(ha) & DT_ISP2312)
2358#define IS_QLA2322(ha) (DT_MASK(ha) & DT_ISP2322)
2359#define IS_QLA6312(ha) (DT_MASK(ha) & DT_ISP6312)
2360#define IS_QLA6322(ha) (DT_MASK(ha) & DT_ISP6322)
2361#define IS_QLA2422(ha) (DT_MASK(ha) & DT_ISP2422)
2362#define IS_QLA2432(ha) (DT_MASK(ha) & DT_ISP2432)
2363#define IS_QLA5422(ha) (DT_MASK(ha) & DT_ISP5422)
2364#define IS_QLA5432(ha) (DT_MASK(ha) & DT_ISP5432)
2365#define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2366#define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2367
2368#define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2369 IS_QLA6312(ha) || IS_QLA6322(ha))
2370#define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2371#define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2372#define IS_QLA25XX(ha) (IS_QLA2532(ha))
2373#define IS_QLA84XX(ha) (IS_QLA8432(ha))
2374#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2375 IS_QLA84XX(ha))
2376#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2377 IS_QLA25XX(ha))
2378
2379#define IS_IIDMA_CAPABLE(ha) ((ha)->device_type & DT_IIDMA)
2380#define IS_FWI2_CAPABLE(ha) ((ha)->device_type & DT_FWI2)
2381#define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2382#define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2383#define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2382 2384
2383 /* HBA serial number */ 2385 /* HBA serial number */
2384 uint8_t serial0; 2386 uint8_t serial0;
@@ -2386,8 +2388,8 @@ typedef struct scsi_qla_host {
2386 uint8_t serial2; 2388 uint8_t serial2;
2387 2389
2388 /* NVRAM configuration data */ 2390 /* NVRAM configuration data */
2389#define MAX_NVRAM_SIZE 4096 2391#define MAX_NVRAM_SIZE 4096
2390#define VPD_OFFSET MAX_NVRAM_SIZE / 2 2392#define VPD_OFFSET MAX_NVRAM_SIZE / 2
2391 uint16_t nvram_size; 2393 uint16_t nvram_size;
2392 uint16_t nvram_base; 2394 uint16_t nvram_base;
2393 void *nvram; 2395 void *nvram;
@@ -2401,22 +2403,8 @@ typedef struct scsi_qla_host {
2401 uint16_t r_a_tov; 2403 uint16_t r_a_tov;
2402 int port_down_retry_count; 2404 int port_down_retry_count;
2403 uint8_t mbx_count; 2405 uint8_t mbx_count;
2404 uint16_t last_loop_id;
2405 uint16_t mgmt_svr_loop_id;
2406
2407 uint32_t login_retry_count;
2408 int max_q_depth;
2409
2410 struct list_head work_list;
2411
2412 /* Fibre Channel Device List. */
2413 struct list_head fcports;
2414
2415 /* RSCN queue. */
2416 uint32_t rscn_queue[MAX_RSCN_COUNT];
2417 uint8_t rscn_in_ptr;
2418 uint8_t rscn_out_ptr;
2419 2406
2407 uint32_t login_retry_count;
2420 /* SNS command interfaces. */ 2408 /* SNS command interfaces. */
2421 ms_iocb_entry_t *ms_iocb; 2409 ms_iocb_entry_t *ms_iocb;
2422 dma_addr_t ms_iocb_dma; 2410 dma_addr_t ms_iocb_dma;
@@ -2426,28 +2414,20 @@ typedef struct scsi_qla_host {
2426 struct sns_cmd_pkt *sns_cmd; 2414 struct sns_cmd_pkt *sns_cmd;
2427 dma_addr_t sns_cmd_dma; 2415 dma_addr_t sns_cmd_dma;
2428 2416
2429#define SFP_DEV_SIZE 256 2417#define SFP_DEV_SIZE 256
2430#define SFP_BLOCK_SIZE 64 2418#define SFP_BLOCK_SIZE 64
2431 void *sfp_data; 2419 void *sfp_data;
2432 dma_addr_t sfp_data_dma; 2420 dma_addr_t sfp_data_dma;
2433 2421
2434 struct task_struct *dpc_thread; 2422 struct task_struct *dpc_thread;
2435 uint8_t dpc_active; /* DPC routine is active */ 2423 uint8_t dpc_active; /* DPC routine is active */
2436 2424
2437 /* Timeout timers. */
2438 uint8_t loop_down_abort_time; /* port down timer */
2439 atomic_t loop_down_timer; /* loop down timer */
2440 uint8_t link_down_timeout; /* link down timeout */
2441
2442 uint32_t timer_active;
2443 struct timer_list timer;
2444
2445 dma_addr_t gid_list_dma; 2425 dma_addr_t gid_list_dma;
2446 struct gid_list_info *gid_list; 2426 struct gid_list_info *gid_list;
2447 int gid_list_info_size; 2427 int gid_list_info_size;
2448 2428
2449 /* Small DMA pool allocations -- maximum 256 bytes in length. */ 2429 /* Small DMA pool allocations -- maximum 256 bytes in length. */
2450#define DMA_POOL_SIZE 256 2430#define DMA_POOL_SIZE 256
2451 struct dma_pool *s_dma_pool; 2431 struct dma_pool *s_dma_pool;
2452 2432
2453 dma_addr_t init_cb_dma; 2433 dma_addr_t init_cb_dma;
@@ -2459,17 +2439,17 @@ typedef struct scsi_qla_host {
2459 2439
2460 mbx_cmd_t *mcp; 2440 mbx_cmd_t *mcp;
2461 unsigned long mbx_cmd_flags; 2441 unsigned long mbx_cmd_flags;
2462#define MBX_INTERRUPT 1 2442#define MBX_INTERRUPT 1
2463#define MBX_INTR_WAIT 2 2443#define MBX_INTR_WAIT 2
2464#define MBX_UPDATE_FLASH_ACTIVE 3 2444#define MBX_UPDATE_FLASH_ACTIVE 3
2465 2445
2466 struct mutex vport_lock; /* Virtual port synchronization */ 2446 struct mutex vport_lock; /* Virtual port synchronization */
2467 struct completion mbx_cmd_comp; /* Serialize mbx access */ 2447 struct completion mbx_cmd_comp; /* Serialize mbx access */
2468 struct completion mbx_intr_comp; /* Used for completion notification */ 2448 struct completion mbx_intr_comp; /* Used for completion notification */
2469 2449
2470 uint32_t mbx_flags; 2450 uint32_t mbx_flags;
2471#define MBX_IN_PROGRESS BIT_0 2451#define MBX_IN_PROGRESS BIT_0
2472#define MBX_BUSY BIT_1 /* Got the Access */ 2452#define MBX_BUSY BIT_1 /* Got the Access */
2473#define MBX_SLEEPING_ON_SEM BIT_2 2453#define MBX_SLEEPING_ON_SEM BIT_2
2474#define MBX_POLLING_FOR_COMP BIT_3 2454#define MBX_POLLING_FOR_COMP BIT_3
2475#define MBX_COMPLETED BIT_4 2455#define MBX_COMPLETED BIT_4
@@ -2488,7 +2468,7 @@ typedef struct scsi_qla_host {
2488#define RISC_START_ADDRESS_2300 0x800 2468#define RISC_START_ADDRESS_2300 0x800
2489#define RISC_START_ADDRESS_2400 0x100000 2469#define RISC_START_ADDRESS_2400 0x100000
2490 2470
2491 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ 2471 uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
2492 uint8_t fw_seriallink_options[4]; 2472 uint8_t fw_seriallink_options[4];
2493 uint16_t fw_seriallink_options24[4]; 2473 uint16_t fw_seriallink_options24[4];
2494 2474
@@ -2509,10 +2489,10 @@ typedef struct scsi_qla_host {
2509 uint64_t fce_wr, fce_rd; 2489 uint64_t fce_wr, fce_rd;
2510 struct mutex fce_mutex; 2490 struct mutex fce_mutex;
2511 2491
2492 uint32_t hw_event_start;
2512 uint32_t hw_event_ptr; 2493 uint32_t hw_event_ptr;
2513 uint32_t hw_event_pause_errors; 2494 uint32_t hw_event_pause_errors;
2514 2495
2515 uint8_t host_str[16];
2516 uint32_t pci_attr; 2496 uint32_t pci_attr;
2517 uint16_t chip_revision; 2497 uint16_t chip_revision;
2518 2498
@@ -2523,11 +2503,6 @@ typedef struct scsi_qla_host {
2523 char model_desc[80]; 2503 char model_desc[80];
2524 uint8_t adapter_id[16+1]; 2504 uint8_t adapter_id[16+1];
2525 2505
2526 uint8_t *node_name;
2527 uint8_t *port_name;
2528 uint8_t fabric_node_name[WWN_SIZE];
2529 uint32_t isp_abort_cnt;
2530
2531 /* Option ROM information. */ 2506 /* Option ROM information. */
2532 char *optrom_buffer; 2507 char *optrom_buffer;
2533 uint32_t optrom_size; 2508 uint32_t optrom_size;
@@ -2538,13 +2513,13 @@ typedef struct scsi_qla_host {
2538 uint32_t optrom_region_start; 2513 uint32_t optrom_region_start;
2539 uint32_t optrom_region_size; 2514 uint32_t optrom_region_size;
2540 2515
2541 /* PCI expansion ROM image information. */ 2516/* PCI expansion ROM image information. */
2542#define ROM_CODE_TYPE_BIOS 0 2517#define ROM_CODE_TYPE_BIOS 0
2543#define ROM_CODE_TYPE_FCODE 1 2518#define ROM_CODE_TYPE_FCODE 1
2544#define ROM_CODE_TYPE_EFI 3 2519#define ROM_CODE_TYPE_EFI 3
2545 uint8_t bios_revision[2]; 2520 uint8_t bios_revision[2];
2546 uint8_t efi_revision[2]; 2521 uint8_t efi_revision[2];
2547 uint8_t fcode_revision[16]; 2522 uint8_t fcode_revision[16];
2548 uint32_t fw_revision[4]; 2523 uint32_t fw_revision[4];
2549 2524
2550 uint32_t fdt_wrt_disable; 2525 uint32_t fdt_wrt_disable;
@@ -2553,39 +2528,144 @@ typedef struct scsi_qla_host {
2553 uint32_t fdt_unprotect_sec_cmd; 2528 uint32_t fdt_unprotect_sec_cmd;
2554 uint32_t fdt_protect_sec_cmd; 2529 uint32_t fdt_protect_sec_cmd;
2555 2530
2556 uint32_t flt_region_flt; 2531 uint32_t flt_region_flt;
2557 uint32_t flt_region_fdt; 2532 uint32_t flt_region_fdt;
2558 uint32_t flt_region_boot; 2533 uint32_t flt_region_boot;
2559 uint32_t flt_region_fw; 2534 uint32_t flt_region_fw;
2560 uint32_t flt_region_vpd_nvram; 2535 uint32_t flt_region_vpd_nvram;
2561 uint32_t flt_region_hw_event; 2536 uint32_t flt_region_hw_event;
2562 uint32_t flt_region_npiv_conf; 2537 uint32_t flt_region_npiv_conf;
2563 2538
2564 /* Needed for BEACON */ 2539 /* Needed for BEACON */
2565 uint16_t beacon_blink_led; 2540 uint16_t beacon_blink_led;
2566 uint8_t beacon_color_state; 2541 uint8_t beacon_color_state;
2567#define QLA_LED_GRN_ON 0x01 2542#define QLA_LED_GRN_ON 0x01
2568#define QLA_LED_YLW_ON 0x02 2543#define QLA_LED_YLW_ON 0x02
2569#define QLA_LED_ABR_ON 0x04 2544#define QLA_LED_ABR_ON 0x04
2570#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */ 2545#define QLA_LED_ALL_ON 0x07 /* yellow, green, amber. */
2571 /* ISP2322: red, green, amber. */ 2546 /* ISP2322: red, green, amber. */
2572 2547 uint16_t zio_mode;
2573 uint16_t zio_mode; 2548 uint16_t zio_timer;
2574 uint16_t zio_timer;
2575 struct fc_host_statistics fc_host_stat; 2549 struct fc_host_statistics fc_host_stat;
2576 2550
2577 struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES]; 2551 struct qla_msix_entry *msix_entries;
2552
2553 struct list_head vp_list; /* list of VP */
2554 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
2555 sizeof(unsigned long)];
2556 uint16_t num_vhosts; /* number of vports created */
2557 uint16_t num_vsans; /* number of vsan created */
 2558	uint16_t	max_npiv_vports;	/* 63 or 125 per topology */
2559 int cur_vport_count;
2560
2561 struct qla_chip_state_84xx *cs84xx;
2562 struct qla_statistics qla_stats;
2563 struct isp_operations *isp_ops;
2564};
2565
2566/*
2567 * Qlogic scsi host structure
2568 */
2569typedef struct scsi_qla_host {
2570 struct list_head list;
2571 struct list_head vp_fcports; /* list of fcports */
2572 struct list_head work_list;
2573 /* Commonly used flags and state information. */
2574 struct Scsi_Host *host;
2575 unsigned long host_no;
2576 uint8_t host_str[16];
2577
2578 volatile struct {
2579 uint32_t init_done :1;
2580 uint32_t online :1;
2581 uint32_t rscn_queue_overflow :1;
2582 uint32_t reset_active :1;
2583
2584 uint32_t management_server_logged_in :1;
2585 uint32_t process_response_queue :1;
2586 } flags;
2587
2588 atomic_t loop_state;
2589#define LOOP_TIMEOUT 1
2590#define LOOP_DOWN 2
2591#define LOOP_UP 3
2592#define LOOP_UPDATE 4
2593#define LOOP_READY 5
2594#define LOOP_DEAD 6
2595
2596 unsigned long dpc_flags;
2597#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
2598#define RESET_ACTIVE 1
2599#define ISP_ABORT_NEEDED 2 /* Initiate ISP abort. */
2600#define ABORT_ISP_ACTIVE 3 /* ISP abort in progress. */
2601#define LOOP_RESYNC_NEEDED 4 /* Device Resync needed. */
2602#define LOOP_RESYNC_ACTIVE 5
2603#define LOCAL_LOOP_UPDATE 6 /* Perform a local loop update. */
2604#define RSCN_UPDATE 7 /* Perform an RSCN update. */
2605#define MAILBOX_RETRY 8
2606#define ISP_RESET_NEEDED 9 /* Initiate a ISP reset. */
2607#define FAILOVER_EVENT_NEEDED 10
2608#define FAILOVER_EVENT 11
2609#define FAILOVER_NEEDED 12
2610#define SCSI_RESTART_NEEDED 13 /* Processes SCSI retry queue. */
2611#define PORT_RESTART_NEEDED 14 /* Processes Retry queue. */
2612#define RESTART_QUEUES_NEEDED 15 /* Restarts the Lun queue. */
2613#define ABORT_QUEUES_NEEDED 16
2614#define RELOGIN_NEEDED 17
2615#define LOGIN_RETRY_NEEDED 18 /* Initiate required fabric logins. */
2616#define REGISTER_FC4_NEEDED 19 /* SNS FC4 registration required. */
2617#define ISP_ABORT_RETRY 20 /* ISP aborted. */
2618#define FCPORT_RESCAN_NEEDED 21 /* IO descriptor processing needed */
2619#define IODESC_PROCESS_NEEDED 22 /* IO descriptor processing needed */
2620#define IOCTL_ERROR_RECOVERY 23
2621#define LOOP_RESET_NEEDED 24
2622#define BEACON_BLINK_NEEDED 25
2623#define REGISTER_FDMI_NEEDED 26
2624#define FCPORT_UPDATE_NEEDED 27
2625#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2626#define UNLOADING 29
2627#define NPIV_CONFIG_NEEDED 30
2628
2629 uint32_t device_flags;
2630#define DFLG_LOCAL_DEVICES BIT_0
2631#define DFLG_RETRY_LOCAL_DEVICES BIT_1
2632#define DFLG_FABRIC_DEVICES BIT_2
2633#define SWITCH_FOUND BIT_3
2634#define DFLG_NO_CABLE BIT_4
2635
2636 srb_t *status_srb; /* Status continuation entry. */
2637
2638 /* ISP configuration data. */
2639 uint16_t loop_id; /* Host adapter loop id */
2640
2641 port_id_t d_id; /* Host adapter port id */
2642 uint8_t marker_needed;
2643 uint16_t mgmt_svr_loop_id;
2644
2645
2646
2647 /* RSCN queue. */
2648 uint32_t rscn_queue[MAX_RSCN_COUNT];
2649 uint8_t rscn_in_ptr;
2650 uint8_t rscn_out_ptr;
2651
2652 /* Timeout timers. */
2653 uint8_t loop_down_abort_time; /* port down timer */
2654 atomic_t loop_down_timer; /* loop down timer */
2655 uint8_t link_down_timeout; /* link down timeout */
2656
2657 uint32_t timer_active;
2658 struct timer_list timer;
2659
2660 uint8_t node_name[WWN_SIZE];
2661 uint8_t port_name[WWN_SIZE];
2662 uint8_t fabric_node_name[WWN_SIZE];
2663 uint32_t vp_abort_cnt;
2578 2664
2579 struct list_head vp_list; /* list of VP */
2580 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 2665 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
2581 unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) / sizeof(unsigned long)];
2582 uint16_t num_vhosts; /* number of vports created */
2583 uint16_t num_vsans; /* number of vsan created */
2584 uint16_t vp_idx; /* vport ID */ 2666 uint16_t vp_idx; /* vport ID */
2585 2667
2586 struct scsi_qla_host *parent; /* holds pport */
2587 unsigned long vp_flags; 2668 unsigned long vp_flags;
2588 struct list_head vp_fcports; /* list of fcports */
2589#define VP_IDX_ACQUIRED 0 /* bit no 0 */ 2669#define VP_IDX_ACQUIRED 0 /* bit no 0 */
2590#define VP_CREATE_NEEDED 1 2670#define VP_CREATE_NEEDED 1
2591#define VP_BIND_NEEDED 2 2671#define VP_BIND_NEEDED 2
@@ -2604,14 +2684,10 @@ typedef struct scsi_qla_host {
2604#define VP_ERR_FAB_NORESOURCES 3 2684#define VP_ERR_FAB_NORESOURCES 3
2605#define VP_ERR_FAB_LOGOUT 4 2685#define VP_ERR_FAB_LOGOUT 4
2606#define VP_ERR_ADAP_NORESOURCES 5 2686#define VP_ERR_ADAP_NORESOURCES 5
2607 uint16_t max_npiv_vports; /* 63 or 125 per topoloty */ 2687 struct qla_hw_data *hw;
2608 int cur_vport_count; 2688 int req_ques[QLA_MAX_HOST_QUES];
2609
2610 struct qla_chip_state_84xx *cs84xx;
2611 struct qla_statistics qla_stats;
2612} scsi_qla_host_t; 2689} scsi_qla_host_t;
2613 2690
2614
2615/* 2691/*
2616 * Macros to help code, maintain, etc. 2692 * Macros to help code, maintain, etc.
2617 */ 2693 */
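The qla_def.h changes above split the old monolithic scsi_qla_host: shared adapter state (PCI device, iobase/mqiobase, queue maps, DMA pools, isp_ops) moves into struct qla_hw_data, while scsi_qla_host keeps per-(v)port state and reaches the hardware through vha->hw. ISP_QUE_REG then selects either the legacy register window or a QLA_QUE_PAGE-sized page per queue off mqiobase. The stand-alone sketch below illustrates both points with trimmed stand-in types; the four-page BAR is an assumption made only for the example.

    #include <stdio.h>
    #include <stdint.h>

    #define QLA_QUE_PAGE 0x1000

    struct qla_hw_data_sk {
        uint8_t mqenable;
        void *iobase;       /* legacy single register window        */
        void *mqiobase;     /* base of the per-queue register pages */
    };

    struct scsi_qla_host_sk {
        unsigned long host_no;
        struct qla_hw_data_sk *hw;   /* shared adapter data */
    };

    /* Mirrors the ISP_QUE_REG() selection in the hunk above. */
    static void *isp_que_reg(struct qla_hw_data_sk *ha, int id)
    {
        return ha->mqenable ? (char *)ha->mqiobase + QLA_QUE_PAGE * id
                            : ha->iobase;
    }

    int main(void)
    {
        static char bar[QLA_QUE_PAGE * 4];          /* pretend 4-page BAR */
        struct qla_hw_data_sk hw = { .mqenable = 1,
                                     .iobase = bar, .mqiobase = bar };
        struct scsi_qla_host_sk vha = { .host_no = 0, .hw = &hw };

        printf("queue 2 registers at offset 0x%lx\n",
               (unsigned long)((char *)isp_que_reg(vha.hw, 2) - bar));
        return 0;
    }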
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 561a4411719d..0e366a1b44b3 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -15,10 +15,11 @@ static atomic_t qla2x00_dfs_root_count;
15static int 15static int
16qla2x00_dfs_fce_show(struct seq_file *s, void *unused) 16qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
17{ 17{
18 scsi_qla_host_t *ha = s->private; 18 scsi_qla_host_t *vha = s->private;
19 uint32_t cnt; 19 uint32_t cnt;
20 uint32_t *fce; 20 uint32_t *fce;
21 uint64_t fce_start; 21 uint64_t fce_start;
22 struct qla_hw_data *ha = vha->hw;
22 23
23 mutex_lock(&ha->fce_mutex); 24 mutex_lock(&ha->fce_mutex);
24 25
@@ -51,7 +52,8 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
51static int 52static int
52qla2x00_dfs_fce_open(struct inode *inode, struct file *file) 53qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
53{ 54{
54 scsi_qla_host_t *ha = inode->i_private; 55 scsi_qla_host_t *vha = inode->i_private;
56 struct qla_hw_data *ha = vha->hw;
55 int rval; 57 int rval;
56 58
57 if (!ha->flags.fce_enabled) 59 if (!ha->flags.fce_enabled)
@@ -60,7 +62,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
60 mutex_lock(&ha->fce_mutex); 62 mutex_lock(&ha->fce_mutex);
61 63
62 /* Pause tracing to flush FCE buffers. */ 64 /* Pause tracing to flush FCE buffers. */
63 rval = qla2x00_disable_fce_trace(ha, &ha->fce_wr, &ha->fce_rd); 65 rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
64 if (rval) 66 if (rval)
65 qla_printk(KERN_WARNING, ha, 67 qla_printk(KERN_WARNING, ha,
66 "DebugFS: Unable to disable FCE (%d).\n", rval); 68 "DebugFS: Unable to disable FCE (%d).\n", rval);
@@ -75,7 +77,8 @@ out:
75static int 77static int
76qla2x00_dfs_fce_release(struct inode *inode, struct file *file) 78qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
77{ 79{
78 scsi_qla_host_t *ha = inode->i_private; 80 scsi_qla_host_t *vha = inode->i_private;
81 struct qla_hw_data *ha = vha->hw;
79 int rval; 82 int rval;
80 83
81 if (ha->flags.fce_enabled) 84 if (ha->flags.fce_enabled)
@@ -86,7 +89,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
86 /* Re-enable FCE tracing. */ 89 /* Re-enable FCE tracing. */
87 ha->flags.fce_enabled = 1; 90 ha->flags.fce_enabled = 1;
88 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs)); 91 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
89 rval = qla2x00_enable_fce_trace(ha, ha->fce_dma, ha->fce_bufs, 92 rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
90 ha->fce_mb, &ha->fce_bufs); 93 ha->fce_mb, &ha->fce_bufs);
91 if (rval) { 94 if (rval) {
92 qla_printk(KERN_WARNING, ha, 95 qla_printk(KERN_WARNING, ha,
@@ -107,8 +110,9 @@ static const struct file_operations dfs_fce_ops = {
107}; 110};
108 111
109int 112int
110qla2x00_dfs_setup(scsi_qla_host_t *ha) 113qla2x00_dfs_setup(scsi_qla_host_t *vha)
111{ 114{
115 struct qla_hw_data *ha = vha->hw;
112 if (!IS_QLA25XX(ha)) 116 if (!IS_QLA25XX(ha))
113 goto out; 117 goto out;
114 if (!ha->fce) 118 if (!ha->fce)
@@ -130,7 +134,7 @@ create_dir:
130 goto create_nodes; 134 goto create_nodes;
131 135
132 mutex_init(&ha->fce_mutex); 136 mutex_init(&ha->fce_mutex);
133 ha->dfs_dir = debugfs_create_dir(ha->host_str, qla2x00_dfs_root); 137 ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
134 if (!ha->dfs_dir) { 138 if (!ha->dfs_dir) {
135 qla_printk(KERN_NOTICE, ha, 139 qla_printk(KERN_NOTICE, ha,
136 "DebugFS: Unable to create ha directory.\n"); 140 "DebugFS: Unable to create ha directory.\n");
@@ -152,8 +156,9 @@ out:
152} 156}
153 157
154int 158int
155qla2x00_dfs_remove(scsi_qla_host_t *ha) 159qla2x00_dfs_remove(scsi_qla_host_t *vha)
156{ 160{
161 struct qla_hw_data *ha = vha->hw;
157 if (ha->dfs_fce) { 162 if (ha->dfs_fce) {
158 debugfs_remove(ha->dfs_fce); 163 debugfs_remove(ha->dfs_fce);
159 ha->dfs_fce = NULL; 164 ha->dfs_fce = NULL;
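The qla_dfs.c hunks show the mechanical conversion applied throughout this series: functions now take the vport (vha), derive the shared hardware state with ha = vha->hw, and keep per-host fields such as host_no and host_str on the vport. A compact stand-alone sketch of that idiom, using stand-in types, is below.

    #include <stdio.h>

    struct qla_hw_data_sk { int fce_enabled; };
    struct scsi_qla_host_sk {
        unsigned long host_no;
        struct qla_hw_data_sk *hw;
    };

    /* After the split: callers pass the vport, hardware state hangs off ->hw. */
    static void dfs_setup_sk(struct scsi_qla_host_sk *vha)
    {
        struct qla_hw_data_sk *ha = vha->hw;  /* first line of most converted functions */
        printf("host %lu, fce %s\n", vha->host_no,
               ha->fce_enabled ? "on" : "off");
    }

    int main(void)
    {
        struct qla_hw_data_sk hw = { .fce_enabled = 1 };
        struct scsi_qla_host_sk vha = { .host_no = 0, .hw = &hw };
        dfs_setup_sk(&vha);
        return 0;
    }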
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d1d14202575a..ee1f1e794c2d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -299,7 +299,8 @@ struct init_cb_24xx {
299 uint32_t response_q_address[2]; 299 uint32_t response_q_address[2];
300 uint32_t prio_request_q_address[2]; 300 uint32_t prio_request_q_address[2];
301 301
302 uint8_t reserved_2[8]; 302 uint16_t msix;
303 uint8_t reserved_2[6];
303 304
304 uint16_t atio_q_inpointer; 305 uint16_t atio_q_inpointer;
305 uint16_t atio_q_length; 306 uint16_t atio_q_length;
@@ -372,8 +373,9 @@ struct init_cb_24xx {
372 * BIT 17-31 = Reserved 373 * BIT 17-31 = Reserved
373 */ 374 */
374 uint32_t firmware_options_3; 375 uint32_t firmware_options_3;
375 376 uint16_t qos;
376 uint8_t reserved_3[24]; 377 uint16_t rid;
378 uint8_t reserved_3[20];
377}; 379};
378 380
379/* 381/*
@@ -754,7 +756,8 @@ struct abort_entry_24xx {
754 756
755 uint32_t handle_to_abort; /* System handle to abort. */ 757 uint32_t handle_to_abort; /* System handle to abort. */
756 758
757 uint8_t reserved_1[32]; 759 uint16_t req_que_no;
760 uint8_t reserved_1[30];
758 761
759 uint8_t port_id[3]; /* PortID of destination port. */ 762 uint8_t port_id[3]; /* PortID of destination port. */
760 uint8_t vp_index; 763 uint8_t vp_index;
@@ -1258,7 +1261,8 @@ struct qla_npiv_header {
1258struct qla_npiv_entry { 1261struct qla_npiv_entry {
1259 uint16_t flags; 1262 uint16_t flags;
1260 uint16_t vf_id; 1263 uint16_t vf_id;
1261 uint16_t qos; 1264 uint8_t q_qos;
1265 uint8_t f_qos;
1262 uint16_t unused1; 1266 uint16_t unused1;
1263 uint8_t port_name[WWN_SIZE]; 1267 uint8_t port_name[WWN_SIZE];
1264 uint8_t node_name[WWN_SIZE]; 1268 uint8_t node_name[WWN_SIZE];
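In qla_fw.h the new interface fields (msix in init_cb_24xx, qos/rid after firmware_options_3, q_qos/f_qos in qla_npiv_entry) are carved out of existing reserved bytes, so the firmware-visible structure sizes should stay unchanged. The trimmed stand-alone check below shows the arithmetic for the qos/rid case; the structure names here are invented for the example.

    #include <stdio.h>
    #include <stdint.h>

    struct icb_tail_old {                 /* before the patch */
        uint32_t firmware_options_3;
        uint8_t  reserved_3[24];
    };

    struct icb_tail_new {                 /* after the patch  */
        uint32_t firmware_options_3;
        uint16_t qos;
        uint16_t rid;
        uint8_t  reserved_3[20];          /* 24 - 2*2 bytes   */
    };

    int main(void)
    {
        printf("old tail %zu bytes, new tail %zu bytes\n",
               sizeof(struct icb_tail_old), sizeof(struct icb_tail_new));
        return 0;                         /* both should report 28 */
    }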
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 753dbe6cce6e..0011e31205db 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -63,6 +63,7 @@ extern int ql2xallocfwdump;
63extern int ql2xextended_error_logging; 63extern int ql2xextended_error_logging;
64extern int ql2xqfullrampup; 64extern int ql2xqfullrampup;
65extern int ql2xiidmaenable; 65extern int ql2xiidmaenable;
66extern int ql2xmaxqueues;
66 67
67extern int qla2x00_loop_reset(scsi_qla_host_t *); 68extern int qla2x00_loop_reset(scsi_qla_host_t *);
68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 69extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -72,7 +73,10 @@ extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
72 uint16_t, uint16_t); 73 uint16_t, uint16_t);
73 74
74extern void qla2x00_abort_fcport_cmds(fc_port_t *); 75extern void qla2x00_abort_fcport_cmds(fc_port_t *);
75 76extern struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *,
77 struct qla_hw_data *);
78extern void qla2x00_free_host(struct scsi_qla_host *);
79extern void qla2x00_relogin(struct scsi_qla_host *);
76/* 80/*
77 * Global Functions in qla_mid.c source file. 81 * Global Functions in qla_mid.c source file.
78 */ 82 */
@@ -94,7 +98,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
94extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); 98extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
95extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *); 99extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
96 100
97extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 101extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
98 102
99extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); 103extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
100 104
@@ -105,10 +109,11 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
105 109
106extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 110extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
107 111
108extern void qla2xxx_wake_dpc(scsi_qla_host_t *); 112extern void qla2xxx_wake_dpc(struct scsi_qla_host *);
109extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *); 113extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
110extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *); 114extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
111extern void qla2x00_vp_abort_isp(scsi_qla_host_t *); 115 uint16_t *);
116extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
112 117
113/* 118/*
114 * Global Function Prototypes in qla_iocb.c source file. 119 * Global Function Prototypes in qla_iocb.c source file.
@@ -119,8 +124,10 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
119extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); 124extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
120extern int qla2x00_start_scsi(srb_t *sp); 125extern int qla2x00_start_scsi(srb_t *sp);
121extern int qla24xx_start_scsi(srb_t *sp); 126extern int qla24xx_start_scsi(srb_t *sp);
122int qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); 127int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
123int __qla2x00_marker(scsi_qla_host_t *, uint16_t, uint16_t, uint8_t); 128 uint16_t, uint16_t, uint8_t);
129int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
130 uint16_t, uint16_t, uint8_t);
124 131
125/* 132/*
126 * Global Function Prototypes in qla_mbx.c source file. 133 * Global Function Prototypes in qla_mbx.c source file.
@@ -154,7 +161,7 @@ extern int
154qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); 161qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
155 162
156extern int 163extern int
157qla2x00_abort_command(scsi_qla_host_t *, srb_t *); 164qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
158 165
159extern int 166extern int
160qla2x00_abort_target(struct fc_port *, unsigned int); 167qla2x00_abort_target(struct fc_port *, unsigned int);
@@ -225,7 +232,7 @@ extern int
225qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, 232qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
226 dma_addr_t); 233 dma_addr_t);
227 234
228extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *); 235extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
229extern int qla24xx_abort_target(struct fc_port *, unsigned int); 236extern int qla24xx_abort_target(struct fc_port *, unsigned int);
230extern int qla24xx_lun_reset(struct fc_port *, unsigned int); 237extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
231 238
@@ -264,10 +271,10 @@ extern int qla84xx_verify_chip(struct scsi_qla_host *, uint16_t *);
264extern irqreturn_t qla2100_intr_handler(int, void *); 271extern irqreturn_t qla2100_intr_handler(int, void *);
265extern irqreturn_t qla2300_intr_handler(int, void *); 272extern irqreturn_t qla2300_intr_handler(int, void *);
266extern irqreturn_t qla24xx_intr_handler(int, void *); 273extern irqreturn_t qla24xx_intr_handler(int, void *);
267extern void qla2x00_process_response_queue(struct scsi_qla_host *); 274extern void qla2x00_process_response_queue(struct rsp_que *);
268extern void qla24xx_process_response_queue(struct scsi_qla_host *); 275extern void qla24xx_process_response_queue(struct rsp_que *);
269 276
270extern int qla2x00_request_irqs(scsi_qla_host_t *); 277extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
271extern void qla2x00_free_irqs(scsi_qla_host_t *); 278extern void qla2x00_free_irqs(scsi_qla_host_t *);
272 279
273/* 280/*
@@ -367,4 +374,27 @@ extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
367 */ 374 */
368extern int qla2x00_dfs_setup(scsi_qla_host_t *); 375extern int qla2x00_dfs_setup(scsi_qla_host_t *);
369extern int qla2x00_dfs_remove(scsi_qla_host_t *); 376extern int qla2x00_dfs_remove(scsi_qla_host_t *);
377
378/* Globa function prototypes for multi-q */
379extern int qla25xx_request_irq(struct rsp_que *);
380extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *,
381 uint8_t);
382extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *,
383 uint8_t);
384extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
385 uint16_t, uint8_t, uint8_t);
386extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
387 uint16_t);
388extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
389extern void qla2x00_init_response_q_entries(struct rsp_que *);
390extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
391extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
392extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
393extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
394extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
395extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
396extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
397extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
398extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
399extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
370#endif /* _QLA_GBL_H */ 400#endif /* _QLA_GBL_H */
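The queue management entry points declared above pair with the req_qid_map/rsp_qid_map bitmaps added to qla_hw_data, which track which of the QLA_MAX_QUEUES ids are in use. How qla25xx_create_req_que() actually picks an id is not visible in this diff; the first-free-bit scan below is only an assumed illustration of what such a bitmap supports, written as a stand-alone program.

    #include <stdio.h>
    #include <limits.h>

    #define QLA_MAX_QUEUES 256
    #define BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long req_qid_map[QLA_MAX_QUEUES / BITS_PER_LONG];

    /* Assumed behaviour: return the lowest free queue id and mark it used. */
    static int alloc_que_id(unsigned long *map)
    {
        for (int id = 1; id < QLA_MAX_QUEUES; id++) {   /* id 0 = base queue */
            unsigned long bit = 1UL << (id % BITS_PER_LONG);
            if (!(map[id / BITS_PER_LONG] & bit)) {
                map[id / BITS_PER_LONG] |= bit;
                return id;
            }
        }
        return -1;                                      /* no free queue id */
    }

    int main(void)
    {
        req_qid_map[0] |= 1UL;                          /* base queue in use */
        printf("first new queue id: %d\n", alloc_que_id(req_qid_map));
        return 0;
    }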
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index c2a4bfbcb05b..0a6f72973996 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -22,8 +22,9 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
22 * Returns a pointer to the @ha's ms_iocb. 22 * Returns a pointer to the @ha's ms_iocb.
23 */ 23 */
24void * 24void *
25qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) 25qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
26{ 26{
27 struct qla_hw_data *ha = vha->hw;
27 ms_iocb_entry_t *ms_pkt; 28 ms_iocb_entry_t *ms_pkt;
28 29
29 ms_pkt = ha->ms_iocb; 30 ms_pkt = ha->ms_iocb;
@@ -59,8 +60,9 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
59 * Returns a pointer to the @ha's ms_iocb. 60 * Returns a pointer to the @ha's ms_iocb.
60 */ 61 */
61void * 62void *
62qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) 63qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
63{ 64{
65 struct qla_hw_data *ha = vha->hw;
64 struct ct_entry_24xx *ct_pkt; 66 struct ct_entry_24xx *ct_pkt;
65 67
66 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 68 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
@@ -82,7 +84,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
82 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 84 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
83 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 85 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
84 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 86 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
85 ct_pkt->vp_index = ha->vp_idx; 87 ct_pkt->vp_index = vha->vp_idx;
86 88
87 return (ct_pkt); 89 return (ct_pkt);
88} 90}
@@ -110,16 +112,17 @@ qla2x00_prep_ct_req(struct ct_sns_req *ct_req, uint16_t cmd, uint16_t rsp_size)
110} 112}
111 113
112static int 114static int
113qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt, 115qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
114 struct ct_sns_rsp *ct_rsp, const char *routine) 116 struct ct_sns_rsp *ct_rsp, const char *routine)
115{ 117{
116 int rval; 118 int rval;
117 uint16_t comp_status; 119 uint16_t comp_status;
120 struct qla_hw_data *ha = vha->hw;
118 121
119 rval = QLA_FUNCTION_FAILED; 122 rval = QLA_FUNCTION_FAILED;
120 if (ms_pkt->entry_status != 0) { 123 if (ms_pkt->entry_status != 0) {
121 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n", 124 DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
122 ha->host_no, routine, ms_pkt->entry_status)); 125 vha->host_no, routine, ms_pkt->entry_status));
123 } else { 126 } else {
124 if (IS_FWI2_CAPABLE(ha)) 127 if (IS_FWI2_CAPABLE(ha))
125 comp_status = le16_to_cpu( 128 comp_status = le16_to_cpu(
@@ -133,7 +136,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
133 if (ct_rsp->header.response != 136 if (ct_rsp->header.response !=
134 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 137 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
135 DEBUG2_3(printk("scsi(%ld): %s failed, " 138 DEBUG2_3(printk("scsi(%ld): %s failed, "
136 "rejected request:\n", ha->host_no, 139 "rejected request:\n", vha->host_no,
137 routine)); 140 routine));
138 DEBUG2_3(qla2x00_dump_buffer( 141 DEBUG2_3(qla2x00_dump_buffer(
139 (uint8_t *)&ct_rsp->header, 142 (uint8_t *)&ct_rsp->header,
@@ -144,7 +147,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
144 break; 147 break;
145 default: 148 default:
146 DEBUG2_3(printk("scsi(%ld): %s failed, completion " 149 DEBUG2_3(printk("scsi(%ld): %s failed, completion "
147 "status (%x).\n", ha->host_no, routine, 150 "status (%x).\n", vha->host_no, routine,
148 comp_status)); 151 comp_status));
149 break; 152 break;
150 } 153 }
@@ -160,21 +163,21 @@ qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
160 * Returns 0 on success. 163 * Returns 0 on success.
161 */ 164 */
162int 165int
163qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport) 166qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
164{ 167{
165 int rval; 168 int rval;
166 169
167 ms_iocb_entry_t *ms_pkt; 170 ms_iocb_entry_t *ms_pkt;
168 struct ct_sns_req *ct_req; 171 struct ct_sns_req *ct_req;
169 struct ct_sns_rsp *ct_rsp; 172 struct ct_sns_rsp *ct_rsp;
173 struct qla_hw_data *ha = vha->hw;
170 174
171 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 175 if (IS_QLA2100(ha) || IS_QLA2200(ha))
172 return (qla2x00_sns_ga_nxt(ha, fcport)); 176 return qla2x00_sns_ga_nxt(vha, fcport);
173 }
174 177
175 /* Issue GA_NXT */ 178 /* Issue GA_NXT */
176 /* Prepare common MS IOCB */ 179 /* Prepare common MS IOCB */
177 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GA_NXT_REQ_SIZE, 180 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
178 GA_NXT_RSP_SIZE); 181 GA_NXT_RSP_SIZE);
179 182
180 /* Prepare CT request */ 183 /* Prepare CT request */
@@ -188,13 +191,13 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
188 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; 191 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
189 192
190 /* Execute MS IOCB */ 193 /* Execute MS IOCB */
191 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 194 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
192 sizeof(ms_iocb_entry_t)); 195 sizeof(ms_iocb_entry_t));
193 if (rval != QLA_SUCCESS) { 196 if (rval != QLA_SUCCESS) {
194 /*EMPTY*/ 197 /*EMPTY*/
195 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", 198 DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
196 ha->host_no, rval)); 199 vha->host_no, rval));
197 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GA_NXT") != 200 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
198 QLA_SUCCESS) { 201 QLA_SUCCESS) {
199 rval = QLA_FUNCTION_FAILED; 202 rval = QLA_FUNCTION_FAILED;
200 } else { 203 } else {
@@ -216,7 +219,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
216 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 219 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
217 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 220 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
218 "portid=%02x%02x%02x.\n", 221 "portid=%02x%02x%02x.\n",
219 ha->host_no, 222 vha->host_no,
220 fcport->node_name[0], fcport->node_name[1], 223 fcport->node_name[0], fcport->node_name[1],
221 fcport->node_name[2], fcport->node_name[3], 224 fcport->node_name[2], fcport->node_name[3],
222 fcport->node_name[4], fcport->node_name[5], 225 fcport->node_name[4], fcport->node_name[5],
@@ -242,7 +245,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
242 * Returns 0 on success. 245 * Returns 0 on success.
243 */ 246 */
244int 247int
245qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list) 248qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
246{ 249{
247 int rval; 250 int rval;
248 uint16_t i; 251 uint16_t i;
@@ -252,16 +255,16 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
252 struct ct_sns_rsp *ct_rsp; 255 struct ct_sns_rsp *ct_rsp;
253 256
254 struct ct_sns_gid_pt_data *gid_data; 257 struct ct_sns_gid_pt_data *gid_data;
258 struct qla_hw_data *ha = vha->hw;
255 259
256 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 260 if (IS_QLA2100(ha) || IS_QLA2200(ha))
257 return (qla2x00_sns_gid_pt(ha, list)); 261 return qla2x00_sns_gid_pt(vha, list);
258 }
259 262
260 gid_data = NULL; 263 gid_data = NULL;
261 264
262 /* Issue GID_PT */ 265 /* Issue GID_PT */
263 /* Prepare common MS IOCB */ 266 /* Prepare common MS IOCB */
264 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GID_PT_REQ_SIZE, 267 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
265 GID_PT_RSP_SIZE); 268 GID_PT_RSP_SIZE);
266 269
267 /* Prepare CT request */ 270 /* Prepare CT request */
@@ -273,13 +276,13 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
273 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; 276 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
274 277
275 /* Execute MS IOCB */ 278 /* Execute MS IOCB */
276 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 279 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
277 sizeof(ms_iocb_entry_t)); 280 sizeof(ms_iocb_entry_t));
278 if (rval != QLA_SUCCESS) { 281 if (rval != QLA_SUCCESS) {
279 /*EMPTY*/ 282 /*EMPTY*/
280 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", 283 DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
281 ha->host_no, rval)); 284 vha->host_no, rval));
282 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GID_PT") != 285 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
283 QLA_SUCCESS) { 286 QLA_SUCCESS) {
284 rval = QLA_FUNCTION_FAILED; 287 rval = QLA_FUNCTION_FAILED;
285 } else { 288 } else {
@@ -320,7 +323,7 @@ qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
320 * Returns 0 on success. 323 * Returns 0 on success.
321 */ 324 */
322int 325int
323qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list) 326qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
324{ 327{
325 int rval; 328 int rval;
326 uint16_t i; 329 uint16_t i;
@@ -328,15 +331,15 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
328 ms_iocb_entry_t *ms_pkt; 331 ms_iocb_entry_t *ms_pkt;
329 struct ct_sns_req *ct_req; 332 struct ct_sns_req *ct_req;
330 struct ct_sns_rsp *ct_rsp; 333 struct ct_sns_rsp *ct_rsp;
334 struct qla_hw_data *ha = vha->hw;
331 335
332 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 336 if (IS_QLA2100(ha) || IS_QLA2200(ha))
333 return (qla2x00_sns_gpn_id(ha, list)); 337 return qla2x00_sns_gpn_id(vha, list);
334 }
335 338
336 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 339 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
337 /* Issue GPN_ID */ 340 /* Issue GPN_ID */
338 /* Prepare common MS IOCB */ 341 /* Prepare common MS IOCB */
339 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GPN_ID_REQ_SIZE, 342 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
340 GPN_ID_RSP_SIZE); 343 GPN_ID_RSP_SIZE);
341 344
342 /* Prepare CT request */ 345 /* Prepare CT request */
@@ -350,13 +353,13 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
350 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 353 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
351 354
352 /* Execute MS IOCB */ 355 /* Execute MS IOCB */
353 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 356 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
354 sizeof(ms_iocb_entry_t)); 357 sizeof(ms_iocb_entry_t));
355 if (rval != QLA_SUCCESS) { 358 if (rval != QLA_SUCCESS) {
356 /*EMPTY*/ 359 /*EMPTY*/
357 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " 360 DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
358 "(%d).\n", ha->host_no, rval)); 361 "(%d).\n", vha->host_no, rval));
359 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 362 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
360 "GPN_ID") != QLA_SUCCESS) { 363 "GPN_ID") != QLA_SUCCESS) {
361 rval = QLA_FUNCTION_FAILED; 364 rval = QLA_FUNCTION_FAILED;
362 } else { 365 } else {
@@ -381,23 +384,22 @@ qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
381 * Returns 0 on success. 384 * Returns 0 on success.
382 */ 385 */
383int 386int
384qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list) 387qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
385{ 388{
386 int rval; 389 int rval;
387 uint16_t i; 390 uint16_t i;
388 391 struct qla_hw_data *ha = vha->hw;
389 ms_iocb_entry_t *ms_pkt; 392 ms_iocb_entry_t *ms_pkt;
390 struct ct_sns_req *ct_req; 393 struct ct_sns_req *ct_req;
391 struct ct_sns_rsp *ct_rsp; 394 struct ct_sns_rsp *ct_rsp;
392 395
393 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 396 if (IS_QLA2100(ha) || IS_QLA2200(ha))
394 return (qla2x00_sns_gnn_id(ha, list)); 397 return qla2x00_sns_gnn_id(vha, list);
395 }
396 398
397 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 399 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
398 /* Issue GNN_ID */ 400 /* Issue GNN_ID */
399 /* Prepare common MS IOCB */ 401 /* Prepare common MS IOCB */
400 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GNN_ID_REQ_SIZE, 402 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
401 GNN_ID_RSP_SIZE); 403 GNN_ID_RSP_SIZE);
402 404
403 /* Prepare CT request */ 405 /* Prepare CT request */
@@ -411,13 +413,13 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
411 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 413 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
412 414
413 /* Execute MS IOCB */ 415 /* Execute MS IOCB */
414 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 416 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
415 sizeof(ms_iocb_entry_t)); 417 sizeof(ms_iocb_entry_t));
416 if (rval != QLA_SUCCESS) { 418 if (rval != QLA_SUCCESS) {
417 /*EMPTY*/ 419 /*EMPTY*/
418 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed " 420 DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
419 "(%d).\n", ha->host_no, rval)); 421 "(%d).\n", vha->host_no, rval));
420 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 422 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
421 "GNN_ID") != QLA_SUCCESS) { 423 "GNN_ID") != QLA_SUCCESS) {
422 rval = QLA_FUNCTION_FAILED; 424 rval = QLA_FUNCTION_FAILED;
423 } else { 425 } else {
@@ -429,7 +431,7 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
429 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 431 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
430 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 432 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
431 "portid=%02x%02x%02x.\n", 433 "portid=%02x%02x%02x.\n",
432 ha->host_no, 434 vha->host_no,
433 list[i].node_name[0], list[i].node_name[1], 435 list[i].node_name[0], list[i].node_name[1],
434 list[i].node_name[2], list[i].node_name[3], 436 list[i].node_name[2], list[i].node_name[3],
435 list[i].node_name[4], list[i].node_name[5], 437 list[i].node_name[4], list[i].node_name[5],
@@ -457,21 +459,20 @@ qla2x00_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
457 * Returns 0 on success. 459 * Returns 0 on success.
458 */ 460 */
459int 461int
460qla2x00_rft_id(scsi_qla_host_t *ha) 462qla2x00_rft_id(scsi_qla_host_t *vha)
461{ 463{
462 int rval; 464 int rval;
463 465 struct qla_hw_data *ha = vha->hw;
464 ms_iocb_entry_t *ms_pkt; 466 ms_iocb_entry_t *ms_pkt;
465 struct ct_sns_req *ct_req; 467 struct ct_sns_req *ct_req;
466 struct ct_sns_rsp *ct_rsp; 468 struct ct_sns_rsp *ct_rsp;
467 469
468 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 470 if (IS_QLA2100(ha) || IS_QLA2200(ha))
469 return (qla2x00_sns_rft_id(ha)); 471 return qla2x00_sns_rft_id(vha);
470 }
471 472
472 /* Issue RFT_ID */ 473 /* Issue RFT_ID */
473 /* Prepare common MS IOCB */ 474 /* Prepare common MS IOCB */
474 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFT_ID_REQ_SIZE, 475 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
475 RFT_ID_RSP_SIZE); 476 RFT_ID_RSP_SIZE);
476 477
477 /* Prepare CT request */ 478 /* Prepare CT request */
@@ -480,25 +481,25 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
480 ct_rsp = &ha->ct_sns->p.rsp; 481 ct_rsp = &ha->ct_sns->p.rsp;
481 482
482 /* Prepare CT arguments -- port_id, FC-4 types */ 483 /* Prepare CT arguments -- port_id, FC-4 types */
483 ct_req->req.rft_id.port_id[0] = ha->d_id.b.domain; 484 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
484 ct_req->req.rft_id.port_id[1] = ha->d_id.b.area; 485 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
485 ct_req->req.rft_id.port_id[2] = ha->d_id.b.al_pa; 486 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
486 487
487 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ 488 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
488 489
489 /* Execute MS IOCB */ 490 /* Execute MS IOCB */
490 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 491 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
491 sizeof(ms_iocb_entry_t)); 492 sizeof(ms_iocb_entry_t));
492 if (rval != QLA_SUCCESS) { 493 if (rval != QLA_SUCCESS) {
493 /*EMPTY*/ 494 /*EMPTY*/
494 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n", 495 DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
495 ha->host_no, rval)); 496 vha->host_no, rval));
496 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFT_ID") != 497 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
497 QLA_SUCCESS) { 498 QLA_SUCCESS) {
498 rval = QLA_FUNCTION_FAILED; 499 rval = QLA_FUNCTION_FAILED;
499 } else { 500 } else {
500 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 501 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
501 ha->host_no)); 502 vha->host_no));
502 } 503 }
503 504
504 return (rval); 505 return (rval);
@@ -511,23 +512,23 @@ qla2x00_rft_id(scsi_qla_host_t *ha)
511 * Returns 0 on success. 512 * Returns 0 on success.
512 */ 513 */
513int 514int
514qla2x00_rff_id(scsi_qla_host_t *ha) 515qla2x00_rff_id(scsi_qla_host_t *vha)
515{ 516{
516 int rval; 517 int rval;
517 518 struct qla_hw_data *ha = vha->hw;
518 ms_iocb_entry_t *ms_pkt; 519 ms_iocb_entry_t *ms_pkt;
519 struct ct_sns_req *ct_req; 520 struct ct_sns_req *ct_req;
520 struct ct_sns_rsp *ct_rsp; 521 struct ct_sns_rsp *ct_rsp;
521 522
522 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 523 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
523 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " 524 DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
524 "ISP2100/ISP2200.\n", ha->host_no)); 525 "ISP2100/ISP2200.\n", vha->host_no));
525 return (QLA_SUCCESS); 526 return (QLA_SUCCESS);
526 } 527 }
527 528
528 /* Issue RFF_ID */ 529 /* Issue RFF_ID */
529 /* Prepare common MS IOCB */ 530 /* Prepare common MS IOCB */
530 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RFF_ID_REQ_SIZE, 531 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
531 RFF_ID_RSP_SIZE); 532 RFF_ID_RSP_SIZE);
532 533
533 /* Prepare CT request */ 534 /* Prepare CT request */
@@ -536,26 +537,26 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
536 ct_rsp = &ha->ct_sns->p.rsp; 537 ct_rsp = &ha->ct_sns->p.rsp;
537 538
538 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ 539 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
539 ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain; 540 ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
540 ct_req->req.rff_id.port_id[1] = ha->d_id.b.area; 541 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
541 ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa; 542 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
542 543
543 ct_req->req.rff_id.fc4_feature = BIT_1; 544 ct_req->req.rff_id.fc4_feature = BIT_1;
544 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ 545 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
545 546
546 /* Execute MS IOCB */ 547 /* Execute MS IOCB */
547 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 548 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
548 sizeof(ms_iocb_entry_t)); 549 sizeof(ms_iocb_entry_t));
549 if (rval != QLA_SUCCESS) { 550 if (rval != QLA_SUCCESS) {
550 /*EMPTY*/ 551 /*EMPTY*/
551 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", 552 DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
552 ha->host_no, rval)); 553 vha->host_no, rval));
553 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFF_ID") != 554 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
554 QLA_SUCCESS) { 555 QLA_SUCCESS) {
555 rval = QLA_FUNCTION_FAILED; 556 rval = QLA_FUNCTION_FAILED;
556 } else { 557 } else {
557 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", 558 DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
558 ha->host_no)); 559 vha->host_no));
559 } 560 }
560 561
561 return (rval); 562 return (rval);
@@ -568,21 +569,20 @@ qla2x00_rff_id(scsi_qla_host_t *ha)
568 * Returns 0 on success. 569 * Returns 0 on success.
569 */ 570 */
570int 571int
571qla2x00_rnn_id(scsi_qla_host_t *ha) 572qla2x00_rnn_id(scsi_qla_host_t *vha)
572{ 573{
573 int rval; 574 int rval;
574 575 struct qla_hw_data *ha = vha->hw;
575 ms_iocb_entry_t *ms_pkt; 576 ms_iocb_entry_t *ms_pkt;
576 struct ct_sns_req *ct_req; 577 struct ct_sns_req *ct_req;
577 struct ct_sns_rsp *ct_rsp; 578 struct ct_sns_rsp *ct_rsp;
578 579
579 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 580 if (IS_QLA2100(ha) || IS_QLA2200(ha))
580 return (qla2x00_sns_rnn_id(ha)); 581 return qla2x00_sns_rnn_id(vha);
581 }
582 582
583 /* Issue RNN_ID */ 583 /* Issue RNN_ID */
584 /* Prepare common MS IOCB */ 584 /* Prepare common MS IOCB */
585 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, RNN_ID_REQ_SIZE, 585 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
586 RNN_ID_RSP_SIZE); 586 RNN_ID_RSP_SIZE);
587 587
588 /* Prepare CT request */ 588 /* Prepare CT request */
@@ -591,33 +591,34 @@ qla2x00_rnn_id(scsi_qla_host_t *ha)
591 ct_rsp = &ha->ct_sns->p.rsp; 591 ct_rsp = &ha->ct_sns->p.rsp;
592 592
593 /* Prepare CT arguments -- port_id, node_name */ 593 /* Prepare CT arguments -- port_id, node_name */
594 ct_req->req.rnn_id.port_id[0] = ha->d_id.b.domain; 594 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
595 ct_req->req.rnn_id.port_id[1] = ha->d_id.b.area; 595 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
596 ct_req->req.rnn_id.port_id[2] = ha->d_id.b.al_pa; 596 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
597 597
598 memcpy(ct_req->req.rnn_id.node_name, ha->node_name, WWN_SIZE); 598 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
599 599
600 /* Execute MS IOCB */ 600 /* Execute MS IOCB */
601 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 601 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
602 sizeof(ms_iocb_entry_t)); 602 sizeof(ms_iocb_entry_t));
603 if (rval != QLA_SUCCESS) { 603 if (rval != QLA_SUCCESS) {
604 /*EMPTY*/ 604 /*EMPTY*/
605 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n", 605 DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
606 ha->host_no, rval)); 606 vha->host_no, rval));
607 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RNN_ID") != 607 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
608 QLA_SUCCESS) { 608 QLA_SUCCESS) {
609 rval = QLA_FUNCTION_FAILED; 609 rval = QLA_FUNCTION_FAILED;
610 } else { 610 } else {
611 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 611 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
612 ha->host_no)); 612 vha->host_no));
613 } 613 }
614 614
615 return (rval); 615 return (rval);
616} 616}
617 617
618void 618void
619qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn) 619qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
620{ 620{
621 struct qla_hw_data *ha = vha->hw;
621 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number, 622 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
622 ha->fw_major_version, ha->fw_minor_version, 623 ha->fw_major_version, ha->fw_minor_version,
623 ha->fw_subminor_version, qla2x00_version_str); 624 ha->fw_subminor_version, qla2x00_version_str);
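
Note: the hunks above all apply one conversion. Each GS/SNS helper now takes the per-port scsi_qla_host_t (renamed vha) and reaches shared adapter state through vha->hw, so port identity (host_no, d_id, node_name, port_name, flags) is read from vha while the MS IOCB, the CT/SNS DMA buffers and isp_ops stay on struct qla_hw_data. A minimal sketch of the idiom follows; the function name is hypothetical and the body is trimmed to the two lookups that matter, using only identifiers that appear in the hunks above.

	static int qla2x00_example_gs_cmd(scsi_qla_host_t *vha)
	{
		struct qla_hw_data *ha = vha->hw;	/* shared physical-HBA state */

		/* Per-port identity comes from the vport ... */
		DEBUG2(printk("scsi(%ld): issuing GS command.\n", vha->host_no));

		/* ... shared DMA resources come from the hardware object. */
		return qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
		    sizeof(ms_iocb_entry_t));
	}
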
@@ -630,23 +631,24 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *ha, uint8_t *snn)
630 * Returns 0 on success. 631 * Returns 0 on success.
631 */ 632 */
632int 633int
633qla2x00_rsnn_nn(scsi_qla_host_t *ha) 634qla2x00_rsnn_nn(scsi_qla_host_t *vha)
634{ 635{
635 int rval; 636 int rval;
637 struct qla_hw_data *ha = vha->hw;
636 ms_iocb_entry_t *ms_pkt; 638 ms_iocb_entry_t *ms_pkt;
637 struct ct_sns_req *ct_req; 639 struct ct_sns_req *ct_req;
638 struct ct_sns_rsp *ct_rsp; 640 struct ct_sns_rsp *ct_rsp;
639 641
640 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 642 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
641 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " 643 DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
642 "ISP2100/ISP2200.\n", ha->host_no)); 644 "ISP2100/ISP2200.\n", vha->host_no));
643 return (QLA_SUCCESS); 645 return (QLA_SUCCESS);
644 } 646 }
645 647
646 /* Issue RSNN_NN */ 648 /* Issue RSNN_NN */
647 /* Prepare common MS IOCB */ 649 /* Prepare common MS IOCB */
648 /* Request size adjusted after CT preparation */ 650 /* Request size adjusted after CT preparation */
649 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE); 651 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
650 652
651 /* Prepare CT request */ 653 /* Prepare CT request */
652 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, 654 ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD,
@@ -654,10 +656,10 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
654 ct_rsp = &ha->ct_sns->p.rsp; 656 ct_rsp = &ha->ct_sns->p.rsp;
655 657
656 /* Prepare CT arguments -- node_name, symbolic node_name, size */ 658 /* Prepare CT arguments -- node_name, symbolic node_name, size */
657 memcpy(ct_req->req.rsnn_nn.node_name, ha->node_name, WWN_SIZE); 659 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
658 660
659 /* Prepare the Symbolic Node Name */ 661 /* Prepare the Symbolic Node Name */
660 qla2x00_get_sym_node_name(ha, ct_req->req.rsnn_nn.sym_node_name); 662 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
661 663
662 /* Calculate SNN length */ 664 /* Calculate SNN length */
663 ct_req->req.rsnn_nn.name_len = 665 ct_req->req.rsnn_nn.name_len =
@@ -669,18 +671,18 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
669 ms_pkt->dseg_req_length = ms_pkt->req_bytecount; 671 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
670 672
671 /* Execute MS IOCB */ 673 /* Execute MS IOCB */
672 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 674 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
673 sizeof(ms_iocb_entry_t)); 675 sizeof(ms_iocb_entry_t));
674 if (rval != QLA_SUCCESS) { 676 if (rval != QLA_SUCCESS) {
675 /*EMPTY*/ 677 /*EMPTY*/
676 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", 678 DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
677 ha->host_no, rval)); 679 vha->host_no, rval));
678 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RSNN_NN") != 680 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
679 QLA_SUCCESS) { 681 QLA_SUCCESS) {
680 rval = QLA_FUNCTION_FAILED; 682 rval = QLA_FUNCTION_FAILED;
681 } else { 683 } else {
682 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", 684 DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
683 ha->host_no)); 685 vha->host_no));
684 } 686 }
685 687
686 return (rval); 688 return (rval);
@@ -696,11 +698,12 @@ qla2x00_rsnn_nn(scsi_qla_host_t *ha)
696 * Returns a pointer to the @ha's sns_cmd. 698 * Returns a pointer to the @ha's sns_cmd.
697 */ 699 */
698static inline struct sns_cmd_pkt * 700static inline struct sns_cmd_pkt *
699qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len, 701qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
700 uint16_t data_size) 702 uint16_t data_size)
701{ 703{
702 uint16_t wc; 704 uint16_t wc;
703 struct sns_cmd_pkt *sns_cmd; 705 struct sns_cmd_pkt *sns_cmd;
706 struct qla_hw_data *ha = vha->hw;
704 707
705 sns_cmd = ha->sns_cmd; 708 sns_cmd = ha->sns_cmd;
706 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt)); 709 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
@@ -726,15 +729,15 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *ha, uint16_t cmd, uint16_t scmd_len,
726 * Returns 0 on success. 729 * Returns 0 on success.
727 */ 730 */
728static int 731static int
729qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport) 732qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
730{ 733{
731 int rval; 734 int rval;
732 735 struct qla_hw_data *ha = vha->hw;
733 struct sns_cmd_pkt *sns_cmd; 736 struct sns_cmd_pkt *sns_cmd;
734 737
735 /* Issue GA_NXT. */ 738 /* Issue GA_NXT. */
736 /* Prepare SNS command request. */ 739 /* Prepare SNS command request. */
737 sns_cmd = qla2x00_prep_sns_cmd(ha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN, 740 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
738 GA_NXT_SNS_DATA_SIZE); 741 GA_NXT_SNS_DATA_SIZE);
739 742
740 /* Prepare SNS command arguments -- port_id. */ 743 /* Prepare SNS command arguments -- port_id. */
@@ -743,16 +746,16 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
743 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain; 746 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
744 747
745 /* Execute SNS command. */ 748 /* Execute SNS command. */
746 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2, 749 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
747 sizeof(struct sns_cmd_pkt)); 750 sizeof(struct sns_cmd_pkt));
748 if (rval != QLA_SUCCESS) { 751 if (rval != QLA_SUCCESS) {
749 /*EMPTY*/ 752 /*EMPTY*/
750 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n", 753 DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
751 ha->host_no, rval)); 754 vha->host_no, rval));
752 } else if (sns_cmd->p.gan_data[8] != 0x80 || 755 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
753 sns_cmd->p.gan_data[9] != 0x02) { 756 sns_cmd->p.gan_data[9] != 0x02) {
754 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " 757 DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
755 "ga_nxt_rsp:\n", ha->host_no)); 758 "ga_nxt_rsp:\n", vha->host_no));
756 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16)); 759 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
757 rval = QLA_FUNCTION_FAILED; 760 rval = QLA_FUNCTION_FAILED;
758 } else { 761 } else {
@@ -772,7 +775,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
772 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 775 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
773 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 776 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
774 "portid=%02x%02x%02x.\n", 777 "portid=%02x%02x%02x.\n",
775 ha->host_no, 778 vha->host_no,
776 fcport->node_name[0], fcport->node_name[1], 779 fcport->node_name[0], fcport->node_name[1],
777 fcport->node_name[2], fcport->node_name[3], 780 fcport->node_name[2], fcport->node_name[3],
778 fcport->node_name[4], fcport->node_name[5], 781 fcport->node_name[4], fcport->node_name[5],
@@ -800,33 +803,33 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport)
800 * Returns 0 on success. 803 * Returns 0 on success.
801 */ 804 */
802static int 805static int
803qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list) 806qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
804{ 807{
805 int rval; 808 int rval;
806 809 struct qla_hw_data *ha = vha->hw;
807 uint16_t i; 810 uint16_t i;
808 uint8_t *entry; 811 uint8_t *entry;
809 struct sns_cmd_pkt *sns_cmd; 812 struct sns_cmd_pkt *sns_cmd;
810 813
811 /* Issue GID_PT. */ 814 /* Issue GID_PT. */
812 /* Prepare SNS command request. */ 815 /* Prepare SNS command request. */
813 sns_cmd = qla2x00_prep_sns_cmd(ha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, 816 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
814 GID_PT_SNS_DATA_SIZE); 817 GID_PT_SNS_DATA_SIZE);
815 818
816 /* Prepare SNS command arguments -- port_type. */ 819 /* Prepare SNS command arguments -- port_type. */
817 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; 820 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
818 821
819 /* Execute SNS command. */ 822 /* Execute SNS command. */
820 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2, 823 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
821 sizeof(struct sns_cmd_pkt)); 824 sizeof(struct sns_cmd_pkt));
822 if (rval != QLA_SUCCESS) { 825 if (rval != QLA_SUCCESS) {
823 /*EMPTY*/ 826 /*EMPTY*/
824 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n", 827 DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
825 ha->host_no, rval)); 828 vha->host_no, rval));
826 } else if (sns_cmd->p.gid_data[8] != 0x80 || 829 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
827 sns_cmd->p.gid_data[9] != 0x02) { 830 sns_cmd->p.gid_data[9] != 0x02) {
828 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " 831 DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
829 "gid_rsp:\n", ha->host_no)); 832 "gid_rsp:\n", vha->host_no));
830 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16)); 833 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
831 rval = QLA_FUNCTION_FAILED; 834 rval = QLA_FUNCTION_FAILED;
832 } else { 835 } else {
@@ -867,17 +870,17 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *ha, sw_info_t *list)
867 * Returns 0 on success. 870 * Returns 0 on success.
868 */ 871 */
869static int 872static int
870qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list) 873qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
871{ 874{
872 int rval; 875 int rval;
873 876 struct qla_hw_data *ha = vha->hw;
874 uint16_t i; 877 uint16_t i;
875 struct sns_cmd_pkt *sns_cmd; 878 struct sns_cmd_pkt *sns_cmd;
876 879
877 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 880 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
878 /* Issue GPN_ID */ 881 /* Issue GPN_ID */
879 /* Prepare SNS command request. */ 882 /* Prepare SNS command request. */
880 sns_cmd = qla2x00_prep_sns_cmd(ha, GPN_ID_CMD, 883 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
881 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE); 884 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
882 885
883 /* Prepare SNS command arguments -- port_id. */ 886 /* Prepare SNS command arguments -- port_id. */
@@ -886,16 +889,16 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
886 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; 889 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
887 890
888 /* Execute SNS command. */ 891 /* Execute SNS command. */
889 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, 892 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
890 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 893 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
891 if (rval != QLA_SUCCESS) { 894 if (rval != QLA_SUCCESS) {
892 /*EMPTY*/ 895 /*EMPTY*/
893 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed " 896 DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
894 "(%d).\n", ha->host_no, rval)); 897 "(%d).\n", vha->host_no, rval));
895 } else if (sns_cmd->p.gpn_data[8] != 0x80 || 898 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
896 sns_cmd->p.gpn_data[9] != 0x02) { 899 sns_cmd->p.gpn_data[9] != 0x02) {
897 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " 900 DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
898 "request, gpn_rsp:\n", ha->host_no)); 901 "request, gpn_rsp:\n", vha->host_no));
899 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16)); 902 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
900 rval = QLA_FUNCTION_FAILED; 903 rval = QLA_FUNCTION_FAILED;
901 } else { 904 } else {
@@ -922,17 +925,17 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *ha, sw_info_t *list)
922 * Returns 0 on success. 925 * Returns 0 on success.
923 */ 926 */
924static int 927static int
925qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list) 928qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
926{ 929{
927 int rval; 930 int rval;
928 931 struct qla_hw_data *ha = vha->hw;
929 uint16_t i; 932 uint16_t i;
930 struct sns_cmd_pkt *sns_cmd; 933 struct sns_cmd_pkt *sns_cmd;
931 934
932 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 935 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
933 /* Issue GNN_ID */ 936 /* Issue GNN_ID */
934 /* Prepare SNS command request. */ 937 /* Prepare SNS command request. */
935 sns_cmd = qla2x00_prep_sns_cmd(ha, GNN_ID_CMD, 938 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
936 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE); 939 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
937 940
938 /* Prepare SNS command arguments -- port_id. */ 941 /* Prepare SNS command arguments -- port_id. */
@@ -941,16 +944,16 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
941 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain; 944 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
942 945
943 /* Execute SNS command. */ 946 /* Execute SNS command. */
944 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, 947 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
945 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt)); 948 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
946 if (rval != QLA_SUCCESS) { 949 if (rval != QLA_SUCCESS) {
947 /*EMPTY*/ 950 /*EMPTY*/
948 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed " 951 DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
949 "(%d).\n", ha->host_no, rval)); 952 "(%d).\n", vha->host_no, rval));
950 } else if (sns_cmd->p.gnn_data[8] != 0x80 || 953 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
951 sns_cmd->p.gnn_data[9] != 0x02) { 954 sns_cmd->p.gnn_data[9] != 0x02) {
952 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected " 955 DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
953 "request, gnn_rsp:\n", ha->host_no)); 956 "request, gnn_rsp:\n", vha->host_no));
954 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16)); 957 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
955 rval = QLA_FUNCTION_FAILED; 958 rval = QLA_FUNCTION_FAILED;
956 } else { 959 } else {
@@ -962,7 +965,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
962 "nn %02x%02x%02x%02x%02x%02x%02x%02x " 965 "nn %02x%02x%02x%02x%02x%02x%02x%02x "
963 "pn %02x%02x%02x%02x%02x%02x%02x%02x " 966 "pn %02x%02x%02x%02x%02x%02x%02x%02x "
964 "portid=%02x%02x%02x.\n", 967 "portid=%02x%02x%02x.\n",
965 ha->host_no, 968 vha->host_no,
966 list[i].node_name[0], list[i].node_name[1], 969 list[i].node_name[0], list[i].node_name[1],
967 list[i].node_name[2], list[i].node_name[3], 970 list[i].node_name[2], list[i].node_name[3],
968 list[i].node_name[4], list[i].node_name[5], 971 list[i].node_name[4], list[i].node_name[5],
@@ -992,40 +995,40 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *ha, sw_info_t *list)
992 * Returns 0 on success. 995 * Returns 0 on success.
993 */ 996 */
994static int 997static int
995qla2x00_sns_rft_id(scsi_qla_host_t *ha) 998qla2x00_sns_rft_id(scsi_qla_host_t *vha)
996{ 999{
997 int rval; 1000 int rval;
998 1001 struct qla_hw_data *ha = vha->hw;
999 struct sns_cmd_pkt *sns_cmd; 1002 struct sns_cmd_pkt *sns_cmd;
1000 1003
1001 /* Issue RFT_ID. */ 1004 /* Issue RFT_ID. */
1002 /* Prepare SNS command request. */ 1005 /* Prepare SNS command request. */
1003 sns_cmd = qla2x00_prep_sns_cmd(ha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN, 1006 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1004 RFT_ID_SNS_DATA_SIZE); 1007 RFT_ID_SNS_DATA_SIZE);
1005 1008
1006 /* Prepare SNS command arguments -- port_id, FC-4 types */ 1009 /* Prepare SNS command arguments -- port_id, FC-4 types */
1007 sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa; 1010 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1008 sns_cmd->p.cmd.param[1] = ha->d_id.b.area; 1011 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1009 sns_cmd->p.cmd.param[2] = ha->d_id.b.domain; 1012 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1010 1013
1011 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */ 1014 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1012 1015
1013 /* Execute SNS command. */ 1016 /* Execute SNS command. */
1014 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2, 1017 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1015 sizeof(struct sns_cmd_pkt)); 1018 sizeof(struct sns_cmd_pkt));
1016 if (rval != QLA_SUCCESS) { 1019 if (rval != QLA_SUCCESS) {
1017 /*EMPTY*/ 1020 /*EMPTY*/
1018 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n", 1021 DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
1019 ha->host_no, rval)); 1022 vha->host_no, rval));
1020 } else if (sns_cmd->p.rft_data[8] != 0x80 || 1023 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1021 sns_cmd->p.rft_data[9] != 0x02) { 1024 sns_cmd->p.rft_data[9] != 0x02) {
1022 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, " 1025 DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
1023 "rft_rsp:\n", ha->host_no)); 1026 "rft_rsp:\n", vha->host_no));
1024 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16)); 1027 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
1025 rval = QLA_FUNCTION_FAILED; 1028 rval = QLA_FUNCTION_FAILED;
1026 } else { 1029 } else {
1027 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n", 1030 DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
1028 ha->host_no)); 1031 vha->host_no));
1029 } 1032 }
1030 1033
1031 return (rval); 1034 return (rval);
@@ -1041,47 +1044,47 @@ qla2x00_sns_rft_id(scsi_qla_host_t *ha)
1041 * Returns 0 on success. 1044 * Returns 0 on success.
1042 */ 1045 */
1043static int 1046static int
1044qla2x00_sns_rnn_id(scsi_qla_host_t *ha) 1047qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1045{ 1048{
1046 int rval; 1049 int rval;
1047 1050 struct qla_hw_data *ha = vha->hw;
1048 struct sns_cmd_pkt *sns_cmd; 1051 struct sns_cmd_pkt *sns_cmd;
1049 1052
1050 /* Issue RNN_ID. */ 1053 /* Issue RNN_ID. */
1051 /* Prepare SNS command request. */ 1054 /* Prepare SNS command request. */
1052 sns_cmd = qla2x00_prep_sns_cmd(ha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN, 1055 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1053 RNN_ID_SNS_DATA_SIZE); 1056 RNN_ID_SNS_DATA_SIZE);
1054 1057
1055 /* Prepare SNS command arguments -- port_id, nodename. */ 1058 /* Prepare SNS command arguments -- port_id, nodename. */
1056 sns_cmd->p.cmd.param[0] = ha->d_id.b.al_pa; 1059 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1057 sns_cmd->p.cmd.param[1] = ha->d_id.b.area; 1060 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1058 sns_cmd->p.cmd.param[2] = ha->d_id.b.domain; 1061 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1059 1062
1060 sns_cmd->p.cmd.param[4] = ha->node_name[7]; 1063 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1061 sns_cmd->p.cmd.param[5] = ha->node_name[6]; 1064 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1062 sns_cmd->p.cmd.param[6] = ha->node_name[5]; 1065 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1063 sns_cmd->p.cmd.param[7] = ha->node_name[4]; 1066 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1064 sns_cmd->p.cmd.param[8] = ha->node_name[3]; 1067 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1065 sns_cmd->p.cmd.param[9] = ha->node_name[2]; 1068 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1066 sns_cmd->p.cmd.param[10] = ha->node_name[1]; 1069 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1067 sns_cmd->p.cmd.param[11] = ha->node_name[0]; 1070 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1068 1071
1069 /* Execute SNS command. */ 1072 /* Execute SNS command. */
1070 rval = qla2x00_send_sns(ha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2, 1073 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1071 sizeof(struct sns_cmd_pkt)); 1074 sizeof(struct sns_cmd_pkt));
1072 if (rval != QLA_SUCCESS) { 1075 if (rval != QLA_SUCCESS) {
1073 /*EMPTY*/ 1076 /*EMPTY*/
1074 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n", 1077 DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
1075 ha->host_no, rval)); 1078 vha->host_no, rval));
1076 } else if (sns_cmd->p.rnn_data[8] != 0x80 || 1079 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1077 sns_cmd->p.rnn_data[9] != 0x02) { 1080 sns_cmd->p.rnn_data[9] != 0x02) {
1078 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, " 1081 DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
1079 "rnn_rsp:\n", ha->host_no)); 1082 "rnn_rsp:\n", vha->host_no));
1080 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16)); 1083 DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
1081 rval = QLA_FUNCTION_FAILED; 1084 rval = QLA_FUNCTION_FAILED;
1082 } else { 1085 } else {
1083 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n", 1086 DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
1084 ha->host_no)); 1087 vha->host_no));
1085 } 1088 }
1086 1089
1087 return (rval); 1090 return (rval);
@@ -1094,25 +1097,25 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *ha)
1094 * Returns 0 on success. 1097 * Returns 0 on success.
1095 */ 1098 */
1096static int 1099static int
1097qla2x00_mgmt_svr_login(scsi_qla_host_t *ha) 1100qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1098{ 1101{
1099 int ret; 1102 int ret;
1100 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1103 uint16_t mb[MAILBOX_REGISTER_COUNT];
1101 1104 struct qla_hw_data *ha = vha->hw;
1102 ret = QLA_SUCCESS; 1105 ret = QLA_SUCCESS;
1103 if (ha->flags.management_server_logged_in) 1106 if (vha->flags.management_server_logged_in)
1104 return ret; 1107 return ret;
1105 1108
1106 ha->isp_ops->fabric_login(ha, ha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1109 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
1107 mb, BIT_1); 1110 mb, BIT_1);
1108 if (mb[0] != MBS_COMMAND_COMPLETE) { 1111 if (mb[0] != MBS_COMMAND_COMPLETE) {
1109 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " 1112 DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
1110 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", 1113 "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
1111 __func__, ha->host_no, ha->mgmt_svr_loop_id, mb[0], mb[1], 1114 __func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
1112 mb[2], mb[6], mb[7])); 1115 mb[2], mb[6], mb[7]));
1113 ret = QLA_FUNCTION_FAILED; 1116 ret = QLA_FUNCTION_FAILED;
1114 } else 1117 } else
1115 ha->flags.management_server_logged_in = 1; 1118 vha->flags.management_server_logged_in = 1;
1116 1119
1117 return ret; 1120 return ret;
1118} 1121}
@@ -1126,17 +1129,17 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *ha)
1126 * Returns a pointer to the @ha's ms_iocb. 1129 * Returns a pointer to the @ha's ms_iocb.
1127 */ 1130 */
1128void * 1131void *
1129qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1132qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1130 uint32_t rsp_size) 1133 uint32_t rsp_size)
1131{ 1134{
1132 ms_iocb_entry_t *ms_pkt; 1135 ms_iocb_entry_t *ms_pkt;
1133 1136 struct qla_hw_data *ha = vha->hw;
1134 ms_pkt = ha->ms_iocb; 1137 ms_pkt = ha->ms_iocb;
1135 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t)); 1138 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1136 1139
1137 ms_pkt->entry_type = MS_IOCB_TYPE; 1140 ms_pkt->entry_type = MS_IOCB_TYPE;
1138 ms_pkt->entry_count = 1; 1141 ms_pkt->entry_count = 1;
1139 SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); 1142 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1140 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); 1143 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
1141 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1144 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1142 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1145 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
@@ -1164,17 +1167,18 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1164 * Returns a pointer to the @ha's ms_iocb. 1167 * Returns a pointer to the @ha's ms_iocb.
1165 */ 1168 */
1166void * 1169void *
1167qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1170qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1168 uint32_t rsp_size) 1171 uint32_t rsp_size)
1169{ 1172{
1170 struct ct_entry_24xx *ct_pkt; 1173 struct ct_entry_24xx *ct_pkt;
1174 struct qla_hw_data *ha = vha->hw;
1171 1175
1172 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1176 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1173 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 1177 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1174 1178
1175 ct_pkt->entry_type = CT_IOCB_TYPE; 1179 ct_pkt->entry_type = CT_IOCB_TYPE;
1176 ct_pkt->entry_count = 1; 1180 ct_pkt->entry_count = 1;
1177 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1181 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1178 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1182 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1179 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1183 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1180 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1184 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1188,14 +1192,15 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1188 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1192 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1189 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1193 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1190 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1194 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1191 ct_pkt->vp_index = ha->vp_idx; 1195 ct_pkt->vp_index = vha->vp_idx;
1192 1196
1193 return ct_pkt; 1197 return ct_pkt;
1194} 1198}
1195 1199
1196static inline ms_iocb_entry_t * 1200static inline ms_iocb_entry_t *
1197qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size) 1201qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1198{ 1202{
1203 struct qla_hw_data *ha = vha->hw;
1199 ms_iocb_entry_t *ms_pkt = ha->ms_iocb; 1204 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1200 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1205 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1201 1206
@@ -1240,7 +1245,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_req *ct_req, uint16_t cmd,
1240 * Returns 0 on success. 1245 * Returns 0 on success.
1241 */ 1246 */
1242static int 1247static int
1243qla2x00_fdmi_rhba(scsi_qla_host_t *ha) 1248qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1244{ 1249{
1245 int rval, alen; 1250 int rval, alen;
1246 uint32_t size, sn; 1251 uint32_t size, sn;
@@ -1250,11 +1255,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1250 struct ct_sns_rsp *ct_rsp; 1255 struct ct_sns_rsp *ct_rsp;
1251 uint8_t *entries; 1256 uint8_t *entries;
1252 struct ct_fdmi_hba_attr *eiter; 1257 struct ct_fdmi_hba_attr *eiter;
1258 struct qla_hw_data *ha = vha->hw;
1253 1259
1254 /* Issue RHBA */ 1260 /* Issue RHBA */
1255 /* Prepare common MS IOCB */ 1261 /* Prepare common MS IOCB */
1256 /* Request size adjusted after CT preparation */ 1262 /* Request size adjusted after CT preparation */
1257 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RHBA_RSP_SIZE); 1263 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1258 1264
1259 /* Prepare CT request */ 1265 /* Prepare CT request */
1260 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD, 1266 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RHBA_CMD,
@@ -1262,9 +1268,9 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1262 ct_rsp = &ha->ct_sns->p.rsp; 1268 ct_rsp = &ha->ct_sns->p.rsp;
1263 1269
1264 /* Prepare FDMI command arguments -- attribute block, attributes. */ 1270 /* Prepare FDMI command arguments -- attribute block, attributes. */
1265 memcpy(ct_req->req.rhba.hba_identifier, ha->port_name, WWN_SIZE); 1271 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1266 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1); 1272 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
1267 memcpy(ct_req->req.rhba.port_name, ha->port_name, WWN_SIZE); 1273 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1268 size = 2 * WWN_SIZE + 4 + 4; 1274 size = 2 * WWN_SIZE + 4 + 4;
1269 1275
1270 /* Attributes */ 1276 /* Attributes */
@@ -1276,11 +1282,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1276 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1282 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1277 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME); 1283 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
1278 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE); 1284 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
1279 memcpy(eiter->a.node_name, ha->node_name, WWN_SIZE); 1285 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1280 size += 4 + WWN_SIZE; 1286 size += 4 + WWN_SIZE;
1281 1287
1282 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n", 1288 DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
1283 __func__, ha->host_no, 1289 __func__, vha->host_no,
1284 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2], 1290 eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
1285 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5], 1291 eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
1286 eiter->a.node_name[6], eiter->a.node_name[7])); 1292 eiter->a.node_name[6], eiter->a.node_name[7]));
@@ -1294,7 +1300,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1294 eiter->len = cpu_to_be16(4 + alen); 1300 eiter->len = cpu_to_be16(4 + alen);
1295 size += 4 + alen; 1301 size += 4 + alen;
1296 1302
1297 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, ha->host_no, 1303 DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
1298 eiter->a.manufacturer)); 1304 eiter->a.manufacturer));
1299 1305
1300 /* Serial number. */ 1306 /* Serial number. */
@@ -1307,7 +1313,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1307 eiter->len = cpu_to_be16(4 + alen); 1313 eiter->len = cpu_to_be16(4 + alen);
1308 size += 4 + alen; 1314 size += 4 + alen;
1309 1315
1310 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, ha->host_no, 1316 DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
1311 eiter->a.serial_num)); 1317 eiter->a.serial_num));
1312 1318
1313 /* Model name. */ 1319 /* Model name. */
@@ -1319,7 +1325,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1319 eiter->len = cpu_to_be16(4 + alen); 1325 eiter->len = cpu_to_be16(4 + alen);
1320 size += 4 + alen; 1326 size += 4 + alen;
1321 1327
1322 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, ha->host_no, 1328 DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
1323 eiter->a.model)); 1329 eiter->a.model));
1324 1330
1325 /* Model description. */ 1331 /* Model description. */
@@ -1332,7 +1338,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1332 eiter->len = cpu_to_be16(4 + alen); 1338 eiter->len = cpu_to_be16(4 + alen);
1333 size += 4 + alen; 1339 size += 4 + alen;
1334 1340
1335 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, ha->host_no, 1341 DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
1336 eiter->a.model_desc)); 1342 eiter->a.model_desc));
1337 1343
1338 /* Hardware version. */ 1344 /* Hardware version. */
@@ -1344,7 +1350,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1344 eiter->len = cpu_to_be16(4 + alen); 1350 eiter->len = cpu_to_be16(4 + alen);
1345 size += 4 + alen; 1351 size += 4 + alen;
1346 1352
1347 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, ha->host_no, 1353 DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
1348 eiter->a.hw_version)); 1354 eiter->a.hw_version));
1349 1355
1350 /* Driver version. */ 1356 /* Driver version. */
@@ -1356,7 +1362,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1356 eiter->len = cpu_to_be16(4 + alen); 1362 eiter->len = cpu_to_be16(4 + alen);
1357 size += 4 + alen; 1363 size += 4 + alen;
1358 1364
1359 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, ha->host_no, 1365 DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
1360 eiter->a.driver_version)); 1366 eiter->a.driver_version));
1361 1367
1362 /* Option ROM version. */ 1368 /* Option ROM version. */
@@ -1368,27 +1374,27 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1368 eiter->len = cpu_to_be16(4 + alen); 1374 eiter->len = cpu_to_be16(4 + alen);
1369 size += 4 + alen; 1375 size += 4 + alen;
1370 1376
1371 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, ha->host_no, 1377 DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
1372 eiter->a.orom_version)); 1378 eiter->a.orom_version));
1373 1379
1374 /* Firmware version */ 1380 /* Firmware version */
1375 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1381 eiter = (struct ct_fdmi_hba_attr *) (entries + size);
1376 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1382 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1377 ha->isp_ops->fw_version_str(ha, eiter->a.fw_version); 1383 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
1378 alen = strlen(eiter->a.fw_version); 1384 alen = strlen(eiter->a.fw_version);
1379 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1385 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1380 eiter->len = cpu_to_be16(4 + alen); 1386 eiter->len = cpu_to_be16(4 + alen);
1381 size += 4 + alen; 1387 size += 4 + alen;
1382 1388
1383 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, ha->host_no, 1389 DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
1384 eiter->a.fw_version)); 1390 eiter->a.fw_version));
1385 1391
1386 /* Update MS request size. */ 1392 /* Update MS request size. */
1387 qla2x00_update_ms_fdmi_iocb(ha, size + 16); 1393 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1388 1394
1389 DEBUG13(printk("%s(%ld): RHBA identifier=" 1395 DEBUG13(printk("%s(%ld): RHBA identifier="
1390 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1396 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1391 ha->host_no, ct_req->req.rhba.hba_identifier[0], 1397 vha->host_no, ct_req->req.rhba.hba_identifier[0],
1392 ct_req->req.rhba.hba_identifier[1], 1398 ct_req->req.rhba.hba_identifier[1],
1393 ct_req->req.rhba.hba_identifier[2], 1399 ct_req->req.rhba.hba_identifier[2],
1394 ct_req->req.rhba.hba_identifier[3], 1400 ct_req->req.rhba.hba_identifier[3],
@@ -1399,25 +1405,25 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1399 DEBUG13(qla2x00_dump_buffer(entries, size)); 1405 DEBUG13(qla2x00_dump_buffer(entries, size));
1400 1406
1401 /* Execute MS IOCB */ 1407 /* Execute MS IOCB */
1402 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1408 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1403 sizeof(ms_iocb_entry_t)); 1409 sizeof(ms_iocb_entry_t));
1404 if (rval != QLA_SUCCESS) { 1410 if (rval != QLA_SUCCESS) {
1405 /*EMPTY*/ 1411 /*EMPTY*/
1406 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n", 1412 DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
1407 ha->host_no, rval)); 1413 vha->host_no, rval));
1408 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RHBA") != 1414 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1409 QLA_SUCCESS) { 1415 QLA_SUCCESS) {
1410 rval = QLA_FUNCTION_FAILED; 1416 rval = QLA_FUNCTION_FAILED;
1411 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && 1417 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1412 ct_rsp->header.explanation_code == 1418 ct_rsp->header.explanation_code ==
1413 CT_EXPL_ALREADY_REGISTERED) { 1419 CT_EXPL_ALREADY_REGISTERED) {
1414 DEBUG2_13(printk("%s(%ld): HBA already registered.\n", 1420 DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
1415 __func__, ha->host_no)); 1421 __func__, vha->host_no));
1416 rval = QLA_ALREADY_REGISTERED; 1422 rval = QLA_ALREADY_REGISTERED;
1417 } 1423 }
1418 } else { 1424 } else {
1419 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n", 1425 DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
1420 ha->host_no)); 1426 vha->host_no));
1421 } 1427 }
1422 1428
1423 return rval; 1429 return rval;
@@ -1430,17 +1436,17 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *ha)
1430 * Returns 0 on success. 1436 * Returns 0 on success.
1431 */ 1437 */
1432static int 1438static int
1433qla2x00_fdmi_dhba(scsi_qla_host_t *ha) 1439qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1434{ 1440{
1435 int rval; 1441 int rval;
1436 1442 struct qla_hw_data *ha = vha->hw;
1437 ms_iocb_entry_t *ms_pkt; 1443 ms_iocb_entry_t *ms_pkt;
1438 struct ct_sns_req *ct_req; 1444 struct ct_sns_req *ct_req;
1439 struct ct_sns_rsp *ct_rsp; 1445 struct ct_sns_rsp *ct_rsp;
1440 1446
1441 /* Issue RPA */ 1447 /* Issue RPA */
1442 /* Prepare common MS IOCB */ 1448 /* Prepare common MS IOCB */
1443 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, DHBA_REQ_SIZE, 1449 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
1444 DHBA_RSP_SIZE); 1450 DHBA_RSP_SIZE);
1445 1451
1446 /* Prepare CT request */ 1452 /* Prepare CT request */
@@ -1449,28 +1455,28 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1449 ct_rsp = &ha->ct_sns->p.rsp; 1455 ct_rsp = &ha->ct_sns->p.rsp;
1450 1456
1451 /* Prepare FDMI command arguments -- portname. */ 1457 /* Prepare FDMI command arguments -- portname. */
1452 memcpy(ct_req->req.dhba.port_name, ha->port_name, WWN_SIZE); 1458 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
1453 1459
1454 DEBUG13(printk("%s(%ld): DHBA portname=" 1460 DEBUG13(printk("%s(%ld): DHBA portname="
1455 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, ha->host_no, 1461 "%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
1456 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1], 1462 ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
1457 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3], 1463 ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
1458 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5], 1464 ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
1459 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7])); 1465 ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
1460 1466
1461 /* Execute MS IOCB */ 1467 /* Execute MS IOCB */
1462 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1468 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1463 sizeof(ms_iocb_entry_t)); 1469 sizeof(ms_iocb_entry_t));
1464 if (rval != QLA_SUCCESS) { 1470 if (rval != QLA_SUCCESS) {
1465 /*EMPTY*/ 1471 /*EMPTY*/
1466 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n", 1472 DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
1467 ha->host_no, rval)); 1473 vha->host_no, rval));
1468 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "DHBA") != 1474 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
1469 QLA_SUCCESS) { 1475 QLA_SUCCESS) {
1470 rval = QLA_FUNCTION_FAILED; 1476 rval = QLA_FUNCTION_FAILED;
1471 } else { 1477 } else {
1472 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n", 1478 DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
1473 ha->host_no)); 1479 vha->host_no));
1474 } 1480 }
1475 1481
1476 return rval; 1482 return rval;
@@ -1483,11 +1489,11 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *ha)
1483 * Returns 0 on success. 1489 * Returns 0 on success.
1484 */ 1490 */
1485static int 1491static int
1486qla2x00_fdmi_rpa(scsi_qla_host_t *ha) 1492qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1487{ 1493{
1488 int rval, alen; 1494 int rval, alen;
1489 uint32_t size, max_frame_size; 1495 uint32_t size, max_frame_size;
1490 1496 struct qla_hw_data *ha = vha->hw;
1491 ms_iocb_entry_t *ms_pkt; 1497 ms_iocb_entry_t *ms_pkt;
1492 struct ct_sns_req *ct_req; 1498 struct ct_sns_req *ct_req;
1493 struct ct_sns_rsp *ct_rsp; 1499 struct ct_sns_rsp *ct_rsp;
@@ -1498,7 +1504,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1498 /* Issue RPA */ 1504 /* Issue RPA */
1499 /* Prepare common MS IOCB */ 1505 /* Prepare common MS IOCB */
1500 /* Request size adjusted after CT preparation */ 1506 /* Request size adjusted after CT preparation */
1501 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE); 1507 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1502 1508
1503 /* Prepare CT request */ 1509 /* Prepare CT request */
1504 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD, 1510 ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
@@ -1506,7 +1512,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1506 ct_rsp = &ha->ct_sns->p.rsp; 1512 ct_rsp = &ha->ct_sns->p.rsp;
1507 1513
1508 /* Prepare FDMI command arguments -- attribute block, attributes. */ 1514 /* Prepare FDMI command arguments -- attribute block, attributes. */
1509 memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE); 1515 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1510 size = WWN_SIZE + 4; 1516 size = WWN_SIZE + 4;
1511 1517
1512 /* Attributes */ 1518 /* Attributes */
@@ -1521,8 +1527,9 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1521 eiter->a.fc4_types[2] = 0x01; 1527 eiter->a.fc4_types[2] = 0x01;
1522 size += 4 + 32; 1528 size += 4 + 32;
1523 1529
1524 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no, 1530 DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
1525 eiter->a.fc4_types[2], eiter->a.fc4_types[1])); 1531 vha->host_no, eiter->a.fc4_types[2],
1532 eiter->a.fc4_types[1]));
1526 1533
1527 /* Supported speed. */ 1534 /* Supported speed. */
1528 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1535 eiter = (struct ct_fdmi_port_attr *) (entries + size);
@@ -1544,7 +1551,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1544 FDMI_PORT_SPEED_1GB); 1551 FDMI_PORT_SPEED_1GB);
1545 size += 4 + 4; 1552 size += 4 + 4;
1546 1553
1547 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no, 1554 DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
1548 eiter->a.sup_speed)); 1555 eiter->a.sup_speed));
1549 1556
1550 /* Current speed. */ 1557 /* Current speed. */
@@ -1575,7 +1582,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1575 } 1582 }
1576 size += 4 + 4; 1583 size += 4 + 4;
1577 1584
1578 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no, 1585 DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
1579 eiter->a.cur_speed)); 1586 eiter->a.cur_speed));
1580 1587
1581 /* Max frame size. */ 1588 /* Max frame size. */
@@ -1588,7 +1595,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1588 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 1595 eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
1589 size += 4 + 4; 1596 size += 4 + 4;
1590 1597
1591 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no, 1598 DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
1592 eiter->a.max_frame_size)); 1599 eiter->a.max_frame_size));
1593 1600
1594 /* OS device name. */ 1601 /* OS device name. */
@@ -1600,32 +1607,32 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1600 eiter->len = cpu_to_be16(4 + alen); 1607 eiter->len = cpu_to_be16(4 + alen);
1601 size += 4 + alen; 1608 size += 4 + alen;
1602 1609
1603 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no, 1610 DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
1604 eiter->a.os_dev_name)); 1611 eiter->a.os_dev_name));
1605 1612
1606 /* Hostname. */ 1613 /* Hostname. */
1607 if (strlen(fc_host_system_hostname(ha->host))) { 1614 if (strlen(fc_host_system_hostname(vha->host))) {
1608 ct_req->req.rpa.attrs.count = 1615 ct_req->req.rpa.attrs.count =
1609 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT); 1616 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1610 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1617 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1611 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME); 1618 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
1612 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), 1619 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1613 "%s", fc_host_system_hostname(ha->host)); 1620 "%s", fc_host_system_hostname(vha->host));
1614 alen = strlen(eiter->a.host_name); 1621 alen = strlen(eiter->a.host_name);
1615 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1622 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1616 eiter->len = cpu_to_be16(4 + alen); 1623 eiter->len = cpu_to_be16(4 + alen);
1617 size += 4 + alen; 1624 size += 4 + alen;
1618 1625
1619 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__, 1626 DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
1620 ha->host_no, eiter->a.host_name)); 1627 vha->host_no, eiter->a.host_name));
1621 } 1628 }
1622 1629
1623 /* Update MS request size. */ 1630 /* Update MS request size. */
1624 qla2x00_update_ms_fdmi_iocb(ha, size + 16); 1631 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1625 1632
1626 DEBUG13(printk("%s(%ld): RPA portname=" 1633 DEBUG13(printk("%s(%ld): RPA portname="
1627 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__, 1634 "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
1628 ha->host_no, ct_req->req.rpa.port_name[0], 1635 vha->host_no, ct_req->req.rpa.port_name[0],
1629 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2], 1636 ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
1630 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4], 1637 ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
1631 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6], 1638 ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
@@ -1633,18 +1640,18 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1633 DEBUG13(qla2x00_dump_buffer(entries, size)); 1640 DEBUG13(qla2x00_dump_buffer(entries, size));
1634 1641
1635 /* Execute MS IOCB */ 1642 /* Execute MS IOCB */
1636 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1643 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1637 sizeof(ms_iocb_entry_t)); 1644 sizeof(ms_iocb_entry_t));
1638 if (rval != QLA_SUCCESS) { 1645 if (rval != QLA_SUCCESS) {
1639 /*EMPTY*/ 1646 /*EMPTY*/
1640 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n", 1647 DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
1641 ha->host_no, rval)); 1648 vha->host_no, rval));
1642 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") != 1649 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1643 QLA_SUCCESS) { 1650 QLA_SUCCESS) {
1644 rval = QLA_FUNCTION_FAILED; 1651 rval = QLA_FUNCTION_FAILED;
1645 } else { 1652 } else {
1646 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n", 1653 DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
1647 ha->host_no)); 1654 vha->host_no));
1648 } 1655 }
1649 1656
1650 return rval; 1657 return rval;
@@ -1657,34 +1664,28 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
1657 * Returns 0 on success. 1664 * Returns 0 on success.
1658 */ 1665 */
1659int 1666int
1660qla2x00_fdmi_register(scsi_qla_host_t *ha) 1667qla2x00_fdmi_register(scsi_qla_host_t *vha)
1661{ 1668{
1662 int rval; 1669 int rval;
1663 1670
1664 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 1671 rval = qla2x00_mgmt_svr_login(vha);
1665 DEBUG2(printk("scsi(%ld): FDMI unsupported on "
1666 "ISP2100/ISP2200.\n", ha->host_no));
1667 return QLA_SUCCESS;
1668 }
1669
1670 rval = qla2x00_mgmt_svr_login(ha);
1671 if (rval) 1672 if (rval)
1672 return rval; 1673 return rval;
1673 1674
1674 rval = qla2x00_fdmi_rhba(ha); 1675 rval = qla2x00_fdmi_rhba(vha);
1675 if (rval) { 1676 if (rval) {
1676 if (rval != QLA_ALREADY_REGISTERED) 1677 if (rval != QLA_ALREADY_REGISTERED)
1677 return rval; 1678 return rval;
1678 1679
1679 rval = qla2x00_fdmi_dhba(ha); 1680 rval = qla2x00_fdmi_dhba(vha);
1680 if (rval) 1681 if (rval)
1681 return rval; 1682 return rval;
1682 1683
1683 rval = qla2x00_fdmi_rhba(ha); 1684 rval = qla2x00_fdmi_rhba(vha);
1684 if (rval) 1685 if (rval)
1685 return rval; 1686 return rval;
1686 } 1687 }
1687 rval = qla2x00_fdmi_rpa(ha); 1688 rval = qla2x00_fdmi_rpa(vha);
1688 1689
1689 return rval; 1690 return rval;
1690} 1691}
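
Note on the hunk above: the ISP2100/ISP2200 early return is dropped, but the retry ordering is kept: RHBA first, and only when the switch answers "already registered" does the driver deregister (DHBA) and retry RHBA before finally issuing RPA. A minimal sketch of that control flow, using stand-in return codes and callbacks rather than the driver's real QLA_* symbols:

enum { REG_OK = 0, REG_FAILED = 1, REG_ALREADY = 2 };  /* stand-ins, not QLA_* codes */

static int fdmi_register_flow(int (*rhba)(void), int (*dhba)(void), int (*rpa)(void))
{
	int rval;

	rval = rhba();                      /* register the HBA */
	if (rval) {
		if (rval != REG_ALREADY)    /* hard failure: give up */
			return rval;
		rval = dhba();              /* stale registration: deregister... */
		if (rval)
			return rval;
		rval = rhba();              /* ...and register again */
		if (rval)
			return rval;
	}
	return rpa();                       /* then register the port attributes */
}

The deregister-and-retry branch is what lets a rebooted host reclaim an FDMI registration the fabric still remembers.
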
@@ -1697,11 +1698,11 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
1697 * Returns 0 on success. 1698 * Returns 0 on success.
1698 */ 1699 */
1699int 1700int
1700qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list) 1701qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1701{ 1702{
1702 int rval; 1703 int rval;
1703 uint16_t i; 1704 uint16_t i;
1704 1705 struct qla_hw_data *ha = vha->hw;
1705 ms_iocb_entry_t *ms_pkt; 1706 ms_iocb_entry_t *ms_pkt;
1706 struct ct_sns_req *ct_req; 1707 struct ct_sns_req *ct_req;
1707 struct ct_sns_rsp *ct_rsp; 1708 struct ct_sns_rsp *ct_rsp;
@@ -1712,7 +1713,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1712 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1713 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1713 /* Issue GFPN_ID */ 1714 /* Issue GFPN_ID */
1714 /* Prepare common MS IOCB */ 1715 /* Prepare common MS IOCB */
1715 ms_pkt = ha->isp_ops->prep_ms_iocb(ha, GFPN_ID_REQ_SIZE, 1716 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
1716 GFPN_ID_RSP_SIZE); 1717 GFPN_ID_RSP_SIZE);
1717 1718
1718 /* Prepare CT request */ 1719 /* Prepare CT request */
@@ -1726,13 +1727,13 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1726 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; 1727 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
1727 1728
1728 /* Execute MS IOCB */ 1729 /* Execute MS IOCB */
1729 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1730 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1730 sizeof(ms_iocb_entry_t)); 1731 sizeof(ms_iocb_entry_t));
1731 if (rval != QLA_SUCCESS) { 1732 if (rval != QLA_SUCCESS) {
1732 /*EMPTY*/ 1733 /*EMPTY*/
1733 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB " 1734 DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
1734 "failed (%d).\n", ha->host_no, rval)); 1735 "failed (%d).\n", vha->host_no, rval));
1735 } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 1736 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1736 "GFPN_ID") != QLA_SUCCESS) { 1737 "GFPN_ID") != QLA_SUCCESS) {
1737 rval = QLA_FUNCTION_FAILED; 1738 rval = QLA_FUNCTION_FAILED;
1738 } else { 1739 } else {
@@ -1750,17 +1751,17 @@ qla2x00_gfpn_id(scsi_qla_host_t *ha, sw_info_t *list)
1750} 1751}
1751 1752
1752static inline void * 1753static inline void *
1753qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size, 1754qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1754 uint32_t rsp_size) 1755 uint32_t rsp_size)
1755{ 1756{
1756 struct ct_entry_24xx *ct_pkt; 1757 struct ct_entry_24xx *ct_pkt;
1757 1758 struct qla_hw_data *ha = vha->hw;
1758 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb; 1759 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1759 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx)); 1760 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1760 1761
1761 ct_pkt->entry_type = CT_IOCB_TYPE; 1762 ct_pkt->entry_type = CT_IOCB_TYPE;
1762 ct_pkt->entry_count = 1; 1763 ct_pkt->entry_count = 1;
1763 ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); 1764 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1764 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1765 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1765 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1766 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
1766 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1767 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
@@ -1774,7 +1775,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1774 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1775 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1775 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1776 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1776 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1777 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1777 ct_pkt->vp_index = ha->vp_idx; 1778 ct_pkt->vp_index = vha->vp_idx;
1778 1779
1779 return ct_pkt; 1780 return ct_pkt;
1780} 1781}
@@ -1803,11 +1804,11 @@ qla24xx_prep_ct_fm_req(struct ct_sns_req *ct_req, uint16_t cmd,
1803 * Returns 0 on success. 1804 * Returns 0 on success.
1804 */ 1805 */
1805int 1806int
1806qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list) 1807qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1807{ 1808{
1808 int rval; 1809 int rval;
1809 uint16_t i; 1810 uint16_t i;
1810 1811 struct qla_hw_data *ha = vha->hw;
1811 ms_iocb_entry_t *ms_pkt; 1812 ms_iocb_entry_t *ms_pkt;
1812 struct ct_sns_req *ct_req; 1813 struct ct_sns_req *ct_req;
1813 struct ct_sns_rsp *ct_rsp; 1814 struct ct_sns_rsp *ct_rsp;
@@ -1817,14 +1818,14 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1817 if (!ha->flags.gpsc_supported) 1818 if (!ha->flags.gpsc_supported)
1818 return QLA_FUNCTION_FAILED; 1819 return QLA_FUNCTION_FAILED;
1819 1820
1820 rval = qla2x00_mgmt_svr_login(ha); 1821 rval = qla2x00_mgmt_svr_login(vha);
1821 if (rval) 1822 if (rval)
1822 return rval; 1823 return rval;
1823 1824
1824 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1825 for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
1825 /* Issue GFPN_ID */ 1826 /* Issue GFPN_ID */
1826 /* Prepare common MS IOCB */ 1827 /* Prepare common MS IOCB */
1827 ms_pkt = qla24xx_prep_ms_fm_iocb(ha, GPSC_REQ_SIZE, 1828 ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
1828 GPSC_RSP_SIZE); 1829 GPSC_RSP_SIZE);
1829 1830
1830 /* Prepare CT request */ 1831 /* Prepare CT request */
@@ -1837,13 +1838,13 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1837 WWN_SIZE); 1838 WWN_SIZE);
1838 1839
1839 /* Execute MS IOCB */ 1840 /* Execute MS IOCB */
1840 rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, 1841 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1841 sizeof(ms_iocb_entry_t)); 1842 sizeof(ms_iocb_entry_t));
1842 if (rval != QLA_SUCCESS) { 1843 if (rval != QLA_SUCCESS) {
1843 /*EMPTY*/ 1844 /*EMPTY*/
1844 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB " 1845 DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
1845 "failed (%d).\n", ha->host_no, rval)); 1846 "failed (%d).\n", vha->host_no, rval));
1846 } else if ((rval = qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, 1847 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1847 "GPSC")) != QLA_SUCCESS) { 1848 "GPSC")) != QLA_SUCCESS) {
1848 /* FM command unsupported? */ 1849 /* FM command unsupported? */
1849 if (rval == QLA_INVALID_COMMAND && 1850 if (rval == QLA_INVALID_COMMAND &&
@@ -1853,7 +1854,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1853 CT_REASON_COMMAND_UNSUPPORTED)) { 1854 CT_REASON_COMMAND_UNSUPPORTED)) {
1854 DEBUG2(printk("scsi(%ld): GPSC command " 1855 DEBUG2(printk("scsi(%ld): GPSC command "
1855 "unsupported, disabling query...\n", 1856 "unsupported, disabling query...\n",
1856 ha->host_no)); 1857 vha->host_no));
1857 ha->flags.gpsc_supported = 0; 1858 ha->flags.gpsc_supported = 0;
1858 rval = QLA_FUNCTION_FAILED; 1859 rval = QLA_FUNCTION_FAILED;
1859 break; 1860 break;
@@ -1878,7 +1879,7 @@ qla2x00_gpsc(scsi_qla_host_t *ha, sw_info_t *list)
1878 1879
1879 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - " 1880 DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
1880 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x " 1881 "fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
1881 "speed=%04x.\n", ha->host_no, 1882 "speed=%04x.\n", vha->host_no,
1882 list[i].fabric_port_name[0], 1883 list[i].fabric_port_name[0],
1883 list[i].fabric_port_name[1], 1884 list[i].fabric_port_name[1],
1884 list[i].fabric_port_name[2], 1885 list[i].fabric_port_name[2],
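
The GPSC loop closes out the qla_gs.c part of the diff; the behavior worth noting (unchanged in substance, now logged through vha) is that a "command unsupported" reject from the fabric management server permanently disables further GPSC queries on that adapter. A rough model, with invented helper names standing in for the real mailbox/CT plumbing:

#include <stdbool.h>

enum gpsc_result { GPSC_OK, GPSC_IO_FAILED, GPSC_UNSUPPORTED };  /* stand-in codes */

/* Query port speeds for each discovered port; stop for good if the
 * fabric's management server says it does not implement GPSC. */
static int gpsc_scan(bool *gpsc_supported, int nports,
                     enum gpsc_result (*issue_gpsc)(int port))
{
	int i;

	if (!*gpsc_supported)
		return -1;

	for (i = 0; i < nports; i++) {
		enum gpsc_result res = issue_gpsc(i);

		if (res == GPSC_UNSUPPORTED) {
			*gpsc_supported = false;   /* never ask this fabric again */
			return -1;
		}
		/* GPSC_IO_FAILED: the driver logs and moves on; same here */
	}
	return 0;
}
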
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4218f20f5ed5..52ed56ecf195 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -21,7 +22,6 @@
21static int qla2x00_isp_firmware(scsi_qla_host_t *); 22static int qla2x00_isp_firmware(scsi_qla_host_t *);
22static void qla2x00_resize_request_q(scsi_qla_host_t *); 23static void qla2x00_resize_request_q(scsi_qla_host_t *);
23static int qla2x00_setup_chip(scsi_qla_host_t *); 24static int qla2x00_setup_chip(scsi_qla_host_t *);
24static void qla2x00_init_response_q_entries(scsi_qla_host_t *);
25static int qla2x00_init_rings(scsi_qla_host_t *); 25static int qla2x00_init_rings(scsi_qla_host_t *);
26static int qla2x00_fw_ready(scsi_qla_host_t *); 26static int qla2x00_fw_ready(scsi_qla_host_t *);
27static int qla2x00_configure_hba(scsi_qla_host_t *); 27static int qla2x00_configure_hba(scsi_qla_host_t *);
@@ -35,10 +35,11 @@ static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
35 35
36static int qla2x00_restart_isp(scsi_qla_host_t *); 36static int qla2x00_restart_isp(scsi_qla_host_t *);
37 37
38static int qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev); 38static int qla2x00_find_new_loop_id(scsi_qla_host_t *, fc_port_t *);
39 39
40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); 40static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
41static int qla84xx_init_chip(scsi_qla_host_t *); 41static int qla84xx_init_chip(scsi_qla_host_t *);
42static int qla25xx_init_queues(struct qla_hw_data *);
42 43
43/****************************************************************************/ 44/****************************************************************************/
44/* QLogic ISP2x00 Hardware Support Functions. */ 45/* QLogic ISP2x00 Hardware Support Functions. */
@@ -55,77 +56,81 @@ static int qla84xx_init_chip(scsi_qla_host_t *);
55* 0 = success 56* 0 = success
56*/ 57*/
57int 58int
58qla2x00_initialize_adapter(scsi_qla_host_t *ha) 59qla2x00_initialize_adapter(scsi_qla_host_t *vha)
59{ 60{
60 int rval; 61 int rval;
61 62 struct qla_hw_data *ha = vha->hw;
63 struct req_que *req = ha->req_q_map[0];
62 /* Clear adapter flags. */ 64 /* Clear adapter flags. */
63 ha->flags.online = 0; 65 vha->flags.online = 0;
64 ha->flags.reset_active = 0; 66 vha->flags.reset_active = 0;
65 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 67 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
66 atomic_set(&ha->loop_state, LOOP_DOWN); 68 atomic_set(&vha->loop_state, LOOP_DOWN);
67 ha->device_flags = DFLG_NO_CABLE; 69 vha->device_flags = DFLG_NO_CABLE;
68 ha->dpc_flags = 0; 70 vha->dpc_flags = 0;
69 ha->flags.management_server_logged_in = 0; 71 vha->flags.management_server_logged_in = 0;
70 ha->marker_needed = 0; 72 vha->marker_needed = 0;
71 ha->mbx_flags = 0; 73 ha->mbx_flags = 0;
72 ha->isp_abort_cnt = 0; 74 ha->isp_abort_cnt = 0;
73 ha->beacon_blink_led = 0; 75 ha->beacon_blink_led = 0;
74 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 76 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
77
78 set_bit(0, ha->req_qid_map);
79 set_bit(0, ha->rsp_qid_map);
75 80
76 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n"); 81 qla_printk(KERN_INFO, ha, "Configuring PCI space...\n");
77 rval = ha->isp_ops->pci_config(ha); 82 rval = ha->isp_ops->pci_config(vha);
78 if (rval) { 83 if (rval) {
79 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n", 84 DEBUG2(printk("scsi(%ld): Unable to configure PCI space.\n",
80 ha->host_no)); 85 vha->host_no));
81 return (rval); 86 return (rval);
82 } 87 }
83 88
84 ha->isp_ops->reset_chip(ha); 89 ha->isp_ops->reset_chip(vha);
85 90
86 rval = qla2xxx_get_flash_info(ha); 91 rval = qla2xxx_get_flash_info(vha);
87 if (rval) { 92 if (rval) {
88 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n", 93 DEBUG2(printk("scsi(%ld): Unable to validate FLASH data.\n",
89 ha->host_no)); 94 vha->host_no));
90 return (rval); 95 return (rval);
91 } 96 }
92 97
93 ha->isp_ops->get_flash_version(ha, ha->request_ring); 98 ha->isp_ops->get_flash_version(vha, req->ring);
94 99
95 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n"); 100 qla_printk(KERN_INFO, ha, "Configure NVRAM parameters...\n");
96 101
97 ha->isp_ops->nvram_config(ha); 102 ha->isp_ops->nvram_config(vha);
98 103
99 if (ha->flags.disable_serdes) { 104 if (ha->flags.disable_serdes) {
100 /* Mask HBA via NVRAM settings? */ 105 /* Mask HBA via NVRAM settings? */
101 qla_printk(KERN_INFO, ha, "Masking HBA WWPN " 106 qla_printk(KERN_INFO, ha, "Masking HBA WWPN "
102 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n", 107 "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
103 ha->port_name[0], ha->port_name[1], 108 vha->port_name[0], vha->port_name[1],
104 ha->port_name[2], ha->port_name[3], 109 vha->port_name[2], vha->port_name[3],
105 ha->port_name[4], ha->port_name[5], 110 vha->port_name[4], vha->port_name[5],
106 ha->port_name[6], ha->port_name[7]); 111 vha->port_name[6], vha->port_name[7]);
107 return QLA_FUNCTION_FAILED; 112 return QLA_FUNCTION_FAILED;
108 } 113 }
109 114
110 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n"); 115 qla_printk(KERN_INFO, ha, "Verifying loaded RISC code...\n");
111 116
112 if (qla2x00_isp_firmware(ha) != QLA_SUCCESS) { 117 if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
113 rval = ha->isp_ops->chip_diag(ha); 118 rval = ha->isp_ops->chip_diag(vha);
114 if (rval) 119 if (rval)
115 return (rval); 120 return (rval);
116 rval = qla2x00_setup_chip(ha); 121 rval = qla2x00_setup_chip(vha);
117 if (rval) 122 if (rval)
118 return (rval); 123 return (rval);
119 } 124 }
120 if (IS_QLA84XX(ha)) { 125 if (IS_QLA84XX(ha)) {
121 ha->cs84xx = qla84xx_get_chip(ha); 126 ha->cs84xx = qla84xx_get_chip(vha);
122 if (!ha->cs84xx) { 127 if (!ha->cs84xx) {
123 qla_printk(KERN_ERR, ha, 128 qla_printk(KERN_ERR, ha,
124 "Unable to configure ISP84XX.\n"); 129 "Unable to configure ISP84XX.\n");
125 return QLA_FUNCTION_FAILED; 130 return QLA_FUNCTION_FAILED;
126 } 131 }
127 } 132 }
128 rval = qla2x00_init_rings(ha); 133 rval = qla2x00_init_rings(vha);
129 134
130 return (rval); 135 return (rval);
131} 136}
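
This first qla_init.c hunk shows the pattern the whole patch follows: scsi_qla_host_t becomes the per-(virtual-)port object, adapter-wide state moves to a separate qla_hw_data reached through vha->hw, and the base request/response queues are looked up from ha->req_q_map[0] and ha->rsp_q_map[0]. A toy model of that split; the type and field names below are illustrative stand-ins, not the driver's full definitions:

struct hw_data_model {                 /* shared, physical-adapter state */
	int mbx_flags;
	void *req_q_map[1];            /* base request queue at index 0 */
	void *rsp_q_map[1];            /* base response queue at index 0 */
};

struct host_model {                    /* per-port (and per-vport) state */
	unsigned long host_no;
	unsigned long dpc_flags;
	struct hw_data_model *hw;      /* back-pointer used as "ha = vha->hw" */
};

static int initialize_adapter_model(struct host_model *vha)
{
	struct hw_data_model *ha = vha->hw;   /* the idiom added at the top of
	                                       * nearly every converted function */
	void *req = ha->req_q_map[0];

	vha->dpc_flags = 0;            /* port-scoped fields stay on vha */
	ha->mbx_flags = 0;             /* adapter-scoped fields move to ha */
	return req != 0;               /* placeholder: the base queue must exist */
}

Keeping port state and hardware state in separate structures is what lets several NPIV vports share one physical adapter without duplicating queue and firmware bookkeeping.
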
@@ -137,10 +142,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *ha)
137 * Returns 0 on success. 142 * Returns 0 on success.
138 */ 143 */
139int 144int
140qla2100_pci_config(scsi_qla_host_t *ha) 145qla2100_pci_config(scsi_qla_host_t *vha)
141{ 146{
142 uint16_t w; 147 uint16_t w;
143 unsigned long flags; 148 unsigned long flags;
149 struct qla_hw_data *ha = vha->hw;
144 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 150 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
145 151
146 pci_set_master(ha->pdev); 152 pci_set_master(ha->pdev);
@@ -167,11 +173,12 @@ qla2100_pci_config(scsi_qla_host_t *ha)
167 * Returns 0 on success. 173 * Returns 0 on success.
168 */ 174 */
169int 175int
170qla2300_pci_config(scsi_qla_host_t *ha) 176qla2300_pci_config(scsi_qla_host_t *vha)
171{ 177{
172 uint16_t w; 178 uint16_t w;
173 unsigned long flags = 0; 179 unsigned long flags = 0;
174 uint32_t cnt; 180 uint32_t cnt;
181 struct qla_hw_data *ha = vha->hw;
175 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 182 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
176 183
177 pci_set_master(ha->pdev); 184 pci_set_master(ha->pdev);
@@ -248,10 +255,11 @@ qla2300_pci_config(scsi_qla_host_t *ha)
248 * Returns 0 on success. 255 * Returns 0 on success.
249 */ 256 */
250int 257int
251qla24xx_pci_config(scsi_qla_host_t *ha) 258qla24xx_pci_config(scsi_qla_host_t *vha)
252{ 259{
253 uint16_t w; 260 uint16_t w;
254 unsigned long flags = 0; 261 unsigned long flags = 0;
262 struct qla_hw_data *ha = vha->hw;
255 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 263 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
256 264
257 pci_set_master(ha->pdev); 265 pci_set_master(ha->pdev);
@@ -291,9 +299,10 @@ qla24xx_pci_config(scsi_qla_host_t *ha)
291 * Returns 0 on success. 299 * Returns 0 on success.
292 */ 300 */
293int 301int
294qla25xx_pci_config(scsi_qla_host_t *ha) 302qla25xx_pci_config(scsi_qla_host_t *vha)
295{ 303{
296 uint16_t w; 304 uint16_t w;
305 struct qla_hw_data *ha = vha->hw;
297 306
298 pci_set_master(ha->pdev); 307 pci_set_master(ha->pdev);
299 pci_try_set_mwi(ha->pdev); 308 pci_try_set_mwi(ha->pdev);
@@ -321,32 +330,33 @@ qla25xx_pci_config(scsi_qla_host_t *ha)
321 * Returns 0 on success. 330 * Returns 0 on success.
322 */ 331 */
323static int 332static int
324qla2x00_isp_firmware(scsi_qla_host_t *ha) 333qla2x00_isp_firmware(scsi_qla_host_t *vha)
325{ 334{
326 int rval; 335 int rval;
327 uint16_t loop_id, topo, sw_cap; 336 uint16_t loop_id, topo, sw_cap;
328 uint8_t domain, area, al_pa; 337 uint8_t domain, area, al_pa;
338 struct qla_hw_data *ha = vha->hw;
329 339
330 /* Assume loading risc code */ 340 /* Assume loading risc code */
331 rval = QLA_FUNCTION_FAILED; 341 rval = QLA_FUNCTION_FAILED;
332 342
333 if (ha->flags.disable_risc_code_load) { 343 if (ha->flags.disable_risc_code_load) {
334 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n", 344 DEBUG2(printk("scsi(%ld): RISC CODE NOT loaded\n",
335 ha->host_no)); 345 vha->host_no));
336 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n"); 346 qla_printk(KERN_INFO, ha, "RISC CODE NOT loaded\n");
337 347
338 /* Verify checksum of loaded RISC code. */ 348 /* Verify checksum of loaded RISC code. */
339 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 349 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
340 if (rval == QLA_SUCCESS) { 350 if (rval == QLA_SUCCESS) {
341 /* And, verify we are not in ROM code. */ 351 /* And, verify we are not in ROM code. */
342 rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa, 352 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
343 &area, &domain, &topo, &sw_cap); 353 &area, &domain, &topo, &sw_cap);
344 } 354 }
345 } 355 }
346 356
347 if (rval) { 357 if (rval) {
348 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n", 358 DEBUG2_3(printk("scsi(%ld): **** Load RISC code ****\n",
349 ha->host_no)); 359 vha->host_no));
350 } 360 }
351 361
352 return (rval); 362 return (rval);
@@ -359,9 +369,10 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
359 * Returns 0 on success. 369 * Returns 0 on success.
360 */ 370 */
361void 371void
362qla2x00_reset_chip(scsi_qla_host_t *ha) 372qla2x00_reset_chip(scsi_qla_host_t *vha)
363{ 373{
364 unsigned long flags = 0; 374 unsigned long flags = 0;
375 struct qla_hw_data *ha = vha->hw;
365 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 376 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
366 uint32_t cnt; 377 uint32_t cnt;
367 uint16_t cmd; 378 uint16_t cmd;
@@ -499,10 +510,11 @@ qla2x00_reset_chip(scsi_qla_host_t *ha)
499 * Returns 0 on success. 510 * Returns 0 on success.
500 */ 511 */
501static inline void 512static inline void
502qla24xx_reset_risc(scsi_qla_host_t *ha) 513qla24xx_reset_risc(scsi_qla_host_t *vha)
503{ 514{
504 int hw_evt = 0; 515 int hw_evt = 0;
505 unsigned long flags = 0; 516 unsigned long flags = 0;
517 struct qla_hw_data *ha = vha->hw;
506 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 518 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
507 uint32_t cnt, d2; 519 uint32_t cnt, d2;
508 uint16_t wd; 520 uint16_t wd;
@@ -541,7 +553,7 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
541 barrier(); 553 barrier();
542 } 554 }
543 if (cnt == 0 || hw_evt) 555 if (cnt == 0 || hw_evt)
544 qla2xxx_hw_event_log(ha, HW_EVENT_RESET_ERR, 556 qla2xxx_hw_event_log(vha, HW_EVENT_RESET_ERR,
545 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2), 557 RD_REG_WORD(&reg->mailbox1), RD_REG_WORD(&reg->mailbox2),
546 RD_REG_WORD(&reg->mailbox3)); 558 RD_REG_WORD(&reg->mailbox3));
547 559
@@ -571,12 +583,13 @@ qla24xx_reset_risc(scsi_qla_host_t *ha)
571 * Returns 0 on success. 583 * Returns 0 on success.
572 */ 584 */
573void 585void
574qla24xx_reset_chip(scsi_qla_host_t *ha) 586qla24xx_reset_chip(scsi_qla_host_t *vha)
575{ 587{
588 struct qla_hw_data *ha = vha->hw;
576 ha->isp_ops->disable_intrs(ha); 589 ha->isp_ops->disable_intrs(ha);
577 590
578 /* Perform RISC reset. */ 591 /* Perform RISC reset. */
579 qla24xx_reset_risc(ha); 592 qla24xx_reset_risc(vha);
580} 593}
581 594
582/** 595/**
@@ -586,20 +599,22 @@ qla24xx_reset_chip(scsi_qla_host_t *ha)
586 * Returns 0 on success. 599 * Returns 0 on success.
587 */ 600 */
588int 601int
589qla2x00_chip_diag(scsi_qla_host_t *ha) 602qla2x00_chip_diag(scsi_qla_host_t *vha)
590{ 603{
591 int rval; 604 int rval;
605 struct qla_hw_data *ha = vha->hw;
592 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 606 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
593 unsigned long flags = 0; 607 unsigned long flags = 0;
594 uint16_t data; 608 uint16_t data;
595 uint32_t cnt; 609 uint32_t cnt;
596 uint16_t mb[5]; 610 uint16_t mb[5];
611 struct req_que *req = ha->req_q_map[0];
597 612
598 /* Assume a failed state */ 613 /* Assume a failed state */
599 rval = QLA_FUNCTION_FAILED; 614 rval = QLA_FUNCTION_FAILED;
600 615
601 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n", 616 DEBUG3(printk("scsi(%ld): Testing device at %lx.\n",
602 ha->host_no, (u_long)&reg->flash_address)); 617 vha->host_no, (u_long)&reg->flash_address));
603 618
604 spin_lock_irqsave(&ha->hardware_lock, flags); 619 spin_lock_irqsave(&ha->hardware_lock, flags);
605 620
@@ -662,17 +677,17 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
662 ha->product_id[3] = mb[4]; 677 ha->product_id[3] = mb[4];
663 678
664 /* Adjust fw RISC transfer size */ 679 /* Adjust fw RISC transfer size */
665 if (ha->request_q_length > 1024) 680 if (req->length > 1024)
666 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 681 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
667 else 682 else
668 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 683 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
669 ha->request_q_length; 684 req->length;
670 685
671 if (IS_QLA2200(ha) && 686 if (IS_QLA2200(ha) &&
672 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) { 687 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
673 /* Limit firmware transfer size with a 2200A */ 688 /* Limit firmware transfer size with a 2200A */
674 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n", 689 DEBUG3(printk("scsi(%ld): Found QLA2200A chip.\n",
675 ha->host_no)); 690 vha->host_no));
676 691
677 ha->device_type |= DT_ISP2200A; 692 ha->device_type |= DT_ISP2200A;
678 ha->fw_transfer_size = 128; 693 ha->fw_transfer_size = 128;
@@ -681,11 +696,11 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
681 /* Wrap Incoming Mailboxes Test. */ 696 /* Wrap Incoming Mailboxes Test. */
682 spin_unlock_irqrestore(&ha->hardware_lock, flags); 697 spin_unlock_irqrestore(&ha->hardware_lock, flags);
683 698
684 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", ha->host_no)); 699 DEBUG3(printk("scsi(%ld): Checking mailboxes.\n", vha->host_no));
685 rval = qla2x00_mbx_reg_test(ha); 700 rval = qla2x00_mbx_reg_test(vha);
686 if (rval) { 701 if (rval) {
687 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 702 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
688 ha->host_no)); 703 vha->host_no));
689 qla_printk(KERN_WARNING, ha, 704 qla_printk(KERN_WARNING, ha,
690 "Failed mailbox send register test\n"); 705 "Failed mailbox send register test\n");
691 } 706 }
@@ -698,7 +713,7 @@ qla2x00_chip_diag(scsi_qla_host_t *ha)
698chip_diag_failed: 713chip_diag_failed:
699 if (rval) 714 if (rval)
700 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED " 715 DEBUG2_3(printk("scsi(%ld): Chip diagnostics **** FAILED "
701 "****\n", ha->host_no)); 716 "****\n", vha->host_no));
702 717
703 spin_unlock_irqrestore(&ha->hardware_lock, flags); 718 spin_unlock_irqrestore(&ha->hardware_lock, flags);
704 719
@@ -712,19 +727,21 @@ chip_diag_failed:
712 * Returns 0 on success. 727 * Returns 0 on success.
713 */ 728 */
714int 729int
715qla24xx_chip_diag(scsi_qla_host_t *ha) 730qla24xx_chip_diag(scsi_qla_host_t *vha)
716{ 731{
717 int rval; 732 int rval;
733 struct qla_hw_data *ha = vha->hw;
734 struct req_que *req = ha->req_q_map[0];
718 735
719 /* Perform RISC reset. */ 736 /* Perform RISC reset. */
720 qla24xx_reset_risc(ha); 737 qla24xx_reset_risc(vha);
721 738
722 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length; 739 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
723 740
724 rval = qla2x00_mbx_reg_test(ha); 741 rval = qla2x00_mbx_reg_test(vha);
725 if (rval) { 742 if (rval) {
726 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n", 743 DEBUG(printk("scsi(%ld): Failed mailbox send register test\n",
727 ha->host_no)); 744 vha->host_no));
728 qla_printk(KERN_WARNING, ha, 745 qla_printk(KERN_WARNING, ha,
729 "Failed mailbox send register test\n"); 746 "Failed mailbox send register test\n");
730 } else { 747 } else {
@@ -736,13 +753,16 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
736} 753}
737 754
738void 755void
739qla2x00_alloc_fw_dump(scsi_qla_host_t *ha) 756qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
740{ 757{
741 int rval; 758 int rval;
742 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, 759 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
743 eft_size, fce_size; 760 eft_size, fce_size, mq_size;
744 dma_addr_t tc_dma; 761 dma_addr_t tc_dma;
745 void *tc; 762 void *tc;
763 struct qla_hw_data *ha = vha->hw;
764 struct req_que *req = ha->req_q_map[0];
765 struct rsp_que *rsp = ha->rsp_q_map[0];
746 766
747 if (ha->fw_dump) { 767 if (ha->fw_dump) {
748 qla_printk(KERN_WARNING, ha, 768 qla_printk(KERN_WARNING, ha,
@@ -751,7 +771,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
751 } 771 }
752 772
753 ha->fw_dumped = 0; 773 ha->fw_dumped = 0;
754 fixed_size = mem_size = eft_size = fce_size = 0; 774 fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
755 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 775 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
756 fixed_size = sizeof(struct qla2100_fw_dump); 776 fixed_size = sizeof(struct qla2100_fw_dump);
757 } else if (IS_QLA23XX(ha)) { 777 } else if (IS_QLA23XX(ha)) {
@@ -760,10 +780,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
760 sizeof(uint16_t); 780 sizeof(uint16_t);
761 } else if (IS_FWI2_CAPABLE(ha)) { 781 } else if (IS_FWI2_CAPABLE(ha)) {
762 fixed_size = IS_QLA25XX(ha) ? 782 fixed_size = IS_QLA25XX(ha) ?
763 offsetof(struct qla25xx_fw_dump, ext_mem): 783 offsetof(struct qla25xx_fw_dump, ext_mem) :
764 offsetof(struct qla24xx_fw_dump, ext_mem); 784 offsetof(struct qla24xx_fw_dump, ext_mem);
765 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 785 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
766 sizeof(uint32_t); 786 sizeof(uint32_t);
787 if (ha->mqenable)
788 mq_size = sizeof(struct qla2xxx_mq_chain);
767 789
768 /* Allocate memory for Fibre Channel Event Buffer. */ 790 /* Allocate memory for Fibre Channel Event Buffer. */
769 if (!IS_QLA25XX(ha)) 791 if (!IS_QLA25XX(ha))
@@ -778,7 +800,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
778 } 800 }
779 801
780 memset(tc, 0, FCE_SIZE); 802 memset(tc, 0, FCE_SIZE);
781 rval = qla2x00_enable_fce_trace(ha, tc_dma, FCE_NUM_BUFFERS, 803 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
782 ha->fce_mb, &ha->fce_bufs); 804 ha->fce_mb, &ha->fce_bufs);
783 if (rval) { 805 if (rval) {
784 qla_printk(KERN_WARNING, ha, "Unable to initialize " 806 qla_printk(KERN_WARNING, ha, "Unable to initialize "
@@ -807,7 +829,7 @@ try_eft:
807 } 829 }
808 830
809 memset(tc, 0, EFT_SIZE); 831 memset(tc, 0, EFT_SIZE);
810 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS); 832 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
811 if (rval) { 833 if (rval) {
812 qla_printk(KERN_WARNING, ha, "Unable to initialize " 834 qla_printk(KERN_WARNING, ha, "Unable to initialize "
813 "EFT (%d).\n", rval); 835 "EFT (%d).\n", rval);
@@ -824,12 +846,12 @@ try_eft:
824 ha->eft = tc; 846 ha->eft = tc;
825 } 847 }
826cont_alloc: 848cont_alloc:
827 req_q_size = ha->request_q_length * sizeof(request_t); 849 req_q_size = req->length * sizeof(request_t);
828 rsp_q_size = ha->response_q_length * sizeof(response_t); 850 rsp_q_size = rsp->length * sizeof(response_t);
829 851
830 dump_size = offsetof(struct qla2xxx_fw_dump, isp); 852 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
831 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + 853 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
832 eft_size + fce_size; 854 mq_size + eft_size + fce_size;
833 855
834 ha->fw_dump = vmalloc(dump_size); 856 ha->fw_dump = vmalloc(dump_size);
835 if (!ha->fw_dump) { 857 if (!ha->fw_dump) {
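
The dump-allocation hunks above add an mq_size term so the multi-queue chain is captured in the firmware dump; the total remains a plain sum of the header offset, the fixed region, external memory, the request/response rings, and the optional EFT/FCE/MQ pieces. A small arithmetic sketch (names are stand-ins):

#include <stdint.h>
#include <stdbool.h>

static uint32_t fw_dump_size(uint32_t hdr, uint32_t fixed, uint32_t mem,
                             uint32_t req_q, uint32_t rsp_q,
                             uint32_t eft, uint32_t fce,
                             bool mqenable, uint32_t mq_chain)
{
	uint32_t mq = mqenable ? mq_chain : 0;   /* the term added by this patch */

	return hdr + fixed + mem + req_q + rsp_q + mq + eft + fce;
}
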
@@ -844,7 +866,6 @@ cont_alloc:
844 } 866 }
845 return; 867 return;
846 } 868 }
847
848 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n", 869 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for firmware dump...\n",
849 dump_size / 1024); 870 dump_size / 1024);
850 871
@@ -875,27 +896,29 @@ cont_alloc:
875 * Returns 0 on success. 896 * Returns 0 on success.
876 */ 897 */
877static void 898static void
878qla2x00_resize_request_q(scsi_qla_host_t *ha) 899qla2x00_resize_request_q(scsi_qla_host_t *vha)
879{ 900{
880 int rval; 901 int rval;
881 uint16_t fw_iocb_cnt = 0; 902 uint16_t fw_iocb_cnt = 0;
882 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM; 903 uint16_t request_q_length = REQUEST_ENTRY_CNT_2XXX_EXT_MEM;
883 dma_addr_t request_dma; 904 dma_addr_t request_dma;
884 request_t *request_ring; 905 request_t *request_ring;
906 struct qla_hw_data *ha = vha->hw;
907 struct req_que *req = ha->req_q_map[0];
885 908
886 /* Valid only on recent ISPs. */ 909 /* Valid only on recent ISPs. */
887 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 910 if (IS_QLA2100(ha) || IS_QLA2200(ha))
888 return; 911 return;
889 912
890 /* Retrieve IOCB counts available to the firmware. */ 913 /* Retrieve IOCB counts available to the firmware. */
891 rval = qla2x00_get_resource_cnts(ha, NULL, NULL, NULL, &fw_iocb_cnt, 914 rval = qla2x00_get_resource_cnts(vha, NULL, NULL, NULL, &fw_iocb_cnt,
892 &ha->max_npiv_vports); 915 &ha->max_npiv_vports);
893 if (rval) 916 if (rval)
894 return; 917 return;
895 /* No point in continuing if current settings are sufficient. */ 918 /* No point in continuing if current settings are sufficient. */
896 if (fw_iocb_cnt < 1024) 919 if (fw_iocb_cnt < 1024)
897 return; 920 return;
898 if (ha->request_q_length >= request_q_length) 921 if (req->length >= request_q_length)
899 return; 922 return;
900 923
901 /* Attempt to claim larger area for request queue. */ 924 /* Attempt to claim larger area for request queue. */
@@ -909,17 +932,17 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
909 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n", 932 qla_printk(KERN_INFO, ha, "Extended memory detected (%d KB)...\n",
910 (ha->fw_memory_size + 1) / 1024); 933 (ha->fw_memory_size + 1) / 1024);
911 qla_printk(KERN_INFO, ha, "Resizing request queue depth " 934 qla_printk(KERN_INFO, ha, "Resizing request queue depth "
912 "(%d -> %d)...\n", ha->request_q_length, request_q_length); 935 "(%d -> %d)...\n", req->length, request_q_length);
913 936
914 /* Clear old allocations. */ 937 /* Clear old allocations. */
915 dma_free_coherent(&ha->pdev->dev, 938 dma_free_coherent(&ha->pdev->dev,
916 (ha->request_q_length + 1) * sizeof(request_t), ha->request_ring, 939 (req->length + 1) * sizeof(request_t), req->ring,
917 ha->request_dma); 940 req->dma);
918 941
919 /* Begin using larger queue. */ 942 /* Begin using larger queue. */
920 ha->request_q_length = request_q_length; 943 req->length = request_q_length;
921 ha->request_ring = request_ring; 944 req->ring = request_ring;
922 ha->request_dma = request_dma; 945 req->dma = request_dma;
923} 946}
924 947
925/** 948/**
@@ -929,10 +952,11 @@ qla2x00_resize_request_q(scsi_qla_host_t *ha)
929 * Returns 0 on success. 952 * Returns 0 on success.
930 */ 953 */
931static int 954static int
932qla2x00_setup_chip(scsi_qla_host_t *ha) 955qla2x00_setup_chip(scsi_qla_host_t *vha)
933{ 956{
934 int rval; 957 int rval;
935 uint32_t srisc_address = 0; 958 uint32_t srisc_address = 0;
959 struct qla_hw_data *ha = vha->hw;
936 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 960 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
937 unsigned long flags; 961 unsigned long flags;
938 962
@@ -945,28 +969,27 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
945 } 969 }
946 970
947 /* Load firmware sequences */ 971 /* Load firmware sequences */
948 rval = ha->isp_ops->load_risc(ha, &srisc_address); 972 rval = ha->isp_ops->load_risc(vha, &srisc_address);
949 if (rval == QLA_SUCCESS) { 973 if (rval == QLA_SUCCESS) {
950 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC " 974 DEBUG(printk("scsi(%ld): Verifying Checksum of loaded RISC "
951 "code.\n", ha->host_no)); 975 "code.\n", vha->host_no));
952 976
953 rval = qla2x00_verify_checksum(ha, srisc_address); 977 rval = qla2x00_verify_checksum(vha, srisc_address);
954 if (rval == QLA_SUCCESS) { 978 if (rval == QLA_SUCCESS) {
955 /* Start firmware execution. */ 979 /* Start firmware execution. */
956 DEBUG(printk("scsi(%ld): Checksum OK, start " 980 DEBUG(printk("scsi(%ld): Checksum OK, start "
957 "firmware.\n", ha->host_no)); 981 "firmware.\n", vha->host_no));
958 982
959 rval = qla2x00_execute_fw(ha, srisc_address); 983 rval = qla2x00_execute_fw(vha, srisc_address);
960 /* Retrieve firmware information. */ 984 /* Retrieve firmware information. */
961 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) { 985 if (rval == QLA_SUCCESS && ha->fw_major_version == 0) {
962 qla2x00_get_fw_version(ha, 986 qla2x00_get_fw_version(vha,
963 &ha->fw_major_version, 987 &ha->fw_major_version,
964 &ha->fw_minor_version, 988 &ha->fw_minor_version,
965 &ha->fw_subminor_version, 989 &ha->fw_subminor_version,
966 &ha->fw_attributes, &ha->fw_memory_size); 990 &ha->fw_attributes, &ha->fw_memory_size);
967 ha->flags.npiv_supported = 0; 991 ha->flags.npiv_supported = 0;
968 if ((IS_QLA24XX(ha) || IS_QLA25XX(ha) || 992 if (IS_QLA2XXX_MIDTYPE(ha) &&
969 IS_QLA84XX(ha)) &&
970 (ha->fw_attributes & BIT_2)) { 993 (ha->fw_attributes & BIT_2)) {
971 ha->flags.npiv_supported = 1; 994 ha->flags.npiv_supported = 1;
972 if ((!ha->max_npiv_vports) || 995 if ((!ha->max_npiv_vports) ||
@@ -975,15 +998,15 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
975 ha->max_npiv_vports = 998 ha->max_npiv_vports =
976 MIN_MULTI_ID_FABRIC - 1; 999 MIN_MULTI_ID_FABRIC - 1;
977 } 1000 }
978 qla2x00_resize_request_q(ha); 1001 qla2x00_resize_request_q(vha);
979 1002
980 if (ql2xallocfwdump) 1003 if (ql2xallocfwdump)
981 qla2x00_alloc_fw_dump(ha); 1004 qla2x00_alloc_fw_dump(vha);
982 } 1005 }
983 } else { 1006 } else {
984 DEBUG2(printk(KERN_INFO 1007 DEBUG2(printk(KERN_INFO
985 "scsi(%ld): ISP Firmware failed checksum.\n", 1008 "scsi(%ld): ISP Firmware failed checksum.\n",
986 ha->host_no)); 1009 vha->host_no));
987 } 1010 }
988 } 1011 }
989 1012
@@ -1002,7 +1025,7 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1002 1025
1003 if (rval) { 1026 if (rval) {
1004 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", 1027 DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
1005 ha->host_no)); 1028 vha->host_no));
1006 } 1029 }
1007 1030
1008 return (rval); 1031 return (rval);
@@ -1017,14 +1040,14 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
1017 * 1040 *
1018 * Returns 0 on success. 1041 * Returns 0 on success.
1019 */ 1042 */
1020static void 1043void
1021qla2x00_init_response_q_entries(scsi_qla_host_t *ha) 1044qla2x00_init_response_q_entries(struct rsp_que *rsp)
1022{ 1045{
1023 uint16_t cnt; 1046 uint16_t cnt;
1024 response_t *pkt; 1047 response_t *pkt;
1025 1048
1026 pkt = ha->response_ring_ptr; 1049 pkt = rsp->ring_ptr;
1027 for (cnt = 0; cnt < ha->response_q_length; cnt++) { 1050 for (cnt = 0; cnt < rsp->length; cnt++) {
1028 pkt->signature = RESPONSE_PROCESSED; 1051 pkt->signature = RESPONSE_PROCESSED;
1029 pkt++; 1052 pkt++;
1030 } 1053 }
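
qla2x00_init_response_q_entries is no longer static and now takes the response queue itself rather than the host, so the same stamping can be applied to any of the per-adapter queues. A minimal model with stand-in types and a placeholder signature value:

#include <stdint.h>

#define RING_ENTRY_PROCESSED 0xABCDu   /* placeholder, not the driver's constant */

struct rsp_entry_model { uint32_t signature; };

struct rsp_que_model {
	struct rsp_entry_model *ring_ptr;
	uint16_t length;
};

static void init_response_q_entries_model(struct rsp_que_model *rsp)
{
	uint16_t cnt;
	struct rsp_entry_model *pkt = rsp->ring_ptr;

	for (cnt = 0; cnt < rsp->length; cnt++, pkt++)
		pkt->signature = RING_ENTRY_PROCESSED;   /* mark every slot as consumed */
}
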
@@ -1038,19 +1061,20 @@ qla2x00_init_response_q_entries(scsi_qla_host_t *ha)
1038 * Returns 0 on success. 1061 * Returns 0 on success.
1039 */ 1062 */
1040void 1063void
1041qla2x00_update_fw_options(scsi_qla_host_t *ha) 1064qla2x00_update_fw_options(scsi_qla_host_t *vha)
1042{ 1065{
1043 uint16_t swing, emphasis, tx_sens, rx_sens; 1066 uint16_t swing, emphasis, tx_sens, rx_sens;
1067 struct qla_hw_data *ha = vha->hw;
1044 1068
1045 memset(ha->fw_options, 0, sizeof(ha->fw_options)); 1069 memset(ha->fw_options, 0, sizeof(ha->fw_options));
1046 qla2x00_get_fw_options(ha, ha->fw_options); 1070 qla2x00_get_fw_options(vha, ha->fw_options);
1047 1071
1048 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1072 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1049 return; 1073 return;
1050 1074
1051 /* Serial Link options. */ 1075 /* Serial Link options. */
1052 DEBUG3(printk("scsi(%ld): Serial link options:\n", 1076 DEBUG3(printk("scsi(%ld): Serial link options:\n",
1053 ha->host_no)); 1077 vha->host_no));
1054 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options, 1078 DEBUG3(qla2x00_dump_buffer((uint8_t *)&ha->fw_seriallink_options,
1055 sizeof(ha->fw_seriallink_options))); 1079 sizeof(ha->fw_seriallink_options)));
1056 1080
@@ -1108,19 +1132,20 @@ qla2x00_update_fw_options(scsi_qla_host_t *ha)
1108 ha->fw_options[2] |= BIT_13; 1132 ha->fw_options[2] |= BIT_13;
1109 1133
1110 /* Update firmware options. */ 1134 /* Update firmware options. */
1111 qla2x00_set_fw_options(ha, ha->fw_options); 1135 qla2x00_set_fw_options(vha, ha->fw_options);
1112} 1136}
1113 1137
1114void 1138void
1115qla24xx_update_fw_options(scsi_qla_host_t *ha) 1139qla24xx_update_fw_options(scsi_qla_host_t *vha)
1116{ 1140{
1117 int rval; 1141 int rval;
1142 struct qla_hw_data *ha = vha->hw;
1118 1143
1119 /* Update Serial Link options. */ 1144 /* Update Serial Link options. */
1120 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) 1145 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
1121 return; 1146 return;
1122 1147
1123 rval = qla2x00_set_serdes_params(ha, 1148 rval = qla2x00_set_serdes_params(vha,
1124 le16_to_cpu(ha->fw_seriallink_options24[1]), 1149 le16_to_cpu(ha->fw_seriallink_options24[1]),
1125 le16_to_cpu(ha->fw_seriallink_options24[2]), 1150 le16_to_cpu(ha->fw_seriallink_options24[2]),
1126 le16_to_cpu(ha->fw_seriallink_options24[3])); 1151 le16_to_cpu(ha->fw_seriallink_options24[3]));
@@ -1131,19 +1156,22 @@ qla24xx_update_fw_options(scsi_qla_host_t *ha)
1131} 1156}
1132 1157
1133void 1158void
1134qla2x00_config_rings(struct scsi_qla_host *ha) 1159qla2x00_config_rings(struct scsi_qla_host *vha)
1135{ 1160{
1161 struct qla_hw_data *ha = vha->hw;
1136 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1162 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1163 struct req_que *req = ha->req_q_map[0];
1164 struct rsp_que *rsp = ha->rsp_q_map[0];
1137 1165
1138 /* Setup ring parameters in initialization control block. */ 1166 /* Setup ring parameters in initialization control block. */
1139 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); 1167 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
1140 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); 1168 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
1141 ha->init_cb->request_q_length = cpu_to_le16(ha->request_q_length); 1169 ha->init_cb->request_q_length = cpu_to_le16(req->length);
1142 ha->init_cb->response_q_length = cpu_to_le16(ha->response_q_length); 1170 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
1143 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1171 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1144 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1172 ha->init_cb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1145 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1173 ha->init_cb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1146 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1174 ha->init_cb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1147 1175
1148 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); 1176 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
1149 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); 1177 WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
@@ -1153,27 +1181,62 @@ qla2x00_config_rings(struct scsi_qla_host *ha)
1153} 1181}
1154 1182
1155void 1183void
1156qla24xx_config_rings(struct scsi_qla_host *ha) 1184qla24xx_config_rings(struct scsi_qla_host *vha)
1157{ 1185{
1158 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1186 struct qla_hw_data *ha = vha->hw;
1187 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
1188 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
1189 struct qla_msix_entry *msix;
1159 struct init_cb_24xx *icb; 1190 struct init_cb_24xx *icb;
1191 uint16_t rid = 0;
1192 struct req_que *req = ha->req_q_map[0];
1193 struct rsp_que *rsp = ha->rsp_q_map[0];
1160 1194
1161 /* Setup ring parameters in initialization control block. */ 1195/* Setup ring parameters in initialization control block. */
1162 icb = (struct init_cb_24xx *)ha->init_cb; 1196 icb = (struct init_cb_24xx *)ha->init_cb;
1163 icb->request_q_outpointer = __constant_cpu_to_le16(0); 1197 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1164 icb->response_q_inpointer = __constant_cpu_to_le16(0); 1198 icb->response_q_inpointer = __constant_cpu_to_le16(0);
1165 icb->request_q_length = cpu_to_le16(ha->request_q_length); 1199 icb->request_q_length = cpu_to_le16(req->length);
1166 icb->response_q_length = cpu_to_le16(ha->response_q_length); 1200 icb->response_q_length = cpu_to_le16(rsp->length);
1167 icb->request_q_address[0] = cpu_to_le32(LSD(ha->request_dma)); 1201 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
1168 icb->request_q_address[1] = cpu_to_le32(MSD(ha->request_dma)); 1202 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
1169 icb->response_q_address[0] = cpu_to_le32(LSD(ha->response_dma)); 1203 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1170 icb->response_q_address[1] = cpu_to_le32(MSD(ha->response_dma)); 1204 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1171 1205
1172 WRT_REG_DWORD(&reg->req_q_in, 0); 1206 if (ha->mqenable) {
1173 WRT_REG_DWORD(&reg->req_q_out, 0); 1207 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1174 WRT_REG_DWORD(&reg->rsp_q_in, 0); 1208 icb->rid = __constant_cpu_to_le16(rid);
1175 WRT_REG_DWORD(&reg->rsp_q_out, 0); 1209 if (ha->flags.msix_enabled) {
1176 RD_REG_DWORD(&reg->rsp_q_out); 1210 msix = &ha->msix_entries[1];
1211 DEBUG2_17(printk(KERN_INFO
1212 "Reistering vector 0x%x for base que\n", msix->entry));
1213 icb->msix = cpu_to_le16(msix->entry);
1214 }
1215 /* Use alternate PCI bus number */
1216 if (MSB(rid))
1217 icb->firmware_options_2 |=
1218 __constant_cpu_to_le32(BIT_19);
1219 /* Use alternate PCI devfn */
1220 if (LSB(rid))
1221 icb->firmware_options_2 |=
1222 __constant_cpu_to_le32(BIT_18);
1223
1224 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
1225 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
1226 ha->rsp_q_map[0]->options = icb->firmware_options_2;
1227
1228 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
1229 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
1230 WRT_REG_DWORD(&reg->isp25mq.rsp_q_in, 0);
1231 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, 0);
1232 } else {
1233 WRT_REG_DWORD(&reg->isp24.req_q_in, 0);
1234 WRT_REG_DWORD(&reg->isp24.req_q_out, 0);
1235 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1236 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1237 }
1238 /* PCI posting */
1239 RD_REG_DWORD(&ioreg->hccr);
1177} 1240}
1178 1241
1179/** 1242/**
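
When ha->mqenable is set, qla24xx_config_rings now also programs the multi-queue fields of the init control block: a QoS value, the queue "rid", the base queue's MSI-X vector, and firmware_options_2 bits 18/19 when an alternate PCI devfn/bus is in use, plus bits 22/23, before zeroing the isp25mq queue pointers. A condensed sketch of the option-bit handling; constants and field names are simplified stand-ins and byte-order conversions are omitted:

#include <stdint.h>
#include <stdbool.h>

struct icb_model {
	uint16_t qos, rid, msix;
	uint32_t firmware_options_2;
};

static void config_mq_options(struct icb_model *icb, uint16_t rid,
                              bool msix_enabled, uint16_t base_vector)
{
	icb->qos = 1;                            /* stand-in for QLA_DEFAULT_QUE_QOS */
	icb->rid = rid;
	if (msix_enabled)
		icb->msix = base_vector;         /* vector used by the base queue */
	if (rid >> 8)                            /* MSB(rid) set: alternate PCI bus */
		icb->firmware_options_2 |= 1u << 19;
	if (rid & 0xff)                          /* LSB(rid) set: alternate PCI devfn */
		icb->firmware_options_2 |= 1u << 18;
	icb->firmware_options_2 |= (1u << 22) | (1u << 23);
}
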
@@ -1186,11 +1249,14 @@ qla24xx_config_rings(struct scsi_qla_host *ha)
1186 * Returns 0 on success. 1249 * Returns 0 on success.
1187 */ 1250 */
1188static int 1251static int
1189qla2x00_init_rings(scsi_qla_host_t *ha) 1252qla2x00_init_rings(scsi_qla_host_t *vha)
1190{ 1253{
1191 int rval; 1254 int rval;
1192 unsigned long flags = 0; 1255 unsigned long flags = 0;
1193 int cnt; 1256 int cnt;
1257 struct qla_hw_data *ha = vha->hw;
1258 struct req_que *req = ha->req_q_map[0];
1259 struct rsp_que *rsp = ha->rsp_q_map[0];
1194 struct mid_init_cb_24xx *mid_init_cb = 1260 struct mid_init_cb_24xx *mid_init_cb =
1195 (struct mid_init_cb_24xx *) ha->init_cb; 1261 (struct mid_init_cb_24xx *) ha->init_cb;
1196 1262
@@ -1198,45 +1264,45 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1198 1264
1199 /* Clear outstanding commands array. */ 1265 /* Clear outstanding commands array. */
1200 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1266 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1201 ha->outstanding_cmds[cnt] = NULL; 1267 req->outstanding_cmds[cnt] = NULL;
1202 1268
1203 ha->current_outstanding_cmd = 0; 1269 req->current_outstanding_cmd = 0;
1204 1270
1205 /* Clear RSCN queue. */ 1271 /* Clear RSCN queue. */
1206 ha->rscn_in_ptr = 0; 1272 vha->rscn_in_ptr = 0;
1207 ha->rscn_out_ptr = 0; 1273 vha->rscn_out_ptr = 0;
1208 1274
1209 /* Initialize firmware. */ 1275 /* Initialize firmware. */
1210 ha->request_ring_ptr = ha->request_ring; 1276 req->ring_ptr = req->ring;
1211 ha->req_ring_index = 0; 1277 req->ring_index = 0;
1212 ha->req_q_cnt = ha->request_q_length; 1278 req->cnt = req->length;
1213 ha->response_ring_ptr = ha->response_ring; 1279 rsp->ring_ptr = rsp->ring;
1214 ha->rsp_ring_index = 0; 1280 rsp->ring_index = 0;
1215 1281
1216 /* Initialize response queue entries */ 1282 /* Initialize response queue entries */
1217 qla2x00_init_response_q_entries(ha); 1283 qla2x00_init_response_q_entries(rsp);
1218 1284
1219 ha->isp_ops->config_rings(ha); 1285 ha->isp_ops->config_rings(vha);
1220 1286
1221 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1222 1288
1223 /* Update any ISP specific firmware options before initialization. */ 1289 /* Update any ISP specific firmware options before initialization. */
1224 ha->isp_ops->update_fw_options(ha); 1290 ha->isp_ops->update_fw_options(vha);
1225 1291
1226 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1292 DEBUG(printk("scsi(%ld): Issue init firmware.\n", vha->host_no));
1227 1293
1228 if (ha->flags.npiv_supported) 1294 if (ha->flags.npiv_supported)
1229 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports); 1295 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
1230 1296
1231 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 1297 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
1232 1298
1233 rval = qla2x00_init_firmware(ha, ha->init_cb_size); 1299 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
1234 if (rval) { 1300 if (rval) {
1235 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1301 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
1236 ha->host_no)); 1302 vha->host_no));
1237 } else { 1303 } else {
1238 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n", 1304 DEBUG3(printk("scsi(%ld): Init firmware -- success.\n",
1239 ha->host_no)); 1305 vha->host_no));
1240 } 1306 }
1241 1307
1242 return (rval); 1308 return (rval);
@@ -1249,13 +1315,14 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1249 * Returns 0 on success. 1315 * Returns 0 on success.
1250 */ 1316 */
1251static int 1317static int
1252qla2x00_fw_ready(scsi_qla_host_t *ha) 1318qla2x00_fw_ready(scsi_qla_host_t *vha)
1253{ 1319{
1254 int rval; 1320 int rval;
1255 unsigned long wtime, mtime, cs84xx_time; 1321 unsigned long wtime, mtime, cs84xx_time;
1256 uint16_t min_wait; /* Minimum wait time if loop is down */ 1322 uint16_t min_wait; /* Minimum wait time if loop is down */
1257 uint16_t wait_time; /* Wait time if loop is coming ready */ 1323 uint16_t wait_time; /* Wait time if loop is coming ready */
1258 uint16_t state[3]; 1324 uint16_t state[3];
1325 struct qla_hw_data *ha = vha->hw;
1259 1326
1260 rval = QLA_SUCCESS; 1327 rval = QLA_SUCCESS;
1261 1328
@@ -1277,29 +1344,29 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1277 wtime = jiffies + (wait_time * HZ); 1344 wtime = jiffies + (wait_time * HZ);
1278 1345
1279 /* Wait for ISP to finish LIP */ 1346 /* Wait for ISP to finish LIP */
1280 if (!ha->flags.init_done) 1347 if (!vha->flags.init_done)
1281 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n"); 1348 qla_printk(KERN_INFO, ha, "Waiting for LIP to complete...\n");
1282 1349
1283 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n", 1350 DEBUG3(printk("scsi(%ld): Waiting for LIP to complete...\n",
1284 ha->host_no)); 1351 vha->host_no));
1285 1352
1286 do { 1353 do {
1287 rval = qla2x00_get_firmware_state(ha, state); 1354 rval = qla2x00_get_firmware_state(vha, state);
1288 if (rval == QLA_SUCCESS) { 1355 if (rval == QLA_SUCCESS) {
1289 if (state[0] < FSTATE_LOSS_OF_SYNC) { 1356 if (state[0] < FSTATE_LOSS_OF_SYNC) {
1290 ha->device_flags &= ~DFLG_NO_CABLE; 1357 vha->device_flags &= ~DFLG_NO_CABLE;
1291 } 1358 }
1292 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) { 1359 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
1293 DEBUG16(printk("scsi(%ld): fw_state=%x " 1360 DEBUG16(printk("scsi(%ld): fw_state=%x "
1294 "84xx=%x.\n", ha->host_no, state[0], 1361 "84xx=%x.\n", vha->host_no, state[0],
1295 state[2])); 1362 state[2]));
1296 if ((state[2] & FSTATE_LOGGED_IN) && 1363 if ((state[2] & FSTATE_LOGGED_IN) &&
1297 (state[2] & FSTATE_WAITING_FOR_VERIFY)) { 1364 (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
1298 DEBUG16(printk("scsi(%ld): Sending " 1365 DEBUG16(printk("scsi(%ld): Sending "
1299 "verify iocb.\n", ha->host_no)); 1366 "verify iocb.\n", vha->host_no));
1300 1367
1301 cs84xx_time = jiffies; 1368 cs84xx_time = jiffies;
1302 rval = qla84xx_init_chip(ha); 1369 rval = qla84xx_init_chip(vha);
1303 if (rval != QLA_SUCCESS) 1370 if (rval != QLA_SUCCESS)
1304 break; 1371 break;
1305 1372
@@ -1309,13 +1376,13 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1309 mtime += cs84xx_time; 1376 mtime += cs84xx_time;
1310 DEBUG16(printk("scsi(%ld): Increasing " 1377 DEBUG16(printk("scsi(%ld): Increasing "
1311 "wait time by %ld. New time %ld\n", 1378 "wait time by %ld. New time %ld\n",
1312 ha->host_no, cs84xx_time, wtime)); 1379 vha->host_no, cs84xx_time, wtime));
1313 } 1380 }
1314 } else if (state[0] == FSTATE_READY) { 1381 } else if (state[0] == FSTATE_READY) {
1315 DEBUG(printk("scsi(%ld): F/W Ready - OK \n", 1382 DEBUG(printk("scsi(%ld): F/W Ready - OK \n",
1316 ha->host_no)); 1383 vha->host_no));
1317 1384
1318 qla2x00_get_retry_cnt(ha, &ha->retry_count, 1385 qla2x00_get_retry_cnt(vha, &ha->retry_count,
1319 &ha->login_timeout, &ha->r_a_tov); 1386 &ha->login_timeout, &ha->r_a_tov);
1320 1387
1321 rval = QLA_SUCCESS; 1388 rval = QLA_SUCCESS;
@@ -1324,7 +1391,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1324 1391
1325 rval = QLA_FUNCTION_FAILED; 1392 rval = QLA_FUNCTION_FAILED;
1326 1393
1327 if (atomic_read(&ha->loop_down_timer) && 1394 if (atomic_read(&vha->loop_down_timer) &&
1328 state[0] != FSTATE_READY) { 1395 state[0] != FSTATE_READY) {
1329 /* Loop down. Timeout on min_wait for states 1396 /* Loop down. Timeout on min_wait for states
1330 * other than Wait for Login. 1397 * other than Wait for Login.
@@ -1333,7 +1400,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1333 qla_printk(KERN_INFO, ha, 1400 qla_printk(KERN_INFO, ha,
1334 "Cable is unplugged...\n"); 1401 "Cable is unplugged...\n");
1335 1402
1336 ha->device_flags |= DFLG_NO_CABLE; 1403 vha->device_flags |= DFLG_NO_CABLE;
1337 break; 1404 break;
1338 } 1405 }
1339 } 1406 }
@@ -1350,15 +1417,15 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1350 msleep(500); 1417 msleep(500);
1351 1418
1352 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1419 DEBUG3(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1353 ha->host_no, state[0], jiffies)); 1420 vha->host_no, state[0], jiffies));
1354 } while (1); 1421 } while (1);
1355 1422
1356 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", 1423 DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
1357 ha->host_no, state[0], jiffies)); 1424 vha->host_no, state[0], jiffies));
1358 1425
1359 if (rval) { 1426 if (rval) {
1360 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", 1427 DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
1361 ha->host_no)); 1428 vha->host_no));
1362 } 1429 }
1363 1430
1364 return (rval); 1431 return (rval);
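The hunks above keep the firmware-ready poll loop intact and only move the logging and mailbox calls onto the per-port vha; the wait deadline is still extended by however long the optional CS84xx verify IOCB took (mtime/wtime plus cs84xx_time). A minimal userspace sketch of that "poll until ready, but credit time spent on a side operation back to the deadline" pattern; only the 500 ms poll interval comes from the hunk, the stubbed state query and timings are invented for illustration:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define FW_READY 3			/* assumed stand-in for FSTATE_READY */

static long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Stub for the real firmware-state mailbox query. */
static int get_fw_state(int *state)
{
	static int calls;
	*state = (++calls > 4) ? FW_READY : 0;
	return 0;
}

/* Optional verify step whose cost is credited back to the deadline. */
static long verify_chip(void)
{
	long t0 = now_ms();
	usleep(200 * 1000);		/* pretend the verify IOCB took 200 ms */
	return now_ms() - t0;
}

int main(void)
{
	long deadline = now_ms() + 2000;	/* initial wait budget: 2 s */
	int state = 0, need_verify = 1;

	while (now_ms() < deadline) {
		if (get_fw_state(&state) == 0 && state == FW_READY) {
			printf("firmware ready\n");
			return 0;
		}
		if (need_verify) {
			/* Time spent verifying does not eat into the wait. */
			deadline += verify_chip();
			need_verify = 0;
		}
		usleep(500 * 1000);	/* same 500 ms poll interval as the driver */
	}
	printf("firmware ready **** FAILED ****\n");
	return 1;
}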
@@ -1378,7 +1445,7 @@ qla2x00_fw_ready(scsi_qla_host_t *ha)
1378* Kernel context. 1445* Kernel context.
1379*/ 1446*/
1380static int 1447static int
1381qla2x00_configure_hba(scsi_qla_host_t *ha) 1448qla2x00_configure_hba(scsi_qla_host_t *vha)
1382{ 1449{
1383 int rval; 1450 int rval;
1384 uint16_t loop_id; 1451 uint16_t loop_id;
@@ -1388,19 +1455,20 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1388 uint8_t area; 1455 uint8_t area;
1389 uint8_t domain; 1456 uint8_t domain;
1390 char connect_type[22]; 1457 char connect_type[22];
1458 struct qla_hw_data *ha = vha->hw;
1391 1459
1392 /* Get host addresses. */ 1460 /* Get host addresses. */
1393 rval = qla2x00_get_adapter_id(ha, 1461 rval = qla2x00_get_adapter_id(vha,
1394 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 1462 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1395 if (rval != QLA_SUCCESS) { 1463 if (rval != QLA_SUCCESS) {
1396 if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) || 1464 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
1397 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1465 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
1398 DEBUG2(printk("%s(%ld) Loop is in a transition state\n", 1466 DEBUG2(printk("%s(%ld) Loop is in a transition state\n",
1399 __func__, ha->host_no)); 1467 __func__, vha->host_no));
1400 } else { 1468 } else {
1401 qla_printk(KERN_WARNING, ha, 1469 qla_printk(KERN_WARNING, ha,
1402 "ERROR -- Unable to get host loop ID.\n"); 1470 "ERROR -- Unable to get host loop ID.\n");
1403 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1471 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1404 } 1472 }
1405 return (rval); 1473 return (rval);
1406 } 1474 }
@@ -1411,7 +1479,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1411 return (QLA_FUNCTION_FAILED); 1479 return (QLA_FUNCTION_FAILED);
1412 } 1480 }
1413 1481
1414 ha->loop_id = loop_id; 1482 vha->loop_id = loop_id;
1415 1483
1416 /* initialize */ 1484 /* initialize */
1417 ha->min_external_loopid = SNS_FIRST_LOOP_ID; 1485 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
@@ -1421,14 +1489,14 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1421 switch (topo) { 1489 switch (topo) {
1422 case 0: 1490 case 0:
1423 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n", 1491 DEBUG3(printk("scsi(%ld): HBA in NL topology.\n",
1424 ha->host_no)); 1492 vha->host_no));
1425 ha->current_topology = ISP_CFG_NL; 1493 ha->current_topology = ISP_CFG_NL;
1426 strcpy(connect_type, "(Loop)"); 1494 strcpy(connect_type, "(Loop)");
1427 break; 1495 break;
1428 1496
1429 case 1: 1497 case 1:
1430 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 1498 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1431 ha->host_no)); 1499 vha->host_no));
1432 ha->switch_cap = sw_cap; 1500 ha->switch_cap = sw_cap;
1433 ha->current_topology = ISP_CFG_FL; 1501 ha->current_topology = ISP_CFG_FL;
1434 strcpy(connect_type, "(FL_Port)"); 1502 strcpy(connect_type, "(FL_Port)");
@@ -1436,7 +1504,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1436 1504
1437 case 2: 1505 case 2:
1438 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n", 1506 DEBUG3(printk("scsi(%ld): HBA in N P2P topology.\n",
1439 ha->host_no)); 1507 vha->host_no));
1440 ha->operating_mode = P2P; 1508 ha->operating_mode = P2P;
1441 ha->current_topology = ISP_CFG_N; 1509 ha->current_topology = ISP_CFG_N;
1442 strcpy(connect_type, "(N_Port-to-N_Port)"); 1510 strcpy(connect_type, "(N_Port-to-N_Port)");
@@ -1444,7 +1512,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1444 1512
1445 case 3: 1513 case 3:
1446 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 1514 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1447 ha->host_no)); 1515 vha->host_no));
1448 ha->switch_cap = sw_cap; 1516 ha->switch_cap = sw_cap;
1449 ha->operating_mode = P2P; 1517 ha->operating_mode = P2P;
1450 ha->current_topology = ISP_CFG_F; 1518 ha->current_topology = ISP_CFG_F;
@@ -1454,7 +1522,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1454 default: 1522 default:
1455 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. " 1523 DEBUG3(printk("scsi(%ld): HBA in unknown topology %x. "
1456 "Using NL.\n", 1524 "Using NL.\n",
1457 ha->host_no, topo)); 1525 vha->host_no, topo));
1458 ha->current_topology = ISP_CFG_NL; 1526 ha->current_topology = ISP_CFG_NL;
1459 strcpy(connect_type, "(Loop)"); 1527 strcpy(connect_type, "(Loop)");
1460 break; 1528 break;
@@ -1462,29 +1530,31 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1462 1530
1463 /* Save Host port and loop ID. */ 1531 /* Save Host port and loop ID. */
1464 /* byte order - Big Endian */ 1532 /* byte order - Big Endian */
1465 ha->d_id.b.domain = domain; 1533 vha->d_id.b.domain = domain;
1466 ha->d_id.b.area = area; 1534 vha->d_id.b.area = area;
1467 ha->d_id.b.al_pa = al_pa; 1535 vha->d_id.b.al_pa = al_pa;
1468 1536
1469 if (!ha->flags.init_done) 1537 if (!vha->flags.init_done)
1470 qla_printk(KERN_INFO, ha, 1538 qla_printk(KERN_INFO, ha,
1471 "Topology - %s, Host Loop address 0x%x\n", 1539 "Topology - %s, Host Loop address 0x%x\n",
1472 connect_type, ha->loop_id); 1540 connect_type, vha->loop_id);
1473 1541
1474 if (rval) { 1542 if (rval) {
1475 DEBUG2_3(printk("scsi(%ld): FAILED.\n", ha->host_no)); 1543 DEBUG2_3(printk("scsi(%ld): FAILED.\n", vha->host_no));
1476 } else { 1544 } else {
1477 DEBUG3(printk("scsi(%ld): exiting normally.\n", ha->host_no)); 1545 DEBUG3(printk("scsi(%ld): exiting normally.\n", vha->host_no));
1478 } 1546 }
1479 1547
1480 return(rval); 1548 return(rval);
1481} 1549}
1482 1550
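The change running through this whole file is visible in qla2x00_configure_hba above: functions now receive the (possibly virtual) port as scsi_qla_host_t *vha and reach shared adapter state through struct qla_hw_data *ha = vha->hw, so per-port fields (d_id, loop_id, flags, dpc_flags) stay on vha while hardware-wide ones (current_topology, operating_mode, isp_ops) move to ha. A stripped-down sketch of that split; the field subsets are invented purely for illustration:

#include <stdio.h>

/* Hardware-wide state, shared by the physical port and all vports
 * (a tiny invented subset of the real struct qla_hw_data). */
struct qla_hw_data {
	int current_topology;
	int link_data_rate;
};

/* Per-(virtual-)port state; every port carries a pointer to the one
 * shared qla_hw_data (invented subset of scsi_qla_host_t). */
typedef struct scsi_qla_host {
	unsigned long host_no;
	int loop_id;
	struct qla_hw_data *hw;
} scsi_qla_host_t;

/* Same calling convention as the converted driver functions:
 * take the vha, derive the shared ha from it. */
static void configure_hba(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	vha->loop_id = 0x7d;			/* per-port */
	ha->current_topology = 2;		/* shared across ports */
	printf("scsi(%ld): loop_id=%x topo=%x\n",
	    vha->host_no, vha->loop_id, ha->current_topology);
}

int main(void)
{
	struct qla_hw_data hw = { 0 };
	scsi_qla_host_t base  = { .host_no = 0, .hw = &hw };
	scsi_qla_host_t vport = { .host_no = 1, .hw = &hw };

	configure_hba(&base);
	configure_hba(&vport);	/* both ports share the same hw state */
	return 0;
}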
1483static inline void 1551static inline void
1484qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *def) 1552qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
1553 char *def)
1485{ 1554{
1486 char *st, *en; 1555 char *st, *en;
1487 uint16_t index; 1556 uint16_t index;
1557 struct qla_hw_data *ha = vha->hw;
1488 1558
1489 if (memcmp(model, BINZERO, len) != 0) { 1559 if (memcmp(model, BINZERO, len) != 0) {
1490 strncpy(ha->model_number, model, len); 1560 strncpy(ha->model_number, model, len);
@@ -1516,16 +1586,17 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1516 } 1586 }
1517 } 1587 }
1518 if (IS_FWI2_CAPABLE(ha)) 1588 if (IS_FWI2_CAPABLE(ha))
1519 qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc, 1589 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
1520 sizeof(ha->model_desc)); 1590 sizeof(ha->model_desc));
1521} 1591}
1522 1592
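qla2x00_set_model_info above copies a fixed-width, not necessarily NUL-terminated model field out of NVRAM into the shared hw structure and otherwise falls back to a supplied default; the middle of the function is elided from this hunk, so the trailing-blank trimming in the sketch below is an assumption, not a quote of the driver:

#include <stdio.h>
#include <string.h>

#define MODEL_LEN 16

/* Copy a fixed-width NVRAM model field into a C string, trimming
 * trailing spaces; fall back to `def` when the field is all zero. */
static void set_model_info(char *dst, size_t dstsz,
    const unsigned char *model, size_t len, const char *def)
{
	static const unsigned char binzero[MODEL_LEN];

	if (len > dstsz - 1)
		len = dstsz - 1;

	if (memcmp(model, binzero, len) != 0) {
		memcpy(dst, model, len);
		dst[len] = '\0';
		/* strip trailing blanks left by the fixed-width field */
		for (char *en = dst + strlen(dst); en > dst && en[-1] == ' '; en--)
			en[-1] = '\0';
	} else {
		snprintf(dst, dstsz, "%s", def);
	}
}

int main(void)
{
	char out[MODEL_LEN + 1];
	unsigned char nv_model[8] = { 'Q', 'L', 'A', '2', '3', '0', '0', ' ' };
	unsigned char nv_empty[8] = { 0 };

	set_model_info(out, sizeof(out), nv_model, sizeof(nv_model), "QLA23xx");
	printf("model: '%s'\n", out);	/* -> 'QLA2300' */
	set_model_info(out, sizeof(out), nv_empty, sizeof(nv_empty), "QLA23xx");
	printf("model: '%s'\n", out);	/* -> 'QLA23xx' */
	return 0;
}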
1523/* On sparc systems, obtain port and node WWN from firmware 1593/* On sparc systems, obtain port and node WWN from firmware
1524 * properties. 1594 * properties.
1525 */ 1595 */
1526static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv) 1596static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
1527{ 1597{
1528#ifdef CONFIG_SPARC 1598#ifdef CONFIG_SPARC
1599 struct qla_hw_data *ha = vha->hw;
1529 struct pci_dev *pdev = ha->pdev; 1600 struct pci_dev *pdev = ha->pdev;
1530 struct device_node *dp = pci_device_to_OF_node(pdev); 1601 struct device_node *dp = pci_device_to_OF_node(pdev);
1531 const u8 *val; 1602 const u8 *val;
@@ -1555,12 +1626,13 @@ static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, nvram_t *nv)
1555* 0 = success. 1626* 0 = success.
1556*/ 1627*/
1557int 1628int
1558qla2x00_nvram_config(scsi_qla_host_t *ha) 1629qla2x00_nvram_config(scsi_qla_host_t *vha)
1559{ 1630{
1560 int rval; 1631 int rval;
1561 uint8_t chksum = 0; 1632 uint8_t chksum = 0;
1562 uint16_t cnt; 1633 uint16_t cnt;
1563 uint8_t *dptr1, *dptr2; 1634 uint8_t *dptr1, *dptr2;
1635 struct qla_hw_data *ha = vha->hw;
1564 init_cb_t *icb = ha->init_cb; 1636 init_cb_t *icb = ha->init_cb;
1565 nvram_t *nv = ha->nvram; 1637 nvram_t *nv = ha->nvram;
1566 uint8_t *ptr = ha->nvram; 1638 uint8_t *ptr = ha->nvram;
@@ -1576,11 +1648,11 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1576 ha->nvram_base = 0x80; 1648 ha->nvram_base = 0x80;
1577 1649
1578 /* Get NVRAM data and calculate checksum. */ 1650 /* Get NVRAM data and calculate checksum. */
1579 ha->isp_ops->read_nvram(ha, ptr, ha->nvram_base, ha->nvram_size); 1651 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
1580 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++) 1652 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
1581 chksum += *ptr++; 1653 chksum += *ptr++;
1582 1654
1583 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); 1655 DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
1584 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); 1656 DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
1585 1657
1586 /* Bad NVRAM data, set defaults parameters. */ 1658 /* Bad NVRAM data, set defaults parameters. */
@@ -1594,7 +1666,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1594 "invalid -- WWPN) defaults.\n"); 1666 "invalid -- WWPN) defaults.\n");
1595 1667
1596 if (chksum) 1668 if (chksum)
1597 qla2xxx_hw_event_log(ha, HW_EVENT_NVRAM_CHKSUM_ERR, 0, 1669 qla2xxx_hw_event_log(vha, HW_EVENT_NVRAM_CHKSUM_ERR, 0,
1598 MSW(chksum), LSW(chksum)); 1670 MSW(chksum), LSW(chksum));
1599 1671
1600 /* 1672 /*
@@ -1631,7 +1703,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1631 nv->port_name[3] = 224; 1703 nv->port_name[3] = 224;
1632 nv->port_name[4] = 139; 1704 nv->port_name[4] = 139;
1633 1705
1634 qla2xxx_nvram_wwn_from_ofw(ha, nv); 1706 qla2xxx_nvram_wwn_from_ofw(vha, nv);
1635 1707
1636 nv->login_timeout = 4; 1708 nv->login_timeout = 4;
1637 1709
@@ -1684,7 +1756,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1684 strcpy(ha->model_number, "QLA2300"); 1756 strcpy(ha->model_number, "QLA2300");
1685 } 1757 }
1686 } else { 1758 } else {
1687 qla2x00_set_model_info(ha, nv->model_number, 1759 qla2x00_set_model_info(vha, nv->model_number,
1688 sizeof(nv->model_number), "QLA23xx"); 1760 sizeof(nv->model_number), "QLA23xx");
1689 } 1761 }
1690 } else if (IS_QLA2200(ha)) { 1762 } else if (IS_QLA2200(ha)) {
@@ -1760,8 +1832,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1760 ha->serial0 = icb->port_name[5]; 1832 ha->serial0 = icb->port_name[5];
1761 ha->serial1 = icb->port_name[6]; 1833 ha->serial1 = icb->port_name[6];
1762 ha->serial2 = icb->port_name[7]; 1834 ha->serial2 = icb->port_name[7];
1763 ha->node_name = icb->node_name; 1835 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
1764 ha->port_name = icb->port_name; 1836 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
1765 1837
1766 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 1838 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
1767 1839
@@ -1829,10 +1901,10 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1829 icb->response_accumulation_timer = 3; 1901 icb->response_accumulation_timer = 3;
1830 icb->interrupt_delay_timer = 5; 1902 icb->interrupt_delay_timer = 5;
1831 1903
1832 ha->flags.process_response_queue = 1; 1904 vha->flags.process_response_queue = 1;
1833 } else { 1905 } else {
1834 /* Enable ZIO. */ 1906 /* Enable ZIO. */
1835 if (!ha->flags.init_done) { 1907 if (!vha->flags.init_done) {
1836 ha->zio_mode = icb->add_firmware_options[0] & 1908 ha->zio_mode = icb->add_firmware_options[0] &
1837 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 1909 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
1838 ha->zio_timer = icb->interrupt_delay_timer ? 1910 ha->zio_timer = icb->interrupt_delay_timer ?
@@ -1840,12 +1912,12 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1840 } 1912 }
1841 icb->add_firmware_options[0] &= 1913 icb->add_firmware_options[0] &=
1842 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 1914 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
1843 ha->flags.process_response_queue = 0; 1915 vha->flags.process_response_queue = 0;
1844 if (ha->zio_mode != QLA_ZIO_DISABLED) { 1916 if (ha->zio_mode != QLA_ZIO_DISABLED) {
1845 ha->zio_mode = QLA_ZIO_MODE_6; 1917 ha->zio_mode = QLA_ZIO_MODE_6;
1846 1918
1847 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer " 1919 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer "
1848 "delay (%d us).\n", ha->host_no, ha->zio_mode, 1920 "delay (%d us).\n", vha->host_no, ha->zio_mode,
1849 ha->zio_timer * 100)); 1921 ha->zio_timer * 100));
1850 qla_printk(KERN_INFO, ha, 1922 qla_printk(KERN_INFO, ha,
1851 "ZIO mode %d enabled; timer delay (%d us).\n", 1923 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -1853,13 +1925,13 @@ qla2x00_nvram_config(scsi_qla_host_t *ha)
1853 1925
1854 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode; 1926 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
1855 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer; 1927 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
1856 ha->flags.process_response_queue = 1; 1928 vha->flags.process_response_queue = 1;
1857 } 1929 }
1858 } 1930 }
1859 1931
1860 if (rval) { 1932 if (rval) {
1861 DEBUG2_3(printk(KERN_WARNING 1933 DEBUG2_3(printk(KERN_WARNING
1862 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 1934 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
1863 } 1935 }
1864 return (rval); 1936 return (rval);
1865} 1937}
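The NVRAM validation near the top of qla2x00_nvram_config reads nvram_size bytes and accumulates them into an 8-bit checksum; a nonzero sum (or a zeroed id/WWPN field) marks the contents bad, logs HW_EVENT_NVRAM_CHKSUM_ERR and falls back to defaults. The additive check itself is just this, sketched standalone with a made-up 256-byte image:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* NVRAM is valid when the byte-wise sum over the whole image,
 * including the stored checksum byte, is zero (mod 256). */
static uint8_t nvram_sum(const uint8_t *nv, size_t size)
{
	uint8_t chksum = 0;

	for (size_t cnt = 0; cnt < size; cnt++)
		chksum += nv[cnt];
	return chksum;
}

int main(void)
{
	uint8_t nv[256];

	memset(nv, 0xA5, sizeof(nv));
	nv[255] = 0;					/* slot for the checksum byte */
	nv[255] = (uint8_t)(0 - nvram_sum(nv, sizeof(nv)));	/* force the sum to zero */

	printf("checksum %s\n", nvram_sum(nv, sizeof(nv)) ? "BAD" : "ok");
	nv[10] ^= 0xFF;					/* corrupt one byte */
	printf("checksum %s\n", nvram_sum(nv, sizeof(nv)) ? "BAD" : "ok");
	return 0;
}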
@@ -1870,10 +1942,10 @@ qla2x00_rport_del(void *data)
1870 fc_port_t *fcport = data; 1942 fc_port_t *fcport = data;
1871 struct fc_rport *rport; 1943 struct fc_rport *rport;
1872 1944
1873 spin_lock_irq(fcport->ha->host->host_lock); 1945 spin_lock_irq(fcport->vha->host->host_lock);
1874 rport = fcport->drport; 1946 rport = fcport->drport;
1875 fcport->drport = NULL; 1947 fcport->drport = NULL;
1876 spin_unlock_irq(fcport->ha->host->host_lock); 1948 spin_unlock_irq(fcport->vha->host->host_lock);
1877 if (rport) 1949 if (rport)
1878 fc_remote_port_delete(rport); 1950 fc_remote_port_delete(rport);
1879} 1951}
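qla2x00_rport_del above keeps its shape and only follows the fcport->ha to fcport->vha rename: the rport pointer is detached from the fcport under the SCSI host lock, and fc_remote_port_delete() runs only after the lock is dropped, so the potentially blocking teardown happens unlocked. A userspace analogue of that detach-then-release pattern, with a pthread mutex standing in for host_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rport { int id; };

struct fcport {
	pthread_mutex_t *host_lock;	/* stands in for vha->host->host_lock */
	struct rport *drport;		/* deferred-delete remote port, if any */
};

/* Detach under the lock, tear down outside it. */
static void rport_del(struct fcport *fcport)
{
	struct rport *rport;

	pthread_mutex_lock(fcport->host_lock);
	rport = fcport->drport;
	fcport->drport = NULL;
	pthread_mutex_unlock(fcport->host_lock);

	if (rport) {
		printf("deleting rport %d outside the lock\n", rport->id);
		free(rport);		/* stands in for fc_remote_port_delete() */
	}
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct rport *rp = malloc(sizeof(*rp));
	struct fcport fcp = { .host_lock = &lock, .drport = rp };

	rp->id = 42;
	rport_del(&fcp);
	rport_del(&fcp);	/* second call finds nothing to delete */
	return 0;
}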
@@ -1886,7 +1958,7 @@ qla2x00_rport_del(void *data)
1886 * Returns a pointer to the allocated fcport, or NULL, if none available. 1958 * Returns a pointer to the allocated fcport, or NULL, if none available.
1887 */ 1959 */
1888static fc_port_t * 1960static fc_port_t *
1889qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags) 1961qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
1890{ 1962{
1891 fc_port_t *fcport; 1963 fc_port_t *fcport;
1892 1964
@@ -1895,8 +1967,8 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1895 return NULL; 1967 return NULL;
1896 1968
1897 /* Setup fcport template structure. */ 1969 /* Setup fcport template structure. */
1898 fcport->ha = ha; 1970 fcport->vha = vha;
1899 fcport->vp_idx = ha->vp_idx; 1971 fcport->vp_idx = vha->vp_idx;
1900 fcport->port_type = FCT_UNKNOWN; 1972 fcport->port_type = FCT_UNKNOWN;
1901 fcport->loop_id = FC_NO_LOOP_ID; 1973 fcport->loop_id = FC_NO_LOOP_ID;
1902 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1974 atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1919,101 +1991,97 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1919 * 2 = database was full and device was not configured. 1991 * 2 = database was full and device was not configured.
1920 */ 1992 */
1921static int 1993static int
1922qla2x00_configure_loop(scsi_qla_host_t *ha) 1994qla2x00_configure_loop(scsi_qla_host_t *vha)
1923{ 1995{
1924 int rval; 1996 int rval;
1925 unsigned long flags, save_flags; 1997 unsigned long flags, save_flags;
1926 1998 struct qla_hw_data *ha = vha->hw;
1927 rval = QLA_SUCCESS; 1999 rval = QLA_SUCCESS;
1928 2000
1929 /* Get Initiator ID */ 2001 /* Get Initiator ID */
1930 if (test_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags)) { 2002 if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
1931 rval = qla2x00_configure_hba(ha); 2003 rval = qla2x00_configure_hba(vha);
1932 if (rval != QLA_SUCCESS) { 2004 if (rval != QLA_SUCCESS) {
1933 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n", 2005 DEBUG(printk("scsi(%ld): Unable to configure HBA.\n",
1934 ha->host_no)); 2006 vha->host_no));
1935 return (rval); 2007 return (rval);
1936 } 2008 }
1937 } 2009 }
1938 2010
1939 save_flags = flags = ha->dpc_flags; 2011 save_flags = flags = vha->dpc_flags;
1940 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n", 2012 DEBUG(printk("scsi(%ld): Configure loop -- dpc flags =0x%lx\n",
1941 ha->host_no, flags)); 2013 vha->host_no, flags));
1942 2014
1943 /* 2015 /*
1944 * If we have both an RSCN and PORT UPDATE pending then handle them 2016 * If we have both an RSCN and PORT UPDATE pending then handle them
1945 * both at the same time. 2017 * both at the same time.
1946 */ 2018 */
1947 clear_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2019 clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1948 clear_bit(RSCN_UPDATE, &ha->dpc_flags); 2020 clear_bit(RSCN_UPDATE, &vha->dpc_flags);
1949 2021
1950 /* Determine what we need to do */ 2022 /* Determine what we need to do */
1951 if (ha->current_topology == ISP_CFG_FL && 2023 if (ha->current_topology == ISP_CFG_FL &&
1952 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2024 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1953 2025
1954 ha->flags.rscn_queue_overflow = 1; 2026 vha->flags.rscn_queue_overflow = 1;
1955 set_bit(RSCN_UPDATE, &flags); 2027 set_bit(RSCN_UPDATE, &flags);
1956 2028
1957 } else if (ha->current_topology == ISP_CFG_F && 2029 } else if (ha->current_topology == ISP_CFG_F &&
1958 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2030 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
1959 2031
1960 ha->flags.rscn_queue_overflow = 1; 2032 vha->flags.rscn_queue_overflow = 1;
1961 set_bit(RSCN_UPDATE, &flags); 2033 set_bit(RSCN_UPDATE, &flags);
1962 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2034 clear_bit(LOCAL_LOOP_UPDATE, &flags);
1963 2035
1964 } else if (ha->current_topology == ISP_CFG_N) { 2036 } else if (ha->current_topology == ISP_CFG_N) {
1965 clear_bit(RSCN_UPDATE, &flags); 2037 clear_bit(RSCN_UPDATE, &flags);
1966 2038
1967 } else if (!ha->flags.online || 2039 } else if (!vha->flags.online ||
1968 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2040 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
1969 2041
1970 ha->flags.rscn_queue_overflow = 1; 2042 vha->flags.rscn_queue_overflow = 1;
1971 set_bit(RSCN_UPDATE, &flags); 2043 set_bit(RSCN_UPDATE, &flags);
1972 set_bit(LOCAL_LOOP_UPDATE, &flags); 2044 set_bit(LOCAL_LOOP_UPDATE, &flags);
1973 } 2045 }
1974 2046
1975 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) { 2047 if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
1976 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2048 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1977 rval = QLA_FUNCTION_FAILED; 2049 rval = QLA_FUNCTION_FAILED;
1978 } else { 2050 else
1979 rval = qla2x00_configure_local_loop(ha); 2051 rval = qla2x00_configure_local_loop(vha);
1980 }
1981 } 2052 }
1982 2053
1983 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) { 2054 if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
1984 if (LOOP_TRANSITION(ha)) { 2055 if (LOOP_TRANSITION(vha))
1985 rval = QLA_FUNCTION_FAILED; 2056 rval = QLA_FUNCTION_FAILED;
1986 } else { 2057 else
1987 rval = qla2x00_configure_fabric(ha); 2058 rval = qla2x00_configure_fabric(vha);
1988 }
1989 } 2059 }
1990 2060
1991 if (rval == QLA_SUCCESS) { 2061 if (rval == QLA_SUCCESS) {
1992 if (atomic_read(&ha->loop_down_timer) || 2062 if (atomic_read(&vha->loop_down_timer) ||
1993 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2063 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1994 rval = QLA_FUNCTION_FAILED; 2064 rval = QLA_FUNCTION_FAILED;
1995 } else { 2065 } else {
1996 atomic_set(&ha->loop_state, LOOP_READY); 2066 atomic_set(&vha->loop_state, LOOP_READY);
1997 2067
1998 DEBUG(printk("scsi(%ld): LOOP READY\n", ha->host_no)); 2068 DEBUG(printk("scsi(%ld): LOOP READY\n", vha->host_no));
1999 } 2069 }
2000 } 2070 }
2001 2071
2002 if (rval) { 2072 if (rval) {
2003 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n", 2073 DEBUG2_3(printk("%s(%ld): *** FAILED ***\n",
2004 __func__, ha->host_no)); 2074 __func__, vha->host_no));
2005 } else { 2075 } else {
2006 DEBUG3(printk("%s: exiting normally\n", __func__)); 2076 DEBUG3(printk("%s: exiting normally\n", __func__));
2007 } 2077 }
2008 2078
2009 /* Restore state if a resync event occurred during processing */ 2079 /* Restore state if a resync event occurred during processing */
2010 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2080 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
2011 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2081 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2012 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2082 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2013 if (test_bit(RSCN_UPDATE, &save_flags)) { 2083 if (test_bit(RSCN_UPDATE, &save_flags))
2014 ha->flags.rscn_queue_overflow = 1; 2084 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2015 set_bit(RSCN_UPDATE, &ha->dpc_flags);
2016 }
2017 } 2085 }
2018 2086
2019 return (rval); 2087 return (rval);
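qla2x00_configure_loop above snapshots dpc_flags, clears the pending LOCAL_LOOP_UPDATE/RSCN_UPDATE bits, decides from the topology which scans to run, and at the end re-arms whichever of the saved bits still matter if LOOP_RESYNC_NEEDED fired while it was working. A compact sketch of that snapshot/clear/restore flow over a plain flags word; the kernel uses atomic test_bit/set_bit/clear_bit and also folds in the topology decision and rscn_queue_overflow handling, which this simplification omits:

#include <stdio.h>

enum {
	LOCAL_LOOP_UPDATE  = 1u << 0,
	RSCN_UPDATE        = 1u << 1,
	LOOP_RESYNC_NEEDED = 1u << 2,
};

static unsigned long dpc_flags = LOCAL_LOOP_UPDATE | RSCN_UPDATE;

static int scan_local_loop(void)  { puts("local loop scan"); return 0; }
static int scan_fabric(void)      { puts("fabric scan");     return 0; }

static int configure_loop(void)
{
	/* Snapshot the work, then clear it from the shared word so new
	 * events raised during the scan are not lost. */
	unsigned long flags = dpc_flags, save_flags = dpc_flags;
	int rval = 0;

	dpc_flags &= ~(LOCAL_LOOP_UPDATE | RSCN_UPDATE);

	if (flags & LOCAL_LOOP_UPDATE)
		rval = scan_local_loop();
	if (rval == 0 && (flags & RSCN_UPDATE))
		rval = scan_fabric();

	/* If a resync was requested while we ran, put the saved work
	 * back so the next pass repeats it. */
	if (dpc_flags & LOOP_RESYNC_NEEDED)
		dpc_flags |= save_flags & (LOCAL_LOOP_UPDATE | RSCN_UPDATE);

	return rval;
}

int main(void)
{
	configure_loop();
	printf("dpc_flags after run: %#lx\n", dpc_flags);
	return 0;
}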
@@ -2032,7 +2100,7 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2032 * 0 = success. 2100 * 0 = success.
2033 */ 2101 */
2034static int 2102static int
2035qla2x00_configure_local_loop(scsi_qla_host_t *ha) 2103qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2036{ 2104{
2037 int rval, rval2; 2105 int rval, rval2;
2038 int found_devs; 2106 int found_devs;
@@ -2044,18 +2112,18 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2044 char *id_iter; 2112 char *id_iter;
2045 uint16_t loop_id; 2113 uint16_t loop_id;
2046 uint8_t domain, area, al_pa; 2114 uint8_t domain, area, al_pa;
2047 scsi_qla_host_t *pha = to_qla_parent(ha); 2115 struct qla_hw_data *ha = vha->hw;
2048 2116
2049 found_devs = 0; 2117 found_devs = 0;
2050 new_fcport = NULL; 2118 new_fcport = NULL;
2051 entries = MAX_FIBRE_DEVICES; 2119 entries = MAX_FIBRE_DEVICES;
2052 2120
2053 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", ha->host_no)); 2121 DEBUG3(printk("scsi(%ld): Getting FCAL position map\n", vha->host_no));
2054 DEBUG3(qla2x00_get_fcal_position_map(ha, NULL)); 2122 DEBUG3(qla2x00_get_fcal_position_map(vha, NULL));
2055 2123
2056 /* Get list of logged in devices. */ 2124 /* Get list of logged in devices. */
2057 memset(ha->gid_list, 0, GID_LIST_SIZE); 2125 memset(ha->gid_list, 0, GID_LIST_SIZE);
2058 rval = qla2x00_get_id_list(ha, ha->gid_list, ha->gid_list_dma, 2126 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2059 &entries); 2127 &entries);
2060 if (rval != QLA_SUCCESS) 2128 if (rval != QLA_SUCCESS)
2061 goto cleanup_allocation; 2129 goto cleanup_allocation;
@@ -2066,7 +2134,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2066 entries * sizeof(struct gid_list_info))); 2134 entries * sizeof(struct gid_list_info)));
2067 2135
2068 /* Allocate temporary fcport for any new fcports discovered. */ 2136 /* Allocate temporary fcport for any new fcports discovered. */
2069 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2137 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2070 if (new_fcport == NULL) { 2138 if (new_fcport == NULL) {
2071 rval = QLA_MEMORY_ALLOC_FAILED; 2139 rval = QLA_MEMORY_ALLOC_FAILED;
2072 goto cleanup_allocation; 2140 goto cleanup_allocation;
@@ -2076,17 +2144,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2076 /* 2144 /*
2077 * Mark local devices that were present with FCF_DEVICE_LOST for now. 2145 * Mark local devices that were present with FCF_DEVICE_LOST for now.
2078 */ 2146 */
2079 list_for_each_entry(fcport, &pha->fcports, list) { 2147 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2080 if (fcport->vp_idx != ha->vp_idx)
2081 continue;
2082
2083 if (atomic_read(&fcport->state) == FCS_ONLINE && 2148 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2084 fcport->port_type != FCT_BROADCAST && 2149 fcport->port_type != FCT_BROADCAST &&
2085 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 2150 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
2086 2151
2087 DEBUG(printk("scsi(%ld): Marking port lost, " 2152 DEBUG(printk("scsi(%ld): Marking port lost, "
2088 "loop_id=0x%04x\n", 2153 "loop_id=0x%04x\n",
2089 ha->host_no, fcport->loop_id)); 2154 vha->host_no, fcport->loop_id));
2090 2155
2091 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2156 atomic_set(&fcport->state, FCS_DEVICE_LOST);
2092 fcport->flags &= ~FCF_FARP_DONE; 2157 fcport->flags &= ~FCF_FARP_DONE;
@@ -2113,7 +2178,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2113 2178
2114 /* Bypass if not same domain and area of adapter. */ 2179 /* Bypass if not same domain and area of adapter. */
2115 if (area && domain && 2180 if (area && domain &&
2116 (area != ha->d_id.b.area || domain != ha->d_id.b.domain)) 2181 (area != vha->d_id.b.area || domain != vha->d_id.b.domain))
2117 continue; 2182 continue;
2118 2183
2119 /* Bypass invalid local loop ID. */ 2184 /* Bypass invalid local loop ID. */
@@ -2125,26 +2190,23 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2125 new_fcport->d_id.b.area = area; 2190 new_fcport->d_id.b.area = area;
2126 new_fcport->d_id.b.al_pa = al_pa; 2191 new_fcport->d_id.b.al_pa = al_pa;
2127 new_fcport->loop_id = loop_id; 2192 new_fcport->loop_id = loop_id;
2128 new_fcport->vp_idx = ha->vp_idx; 2193 new_fcport->vp_idx = vha->vp_idx;
2129 rval2 = qla2x00_get_port_database(ha, new_fcport, 0); 2194 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2130 if (rval2 != QLA_SUCCESS) { 2195 if (rval2 != QLA_SUCCESS) {
2131 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2196 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
2132 "information -- get_port_database=%x, " 2197 "information -- get_port_database=%x, "
2133 "loop_id=0x%04x\n", 2198 "loop_id=0x%04x\n",
2134 ha->host_no, rval2, new_fcport->loop_id)); 2199 vha->host_no, rval2, new_fcport->loop_id));
2135 DEBUG2(printk("scsi(%ld): Scheduling resync...\n", 2200 DEBUG2(printk("scsi(%ld): Scheduling resync...\n",
2136 ha->host_no)); 2201 vha->host_no));
2137 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 2202 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2138 continue; 2203 continue;
2139 } 2204 }
2140 2205
2141 /* Check for matching device in port list. */ 2206 /* Check for matching device in port list. */
2142 found = 0; 2207 found = 0;
2143 fcport = NULL; 2208 fcport = NULL;
2144 list_for_each_entry(fcport, &pha->fcports, list) { 2209 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2145 if (fcport->vp_idx != ha->vp_idx)
2146 continue;
2147
2148 if (memcmp(new_fcport->port_name, fcport->port_name, 2210 if (memcmp(new_fcport->port_name, fcport->port_name,
2149 WWN_SIZE)) 2211 WWN_SIZE))
2150 continue; 2212 continue;
@@ -2164,17 +2226,15 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2164 if (!found) { 2226 if (!found) {
2165 /* New device, add to fcports list. */ 2227 /* New device, add to fcports list. */
2166 new_fcport->flags &= ~FCF_PERSISTENT_BOUND; 2228 new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
2167 if (ha->parent) { 2229 if (vha->vp_idx) {
2168 new_fcport->ha = ha; 2230 new_fcport->vha = vha;
2169 new_fcport->vp_idx = ha->vp_idx; 2231 new_fcport->vp_idx = vha->vp_idx;
2170 list_add_tail(&new_fcport->vp_fcport,
2171 &ha->vp_fcports);
2172 } 2232 }
2173 list_add_tail(&new_fcport->list, &pha->fcports); 2233 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2174 2234
2175 /* Allocate a new replacement fcport. */ 2235 /* Allocate a new replacement fcport. */
2176 fcport = new_fcport; 2236 fcport = new_fcport;
2177 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2237 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2178 if (new_fcport == NULL) { 2238 if (new_fcport == NULL) {
2179 rval = QLA_MEMORY_ALLOC_FAILED; 2239 rval = QLA_MEMORY_ALLOC_FAILED;
2180 goto cleanup_allocation; 2240 goto cleanup_allocation;
@@ -2185,7 +2245,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2185 /* Base iIDMA settings on HBA port speed. */ 2245 /* Base iIDMA settings on HBA port speed. */
2186 fcport->fp_speed = ha->link_data_rate; 2246 fcport->fp_speed = ha->link_data_rate;
2187 2247
2188 qla2x00_update_fcport(ha, fcport); 2248 qla2x00_update_fcport(vha, fcport);
2189 2249
2190 found_devs++; 2250 found_devs++;
2191 } 2251 }
@@ -2195,24 +2255,25 @@ cleanup_allocation:
2195 2255
2196 if (rval != QLA_SUCCESS) { 2256 if (rval != QLA_SUCCESS) {
2197 DEBUG2(printk("scsi(%ld): Configure local loop error exit: " 2257 DEBUG2(printk("scsi(%ld): Configure local loop error exit: "
2198 "rval=%x\n", ha->host_no, rval)); 2258 "rval=%x\n", vha->host_no, rval));
2199 } 2259 }
2200 2260
2201 if (found_devs) { 2261 if (found_devs) {
2202 ha->device_flags |= DFLG_LOCAL_DEVICES; 2262 vha->device_flags |= DFLG_LOCAL_DEVICES;
2203 ha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES; 2263 vha->device_flags &= ~DFLG_RETRY_LOCAL_DEVICES;
2204 } 2264 }
2205 2265
2206 return (rval); 2266 return (rval);
2207} 2267}
2208 2268
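A recurring simplification in this hunk (and the rest of the patch) is that each vha now keeps its own vp_fcports list, so loops that previously walked the parent's fcports and filtered on fcport->vp_idx become a plain walk of vha->vp_fcports. A tiny list-walk sketch with a hand-rolled singly linked list standing in for the kernel's list_for_each_entry; the broadcast/fabric-device checks the driver also applies are left out:

#include <stdio.h>

struct fcport {
	int loop_id;
	int online;
	struct fcport *next;
};

struct vha {
	unsigned long host_no;
	struct fcport *vp_fcports;	/* per-port list; no vp_idx filter needed */
};

/* Mark every currently-online local port as lost, as the hunk above
 * does before re-reading the firmware's logged-in-device list. */
static void mark_local_ports_lost(struct vha *vha)
{
	for (struct fcport *fcport = vha->vp_fcports; fcport; fcport = fcport->next) {
		if (!fcport->online)
			continue;
		printf("scsi(%ld): Marking port lost, loop_id=0x%04x\n",
		    vha->host_no, fcport->loop_id);
		fcport->online = 0;
	}
}

int main(void)
{
	struct fcport b = { .loop_id = 0x02, .online = 1, .next = NULL };
	struct fcport a = { .loop_id = 0x01, .online = 1, .next = &b };
	struct vha vha = { .host_no = 0, .vp_fcports = &a };

	mark_local_ports_lost(&vha);
	return 0;
}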
2209static void 2269static void
2210qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2270qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2211{ 2271{
2212#define LS_UNKNOWN 2 2272#define LS_UNKNOWN 2
2213 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 2273 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
2214 int rval; 2274 int rval;
2215 uint16_t mb[6]; 2275 uint16_t mb[6];
2276 struct qla_hw_data *ha = vha->hw;
2216 2277
2217 if (!IS_IIDMA_CAPABLE(ha)) 2278 if (!IS_IIDMA_CAPABLE(ha))
2218 return; 2279 return;
@@ -2221,12 +2282,12 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2221 fcport->fp_speed > ha->link_data_rate) 2282 fcport->fp_speed > ha->link_data_rate)
2222 return; 2283 return;
2223 2284
2224 rval = qla2x00_set_idma_speed(ha, fcport->loop_id, fcport->fp_speed, 2285 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
2225 mb); 2286 mb);
2226 if (rval != QLA_SUCCESS) { 2287 if (rval != QLA_SUCCESS) {
2227 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA " 2288 DEBUG2(printk("scsi(%ld): Unable to adjust iIDMA "
2228 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n", 2289 "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x %04x.\n",
2229 ha->host_no, fcport->port_name[0], fcport->port_name[1], 2290 vha->host_no, fcport->port_name[0], fcport->port_name[1],
2230 fcport->port_name[2], fcport->port_name[3], 2291 fcport->port_name[2], fcport->port_name[3],
2231 fcport->port_name[4], fcport->port_name[5], 2292 fcport->port_name[4], fcport->port_name[5],
2232 fcport->port_name[6], fcport->port_name[7], rval, 2293 fcport->port_name[6], fcport->port_name[7], rval,
@@ -2244,10 +2305,11 @@ qla2x00_iidma_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2244} 2305}
2245 2306
2246static void 2307static void
2247qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport) 2308qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2248{ 2309{
2249 struct fc_rport_identifiers rport_ids; 2310 struct fc_rport_identifiers rport_ids;
2250 struct fc_rport *rport; 2311 struct fc_rport *rport;
2312 struct qla_hw_data *ha = vha->hw;
2251 2313
2252 if (fcport->drport) 2314 if (fcport->drport)
2253 qla2x00_rport_del(fcport); 2315 qla2x00_rport_del(fcport);
@@ -2257,15 +2319,15 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2257 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2319 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2258 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2320 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2259 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2321 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2260 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2322 fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
2261 if (!rport) { 2323 if (!rport) {
2262 qla_printk(KERN_WARNING, ha, 2324 qla_printk(KERN_WARNING, ha,
2263 "Unable to allocate fc remote port!\n"); 2325 "Unable to allocate fc remote port!\n");
2264 return; 2326 return;
2265 } 2327 }
2266 spin_lock_irq(fcport->ha->host->host_lock); 2328 spin_lock_irq(fcport->vha->host->host_lock);
2267 *((fc_port_t **)rport->dd_data) = fcport; 2329 *((fc_port_t **)rport->dd_data) = fcport;
2268 spin_unlock_irq(fcport->ha->host->host_lock); 2330 spin_unlock_irq(fcport->vha->host->host_lock);
2269 2331
2270 rport->supported_classes = fcport->supported_classes; 2332 rport->supported_classes = fcport->supported_classes;
2271 2333
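The rport registration above hands the FC transport class the remote port's 24-bit address built as domain << 16 | area << 8 | al_pa, the same layout the driver compares elsewhere in this patch via d_id.b24. A one-function sketch of packing and unpacking that ID:

#include <stdint.h>
#include <stdio.h>

/* Pack an FC_ID from its domain/area/AL_PA bytes, as done when
 * filling fc_rport_identifiers.port_id in the hunk above. */
static uint32_t fc_port_id(uint8_t domain, uint8_t area, uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}

int main(void)
{
	uint32_t id = fc_port_id(0x01, 0x02, 0xef);

	printf("port_id = %06x\n", id);			/* 0102ef */
	printf("domain  = %02x\n", (id >> 16) & 0xff);
	printf("area    = %02x\n", (id >> 8) & 0xff);
	printf("al_pa   = %02x\n", id & 0xff);
	return 0;
}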
@@ -2293,23 +2355,23 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2293 * Kernel context. 2355 * Kernel context.
2294 */ 2356 */
2295void 2357void
2296qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2358qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2297{ 2359{
2298 scsi_qla_host_t *pha = to_qla_parent(ha); 2360 struct qla_hw_data *ha = vha->hw;
2299 2361
2300 fcport->ha = ha; 2362 fcport->vha = vha;
2301 fcport->login_retry = 0; 2363 fcport->login_retry = 0;
2302 fcport->port_login_retry_count = pha->port_down_retry_count * 2364 fcport->port_login_retry_count = ha->port_down_retry_count *
2303 PORT_RETRY_TIME; 2365 PORT_RETRY_TIME;
2304 atomic_set(&fcport->port_down_timer, pha->port_down_retry_count * 2366 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
2305 PORT_RETRY_TIME); 2367 PORT_RETRY_TIME);
2306 fcport->flags &= ~FCF_LOGIN_NEEDED; 2368 fcport->flags &= ~FCF_LOGIN_NEEDED;
2307 2369
2308 qla2x00_iidma_fcport(ha, fcport); 2370 qla2x00_iidma_fcport(vha, fcport);
2309 2371
2310 atomic_set(&fcport->state, FCS_ONLINE); 2372 atomic_set(&fcport->state, FCS_ONLINE);
2311 2373
2312 qla2x00_reg_remote_port(ha, fcport); 2374 qla2x00_reg_remote_port(vha, fcport);
2313} 2375}
2314 2376
2315/* 2377/*
@@ -2324,7 +2386,7 @@ qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2324 * BIT_0 = error 2386 * BIT_0 = error
2325 */ 2387 */
2326static int 2388static int
2327qla2x00_configure_fabric(scsi_qla_host_t *ha) 2389qla2x00_configure_fabric(scsi_qla_host_t *vha)
2328{ 2390{
2329 int rval, rval2; 2391 int rval, rval2;
2330 fc_port_t *fcport, *fcptemp; 2392 fc_port_t *fcport, *fcptemp;
@@ -2332,25 +2394,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2332 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2394 uint16_t mb[MAILBOX_REGISTER_COUNT];
2333 uint16_t loop_id; 2395 uint16_t loop_id;
2334 LIST_HEAD(new_fcports); 2396 LIST_HEAD(new_fcports);
2335 scsi_qla_host_t *pha = to_qla_parent(ha); 2397 struct qla_hw_data *ha = vha->hw;
2398 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2336 2399
2337 /* If FL port exists, then SNS is present */ 2400 /* If FL port exists, then SNS is present */
2338 if (IS_FWI2_CAPABLE(ha)) 2401 if (IS_FWI2_CAPABLE(ha))
2339 loop_id = NPH_F_PORT; 2402 loop_id = NPH_F_PORT;
2340 else 2403 else
2341 loop_id = SNS_FL_PORT; 2404 loop_id = SNS_FL_PORT;
2342 rval = qla2x00_get_port_name(ha, loop_id, ha->fabric_node_name, 1); 2405 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
2343 if (rval != QLA_SUCCESS) { 2406 if (rval != QLA_SUCCESS) {
2344 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL " 2407 DEBUG2(printk("scsi(%ld): MBC_GET_PORT_NAME Failed, No FL "
2345 "Port\n", ha->host_no)); 2408 "Port\n", vha->host_no));
2346 2409
2347 ha->device_flags &= ~SWITCH_FOUND; 2410 vha->device_flags &= ~SWITCH_FOUND;
2348 return (QLA_SUCCESS); 2411 return (QLA_SUCCESS);
2349 } 2412 }
2350 ha->device_flags |= SWITCH_FOUND; 2413 vha->device_flags |= SWITCH_FOUND;
2351 2414
2352 /* Mark devices that need re-synchronization. */ 2415 /* Mark devices that need re-synchronization. */
2353 rval2 = qla2x00_device_resync(ha); 2416 rval2 = qla2x00_device_resync(vha);
2354 if (rval2 == QLA_RSCNS_HANDLED) { 2417 if (rval2 == QLA_RSCNS_HANDLED) {
2355 /* No point doing the scan, just continue. */ 2418 /* No point doing the scan, just continue. */
2356 return (QLA_SUCCESS); 2419 return (QLA_SUCCESS);
@@ -2358,15 +2421,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2358 do { 2421 do {
2359 /* FDMI support. */ 2422 /* FDMI support. */
2360 if (ql2xfdmienable && 2423 if (ql2xfdmienable &&
2361 test_and_clear_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags)) 2424 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
2362 qla2x00_fdmi_register(ha); 2425 qla2x00_fdmi_register(vha);
2363 2426
2364 /* Ensure we are logged into the SNS. */ 2427 /* Ensure we are logged into the SNS. */
2365 if (IS_FWI2_CAPABLE(ha)) 2428 if (IS_FWI2_CAPABLE(ha))
2366 loop_id = NPH_SNS; 2429 loop_id = NPH_SNS;
2367 else 2430 else
2368 loop_id = SIMPLE_NAME_SERVER; 2431 loop_id = SIMPLE_NAME_SERVER;
2369 ha->isp_ops->fabric_login(ha, loop_id, 0xff, 0xff, 2432 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
2370 0xfc, mb, BIT_1 | BIT_0); 2433 0xfc, mb, BIT_1 | BIT_0);
2371 if (mb[0] != MBS_COMMAND_COMPLETE) { 2434 if (mb[0] != MBS_COMMAND_COMPLETE) {
2372 DEBUG2(qla_printk(KERN_INFO, ha, 2435 DEBUG2(qla_printk(KERN_INFO, ha,
@@ -2376,29 +2439,29 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2376 return (QLA_SUCCESS); 2439 return (QLA_SUCCESS);
2377 } 2440 }
2378 2441
2379 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags)) { 2442 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
2380 if (qla2x00_rft_id(ha)) { 2443 if (qla2x00_rft_id(vha)) {
2381 /* EMPTY */ 2444 /* EMPTY */
2382 DEBUG2(printk("scsi(%ld): Register FC-4 " 2445 DEBUG2(printk("scsi(%ld): Register FC-4 "
2383 "TYPE failed.\n", ha->host_no)); 2446 "TYPE failed.\n", vha->host_no));
2384 } 2447 }
2385 if (qla2x00_rff_id(ha)) { 2448 if (qla2x00_rff_id(vha)) {
2386 /* EMPTY */ 2449 /* EMPTY */
2387 DEBUG2(printk("scsi(%ld): Register FC-4 " 2450 DEBUG2(printk("scsi(%ld): Register FC-4 "
2388 "Features failed.\n", ha->host_no)); 2451 "Features failed.\n", vha->host_no));
2389 } 2452 }
2390 if (qla2x00_rnn_id(ha)) { 2453 if (qla2x00_rnn_id(vha)) {
2391 /* EMPTY */ 2454 /* EMPTY */
2392 DEBUG2(printk("scsi(%ld): Register Node Name " 2455 DEBUG2(printk("scsi(%ld): Register Node Name "
2393 "failed.\n", ha->host_no)); 2456 "failed.\n", vha->host_no));
2394 } else if (qla2x00_rsnn_nn(ha)) { 2457 } else if (qla2x00_rsnn_nn(vha)) {
2395 /* EMPTY */ 2458 /* EMPTY */
2396 DEBUG2(printk("scsi(%ld): Register Symbolic " 2459 DEBUG2(printk("scsi(%ld): Register Symbolic "
2397 "Node Name failed.\n", ha->host_no)); 2460 "Node Name failed.\n", vha->host_no));
2398 } 2461 }
2399 } 2462 }
2400 2463
2401 rval = qla2x00_find_all_fabric_devs(ha, &new_fcports); 2464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
2402 if (rval != QLA_SUCCESS) 2465 if (rval != QLA_SUCCESS)
2403 break; 2466 break;
2404 2467
@@ -2406,24 +2469,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2406 * Logout all previous fabric devices marked lost, except 2469 * Logout all previous fabric devices marked lost, except
2407 * tape devices. 2470 * tape devices.
2408 */ 2471 */
2409 list_for_each_entry(fcport, &pha->fcports, list) { 2472 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2410 if (fcport->vp_idx !=ha->vp_idx) 2473 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2411 continue;
2412
2413 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2414 break; 2474 break;
2415 2475
2416 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 2476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
2417 continue; 2477 continue;
2418 2478
2419 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 2479 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
2420 qla2x00_mark_device_lost(ha, fcport, 2480 qla2x00_mark_device_lost(vha, fcport,
2421 ql2xplogiabsentdevice, 0); 2481 ql2xplogiabsentdevice, 0);
2422 if (fcport->loop_id != FC_NO_LOOP_ID && 2482 if (fcport->loop_id != FC_NO_LOOP_ID &&
2423 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2483 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2424 fcport->port_type != FCT_INITIATOR && 2484 fcport->port_type != FCT_INITIATOR &&
2425 fcport->port_type != FCT_BROADCAST) { 2485 fcport->port_type != FCT_BROADCAST) {
2426 ha->isp_ops->fabric_logout(ha, 2486 ha->isp_ops->fabric_logout(vha,
2427 fcport->loop_id, 2487 fcport->loop_id,
2428 fcport->d_id.b.domain, 2488 fcport->d_id.b.domain,
2429 fcport->d_id.b.area, 2489 fcport->d_id.b.area,
@@ -2434,18 +2494,15 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2434 } 2494 }
2435 2495
2436 /* Starting free loop ID. */ 2496 /* Starting free loop ID. */
2437 next_loopid = pha->min_external_loopid; 2497 next_loopid = ha->min_external_loopid;
2438 2498
2439 /* 2499 /*
2440 * Scan through our port list and login entries that need to be 2500 * Scan through our port list and login entries that need to be
2441 * logged in. 2501 * logged in.
2442 */ 2502 */
2443 list_for_each_entry(fcport, &pha->fcports, list) { 2503 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2444 if (fcport->vp_idx != ha->vp_idx) 2504 if (atomic_read(&vha->loop_down_timer) ||
2445 continue; 2505 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2446
2447 if (atomic_read(&ha->loop_down_timer) ||
2448 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2449 break; 2506 break;
2450 2507
2451 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2508 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
@@ -2455,14 +2512,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2455 if (fcport->loop_id == FC_NO_LOOP_ID) { 2512 if (fcport->loop_id == FC_NO_LOOP_ID) {
2456 fcport->loop_id = next_loopid; 2513 fcport->loop_id = next_loopid;
2457 rval = qla2x00_find_new_loop_id( 2514 rval = qla2x00_find_new_loop_id(
2458 to_qla_parent(ha), fcport); 2515 base_vha, fcport);
2459 if (rval != QLA_SUCCESS) { 2516 if (rval != QLA_SUCCESS) {
2460 /* Ran out of IDs to use */ 2517 /* Ran out of IDs to use */
2461 break; 2518 break;
2462 } 2519 }
2463 } 2520 }
2464 /* Login and update database */ 2521 /* Login and update database */
2465 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2522 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2466 } 2523 }
2467 2524
2468 /* Exit if out of loop IDs. */ 2525 /* Exit if out of loop IDs. */
@@ -2474,31 +2531,26 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2474 * Login and add the new devices to our port list. 2531 * Login and add the new devices to our port list.
2475 */ 2532 */
2476 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 2533 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
2477 if (atomic_read(&ha->loop_down_timer) || 2534 if (atomic_read(&vha->loop_down_timer) ||
2478 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2535 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2479 break; 2536 break;
2480 2537
2481 /* Find a new loop ID to use. */ 2538 /* Find a new loop ID to use. */
2482 fcport->loop_id = next_loopid; 2539 fcport->loop_id = next_loopid;
2483 rval = qla2x00_find_new_loop_id(to_qla_parent(ha), 2540 rval = qla2x00_find_new_loop_id(base_vha, fcport);
2484 fcport);
2485 if (rval != QLA_SUCCESS) { 2541 if (rval != QLA_SUCCESS) {
2486 /* Ran out of IDs to use */ 2542 /* Ran out of IDs to use */
2487 break; 2543 break;
2488 } 2544 }
2489 2545
2490 /* Login and update database */ 2546 /* Login and update database */
2491 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2547 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
2492 2548
2493 if (ha->parent) { 2549 if (vha->vp_idx) {
2494 fcport->ha = ha; 2550 fcport->vha = vha;
2495 fcport->vp_idx = ha->vp_idx; 2551 fcport->vp_idx = vha->vp_idx;
2496 list_add_tail(&fcport->vp_fcport, 2552 }
2497 &ha->vp_fcports); 2553 list_move_tail(&fcport->list, &vha->vp_fcports);
2498 list_move_tail(&fcport->list,
2499 &ha->parent->fcports);
2500 } else
2501 list_move_tail(&fcport->list, &ha->fcports);
2502 } 2554 }
2503 } while (0); 2555 } while (0);
2504 2556
@@ -2510,7 +2562,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2510 2562
2511 if (rval) { 2563 if (rval) {
2512 DEBUG2(printk("scsi(%ld): Configure fabric error exit: " 2564 DEBUG2(printk("scsi(%ld): Configure fabric error exit: "
2513 "rval=%d\n", ha->host_no, rval)); 2565 "rval=%d\n", vha->host_no, rval));
2514 } 2566 }
2515 2567
2516 return (rval); 2568 return (rval);
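At the end of qla2x00_configure_fabric above, devices collected on the temporary new_fcports list are logged in and then moved onto the owning port's vp_fcports with list_move_tail(), replacing the old split between ha->fcports and a per-vport shadow list. A small sketch of that "collect on a scratch list, then move survivors onto the live list" step, using a minimal singly linked list in place of the kernel's struct list_head (the real loop also bails out when it runs out of loop IDs, which is omitted here):

#include <stdio.h>
#include <stddef.h>

struct fcport {
	int loop_id;
	struct fcport *next;
};

/* Append node to the tail of *list (a stand-in for list_move_tail
 * onto vha->vp_fcports). */
static void move_tail(struct fcport **list, struct fcport *node)
{
	node->next = NULL;
	while (*list)
		list = &(*list)->next;
	*list = node;
}

static int fabric_dev_login(struct fcport *fcport)
{
	printf("login loop_id=0x%04x\n", fcport->loop_id);
	return 0;	/* pretend every login succeeds */
}

int main(void)
{
	struct fcport n2 = { .loop_id = 0x82, .next = NULL };
	struct fcport n1 = { .loop_id = 0x81, .next = &n2 };
	struct fcport *new_fcports = &n1;	/* scratch list from the fabric scan */
	struct fcport *vp_fcports = NULL;	/* the port's live list */

	/* Log each newly found device in, then move it to the live list. */
	while (new_fcports) {
		struct fcport *fcport = new_fcports;

		new_fcports = fcport->next;
		if (fabric_dev_login(fcport) == 0)
			move_tail(&vp_fcports, fcport);
	}

	for (struct fcport *p = vp_fcports; p; p = p->next)
		printf("vp_fcports has 0x%04x\n", p->loop_id);
	return 0;
}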
@@ -2531,7 +2583,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2531 * Kernel context. 2583 * Kernel context.
2532 */ 2584 */
2533static int 2585static int
2534qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports) 2586qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
2587 struct list_head *new_fcports)
2535{ 2588{
2536 int rval; 2589 int rval;
2537 uint16_t loop_id; 2590 uint16_t loop_id;
@@ -2542,11 +2595,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2542 int swl_idx; 2595 int swl_idx;
2543 int first_dev, last_dev; 2596 int first_dev, last_dev;
2544 port_id_t wrap, nxt_d_id; 2597 port_id_t wrap, nxt_d_id;
2545 int vp_index; 2598 struct qla_hw_data *ha = vha->hw;
2546 int empty_vp_index; 2599 struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
2547 int found_vp;
2548 scsi_qla_host_t *vha;
2549 scsi_qla_host_t *pha = to_qla_parent(ha);
2550 2600
2551 rval = QLA_SUCCESS; 2601 rval = QLA_SUCCESS;
2552 2602
@@ -2555,43 +2605,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2555 if (!swl) { 2605 if (!swl) {
2556 /*EMPTY*/ 2606 /*EMPTY*/
2557 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback " 2607 DEBUG2(printk("scsi(%ld): GID_PT allocations failed, fallback "
2558 "on GA_NXT\n", ha->host_no)); 2608 "on GA_NXT\n", vha->host_no));
2559 } else { 2609 } else {
2560 if (qla2x00_gid_pt(ha, swl) != QLA_SUCCESS) { 2610 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
2561 kfree(swl); 2611 kfree(swl);
2562 swl = NULL; 2612 swl = NULL;
2563 } else if (qla2x00_gpn_id(ha, swl) != QLA_SUCCESS) { 2613 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
2564 kfree(swl); 2614 kfree(swl);
2565 swl = NULL; 2615 swl = NULL;
2566 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2616 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
2567 kfree(swl); 2617 kfree(swl);
2568 swl = NULL; 2618 swl = NULL;
2569 } else if (ql2xiidmaenable && 2619 } else if (ql2xiidmaenable &&
2570 qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2620 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
2571 qla2x00_gpsc(ha, swl); 2621 qla2x00_gpsc(vha, swl);
2572 } 2622 }
2573 } 2623 }
2574 swl_idx = 0; 2624 swl_idx = 0;
2575 2625
2576 /* Allocate temporary fcport for any new fcports discovered. */ 2626 /* Allocate temporary fcport for any new fcports discovered. */
2577 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2627 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2578 if (new_fcport == NULL) { 2628 if (new_fcport == NULL) {
2579 kfree(swl); 2629 kfree(swl);
2580 return (QLA_MEMORY_ALLOC_FAILED); 2630 return (QLA_MEMORY_ALLOC_FAILED);
2581 } 2631 }
2582 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2632 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2583 new_fcport->vp_idx = ha->vp_idx;
2584 /* Set start port ID scan at adapter ID. */ 2633 /* Set start port ID scan at adapter ID. */
2585 first_dev = 1; 2634 first_dev = 1;
2586 last_dev = 0; 2635 last_dev = 0;
2587 2636
2588 /* Starting free loop ID. */ 2637 /* Starting free loop ID. */
2589 loop_id = pha->min_external_loopid; 2638 loop_id = ha->min_external_loopid;
2590 for (; loop_id <= ha->last_loop_id; loop_id++) { 2639 for (; loop_id <= ha->max_loop_id; loop_id++) {
2591 if (qla2x00_is_reserved_id(ha, loop_id)) 2640 if (qla2x00_is_reserved_id(vha, loop_id))
2592 continue; 2641 continue;
2593 2642
2594 if (atomic_read(&ha->loop_down_timer) || LOOP_TRANSITION(ha)) 2643 if (atomic_read(&vha->loop_down_timer) || LOOP_TRANSITION(vha))
2595 break; 2644 break;
2596 2645
2597 if (swl != NULL) { 2646 if (swl != NULL) {
@@ -2614,7 +2663,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2614 } 2663 }
2615 } else { 2664 } else {
2616 /* Send GA_NXT to the switch */ 2665 /* Send GA_NXT to the switch */
2617 rval = qla2x00_ga_nxt(ha, new_fcport); 2666 rval = qla2x00_ga_nxt(vha, new_fcport);
2618 if (rval != QLA_SUCCESS) { 2667 if (rval != QLA_SUCCESS) {
2619 qla_printk(KERN_WARNING, ha, 2668 qla_printk(KERN_WARNING, ha,
2620 "SNS scan failed -- assuming zero-entry " 2669 "SNS scan failed -- assuming zero-entry "
@@ -2635,44 +2684,31 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2635 first_dev = 0; 2684 first_dev = 0;
2636 } else if (new_fcport->d_id.b24 == wrap.b24) { 2685 } else if (new_fcport->d_id.b24 == wrap.b24) {
2637 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n", 2686 DEBUG2(printk("scsi(%ld): device wrap (%02x%02x%02x)\n",
2638 ha->host_no, new_fcport->d_id.b.domain, 2687 vha->host_no, new_fcport->d_id.b.domain,
2639 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa)); 2688 new_fcport->d_id.b.area, new_fcport->d_id.b.al_pa));
2640 break; 2689 break;
2641 } 2690 }
2642 2691
2643 /* Bypass if same physical adapter. */ 2692 /* Bypass if same physical adapter. */
2644 if (new_fcport->d_id.b24 == pha->d_id.b24) 2693 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
2645 continue; 2694 continue;
2646 2695
2647 /* Bypass virtual ports of the same host. */ 2696 /* Bypass virtual ports of the same host. */
2648 if (pha->num_vhosts) { 2697 found = 0;
2649 for_each_mapped_vp_idx(pha, vp_index) { 2698 if (ha->num_vhosts) {
2650 empty_vp_index = 1; 2699 list_for_each_entry(vp, &ha->vp_list, list) {
2651 found_vp = 0; 2700 if (new_fcport->d_id.b24 == vp->d_id.b24) {
2652 list_for_each_entry(vha, &pha->vp_list, 2701 found = 1;
2653 vp_list) {
2654 if (vp_index == vha->vp_idx) {
2655 empty_vp_index = 0;
2656 found_vp = 1;
2657 break;
2658 }
2659 }
2660
2661 if (empty_vp_index)
2662 continue;
2663
2664 if (found_vp &&
2665 new_fcport->d_id.b24 == vha->d_id.b24)
2666 break; 2702 break;
2703 }
2667 } 2704 }
2668 2705 if (found)
2669 if (vp_index <= pha->max_npiv_vports)
2670 continue; 2706 continue;
2671 } 2707 }
2672 2708
2673 /* Bypass if same domain and area of adapter. */ 2709 /* Bypass if same domain and area of adapter. */
2674 if (((new_fcport->d_id.b24 & 0xffff00) == 2710 if (((new_fcport->d_id.b24 & 0xffff00) ==
2675 (ha->d_id.b24 & 0xffff00)) && ha->current_topology == 2711 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
2676 ISP_CFG_FL) 2712 ISP_CFG_FL)
2677 continue; 2713 continue;
2678 2714
@@ -2682,9 +2718,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2682 2718
2683 /* Locate matching device in database. */ 2719 /* Locate matching device in database. */
2684 found = 0; 2720 found = 0;
2685 list_for_each_entry(fcport, &pha->fcports, list) { 2721 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2686 if (new_fcport->vp_idx != fcport->vp_idx)
2687 continue;
2688 if (memcmp(new_fcport->port_name, fcport->port_name, 2722 if (memcmp(new_fcport->port_name, fcport->port_name,
2689 WWN_SIZE)) 2723 WWN_SIZE))
2690 continue; 2724 continue;
@@ -2728,7 +2762,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2728 (fcport->flags & FCF_TAPE_PRESENT) == 0 && 2762 (fcport->flags & FCF_TAPE_PRESENT) == 0 &&
2729 fcport->port_type != FCT_INITIATOR && 2763 fcport->port_type != FCT_INITIATOR &&
2730 fcport->port_type != FCT_BROADCAST) { 2764 fcport->port_type != FCT_BROADCAST) {
2731 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 2765 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2732 fcport->d_id.b.domain, fcport->d_id.b.area, 2766 fcport->d_id.b.domain, fcport->d_id.b.area,
2733 fcport->d_id.b.al_pa); 2767 fcport->d_id.b.al_pa);
2734 fcport->loop_id = FC_NO_LOOP_ID; 2768 fcport->loop_id = FC_NO_LOOP_ID;
@@ -2739,27 +2773,25 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2739 2773
2740 if (found) 2774 if (found)
2741 continue; 2775 continue;
2742
2743 /* If device was not in our fcports list, then add it. */ 2776 /* If device was not in our fcports list, then add it. */
2744 list_add_tail(&new_fcport->list, new_fcports); 2777 list_add_tail(&new_fcport->list, new_fcports);
2745 2778
2746 /* Allocate a new replacement fcport. */ 2779 /* Allocate a new replacement fcport. */
2747 nxt_d_id.b24 = new_fcport->d_id.b24; 2780 nxt_d_id.b24 = new_fcport->d_id.b24;
2748 new_fcport = qla2x00_alloc_fcport(ha, GFP_KERNEL); 2781 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2749 if (new_fcport == NULL) { 2782 if (new_fcport == NULL) {
2750 kfree(swl); 2783 kfree(swl);
2751 return (QLA_MEMORY_ALLOC_FAILED); 2784 return (QLA_MEMORY_ALLOC_FAILED);
2752 } 2785 }
2753 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2786 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2754 new_fcport->d_id.b24 = nxt_d_id.b24; 2787 new_fcport->d_id.b24 = nxt_d_id.b24;
2755 new_fcport->vp_idx = ha->vp_idx;
2756 } 2788 }
2757 2789
2758 kfree(swl); 2790 kfree(swl);
2759 kfree(new_fcport); 2791 kfree(new_fcport);
2760 2792
2761 if (!list_empty(new_fcports)) 2793 if (!list_empty(new_fcports))
2762 ha->device_flags |= DFLG_FABRIC_DEVICES; 2794 vha->device_flags |= DFLG_FABRIC_DEVICES;
2763 2795
2764 return (rval); 2796 return (rval);
2765} 2797}
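The scan loop in qla2x00_find_all_fabric_devs above remembers the first port ID the name server returns and stops when GA_NXT wraps back around to it, and it now skips the base port and every vport on ha->vp_list by 24-bit ID instead of the old vp_idx bookkeeping. A sketch of just the wrap-detection and self-skip part over a made-up name-server table:

#include <stdint.h>
#include <stdio.h>

/* Pretend name server: GA_NXT returns the next port ID after `id`,
 * cycling through a fixed table (assumed data, for illustration). */
static uint32_t ga_nxt(uint32_t id)
{
	static const uint32_t table[] = { 0x010200, 0x010300, 0x010400 };
	const size_t n = sizeof(table) / sizeof(table[0]);

	for (size_t i = 0; i < n; i++)
		if (table[i] == id)
			return table[(i + 1) % n];
	return table[0];
}

int main(void)
{
	uint32_t own_id = 0x010300;	/* our own adapter, to be skipped */
	uint32_t id = own_id;
	uint32_t wrap = 0;
	int first_dev = 1;

	for (;;) {
		id = ga_nxt(id);

		if (first_dev) {
			wrap = id;	/* remember where the walk started */
			first_dev = 0;
		} else if (id == wrap) {
			printf("device wrap (%06x)\n", id);
			break;		/* seen everything once */
		}

		if (id == own_id)
			continue;	/* bypass our own port, as the driver does */

		printf("fabric device %06x\n", id);
	}
	return 0;
}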
@@ -2779,13 +2811,14 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2779 * Kernel context. 2811 * Kernel context.
2780 */ 2812 */
2781static int 2813static int
2782qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev) 2814qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
2783{ 2815{
2784 int rval; 2816 int rval;
2785 int found; 2817 int found;
2786 fc_port_t *fcport; 2818 fc_port_t *fcport;
2787 uint16_t first_loop_id; 2819 uint16_t first_loop_id;
2788 scsi_qla_host_t *pha = to_qla_parent(ha); 2820 struct qla_hw_data *ha = vha->hw;
2821 struct scsi_qla_host *vp;
2789 2822
2790 rval = QLA_SUCCESS; 2823 rval = QLA_SUCCESS;
2791 2824
@@ -2794,17 +2827,15 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2794 2827
2795 for (;;) { 2828 for (;;) {
2796 /* Skip loop ID if already used by adapter. */ 2829 /* Skip loop ID if already used by adapter. */
2797 if (dev->loop_id == ha->loop_id) { 2830 if (dev->loop_id == vha->loop_id)
2798 dev->loop_id++; 2831 dev->loop_id++;
2799 }
2800 2832
2801 /* Skip reserved loop IDs. */ 2833 /* Skip reserved loop IDs. */
2802 while (qla2x00_is_reserved_id(ha, dev->loop_id)) { 2834 while (qla2x00_is_reserved_id(vha, dev->loop_id))
2803 dev->loop_id++; 2835 dev->loop_id++;
2804 }
2805 2836
2806 /* Reset loop ID if passed the end. */ 2837 /* Reset loop ID if passed the end. */
2807 if (dev->loop_id > ha->last_loop_id) { 2838 if (dev->loop_id > ha->max_loop_id) {
2808 /* first loop ID. */ 2839 /* first loop ID. */
2809 dev->loop_id = ha->min_external_loopid; 2840 dev->loop_id = ha->min_external_loopid;
2810 } 2841 }
@@ -2812,12 +2843,17 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2812 /* Check for loop ID being already in use. */ 2843 /* Check for loop ID being already in use. */
2813 found = 0; 2844 found = 0;
2814 fcport = NULL; 2845 fcport = NULL;
2815 list_for_each_entry(fcport, &pha->fcports, list) { 2846 list_for_each_entry(vp, &ha->vp_list, list) {
2816 if (fcport->loop_id == dev->loop_id && fcport != dev) { 2847 list_for_each_entry(fcport, &vp->vp_fcports, list) {
2817 /* ID possibly in use */ 2848 if (fcport->loop_id == dev->loop_id &&
2818 found++; 2849 fcport != dev) {
2819 break; 2850 /* ID possibly in use */
2851 found++;
2852 break;
2853 }
2820 } 2854 }
2855 if (found)
2856 break;
2821 } 2857 }
2822 2858
2823 /* If not in use then it is free to use. */ 2859 /* If not in use then it is free to use. */
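qla2x00_find_new_loop_id above now checks a candidate loop ID against the fcport lists of every vport on ha->vp_list rather than one flat list; the search itself is a circular walk that skips the adapter's own ID and reserved IDs, wraps from max_loop_id back to min_external_loopid, and (in the part of the function elided from this hunk) gives up once it comes back to where it started. A compact standalone version of that walk, with the limits and reserved/in-use sets invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define MIN_EXTERNAL_LOOPID 0x7e	/* invented limits for the example */
#define MAX_LOOP_ID         0x85
#define OWN_LOOP_ID         0x7f

static bool id_reserved(int id)  { return id == 0x80; }		/* assumed reserved ID */
static bool id_in_use(int id)    { return id == 0x81 || id == 0x82; }

/* Circular search for a free loop ID; returns -1 once every ID has
 * been tried (the exhaustion check mirrors first_loop_id in the
 * driver, whose handling is not shown in the hunk above). */
static int find_new_loop_id(int start)
{
	int id = start, first = -1;

	for (;;) {
		if (id == OWN_LOOP_ID)
			id++;
		while (id_reserved(id))
			id++;
		if (id > MAX_LOOP_ID)
			id = MIN_EXTERNAL_LOOPID;

		if (first == -1)
			first = id;
		else if (id == first)
			return -1;	/* wrapped all the way around: none free */

		if (!id_in_use(id))
			return id;
		id++;
	}
}

int main(void)
{
	printf("free loop id: 0x%x\n", find_new_loop_id(0x7f));
	return 0;
}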
@@ -2850,7 +2886,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2850 * Kernel context. 2886 * Kernel context.
2851 */ 2887 */
2852static int 2888static int
2853qla2x00_device_resync(scsi_qla_host_t *ha) 2889qla2x00_device_resync(scsi_qla_host_t *vha)
2854{ 2890{
2855 int rval; 2891 int rval;
2856 uint32_t mask; 2892 uint32_t mask;
@@ -2859,14 +2895,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2859 uint8_t rscn_out_iter; 2895 uint8_t rscn_out_iter;
2860 uint8_t format; 2896 uint8_t format;
2861 port_id_t d_id; 2897 port_id_t d_id;
2862 scsi_qla_host_t *pha = to_qla_parent(ha);
2863 2898
2864 rval = QLA_RSCNS_HANDLED; 2899 rval = QLA_RSCNS_HANDLED;
2865 2900
2866 while (ha->rscn_out_ptr != ha->rscn_in_ptr || 2901 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
2867 ha->flags.rscn_queue_overflow) { 2902 vha->flags.rscn_queue_overflow) {
2868 2903
2869 rscn_entry = ha->rscn_queue[ha->rscn_out_ptr]; 2904 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
2870 format = MSB(MSW(rscn_entry)); 2905 format = MSB(MSW(rscn_entry));
2871 d_id.b.domain = LSB(MSW(rscn_entry)); 2906 d_id.b.domain = LSB(MSW(rscn_entry));
2872 d_id.b.area = MSB(LSW(rscn_entry)); 2907 d_id.b.area = MSB(LSW(rscn_entry));
@@ -2874,37 +2909,37 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2874 2909
2875 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = " 2910 DEBUG(printk("scsi(%ld): RSCN queue entry[%d] = "
2876 "[%02x/%02x%02x%02x].\n", 2911 "[%02x/%02x%02x%02x].\n",
2877 ha->host_no, ha->rscn_out_ptr, format, d_id.b.domain, 2912 vha->host_no, vha->rscn_out_ptr, format, d_id.b.domain,
2878 d_id.b.area, d_id.b.al_pa)); 2913 d_id.b.area, d_id.b.al_pa));
2879 2914
2880 ha->rscn_out_ptr++; 2915 vha->rscn_out_ptr++;
2881 if (ha->rscn_out_ptr == MAX_RSCN_COUNT) 2916 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
2882 ha->rscn_out_ptr = 0; 2917 vha->rscn_out_ptr = 0;
2883 2918
2884 /* Skip duplicate entries. */ 2919 /* Skip duplicate entries. */
2885 for (rscn_out_iter = ha->rscn_out_ptr; 2920 for (rscn_out_iter = vha->rscn_out_ptr;
2886 !ha->flags.rscn_queue_overflow && 2921 !vha->flags.rscn_queue_overflow &&
2887 rscn_out_iter != ha->rscn_in_ptr; 2922 rscn_out_iter != vha->rscn_in_ptr;
2888 rscn_out_iter = (rscn_out_iter == 2923 rscn_out_iter = (rscn_out_iter ==
2889 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) { 2924 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
2890 2925
2891 if (rscn_entry != ha->rscn_queue[rscn_out_iter]) 2926 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
2892 break; 2927 break;
2893 2928
2894 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue " 2929 DEBUG(printk("scsi(%ld): Skipping duplicate RSCN queue "
2895 "entry found at [%d].\n", ha->host_no, 2930 "entry found at [%d].\n", vha->host_no,
2896 rscn_out_iter)); 2931 rscn_out_iter));
2897 2932
2898 ha->rscn_out_ptr = rscn_out_iter; 2933 vha->rscn_out_ptr = rscn_out_iter;
2899 } 2934 }
2900 2935
2901 /* Queue overflow, set switch default case. */ 2936 /* Queue overflow, set switch default case. */
2902 if (ha->flags.rscn_queue_overflow) { 2937 if (vha->flags.rscn_queue_overflow) {
2903 DEBUG(printk("scsi(%ld): device_resync: rscn " 2938 DEBUG(printk("scsi(%ld): device_resync: rscn "
2904 "overflow.\n", ha->host_no)); 2939 "overflow.\n", vha->host_no));
2905 2940
2906 format = 3; 2941 format = 3;
2907 ha->flags.rscn_queue_overflow = 0; 2942 vha->flags.rscn_queue_overflow = 0;
2908 } 2943 }
2909 2944
2910 switch (format) { 2945 switch (format) {
@@ -2920,16 +2955,13 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2920 default: 2955 default:
2921 mask = 0x0; 2956 mask = 0x0;
2922 d_id.b24 = 0; 2957 d_id.b24 = 0;
2923 ha->rscn_out_ptr = ha->rscn_in_ptr; 2958 vha->rscn_out_ptr = vha->rscn_in_ptr;
2924 break; 2959 break;
2925 } 2960 }
2926 2961
2927 rval = QLA_SUCCESS; 2962 rval = QLA_SUCCESS;
2928 2963
2929 list_for_each_entry(fcport, &pha->fcports, list) { 2964 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2930 if (fcport->vp_idx != ha->vp_idx)
2931 continue;
2932
2933 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2965 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2934 (fcport->d_id.b24 & mask) != d_id.b24 || 2966 (fcport->d_id.b24 & mask) != d_id.b24 ||
2935 fcport->port_type == FCT_BROADCAST) 2967 fcport->port_type == FCT_BROADCAST)
@@ -2938,7 +2970,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2938 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2970 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2939 if (format != 3 || 2971 if (format != 3 ||
2940 fcport->port_type != FCT_INITIATOR) { 2972 fcport->port_type != FCT_INITIATOR) {
2941 qla2x00_mark_device_lost(ha, fcport, 2973 qla2x00_mark_device_lost(vha, fcport,
2942 0, 0); 2974 0, 0);
2943 } 2975 }
2944 } 2976 }
@@ -2965,30 +2997,31 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2965 * Kernel context. 2997 * Kernel context.
2966 */ 2998 */
2967static int 2999static int
2968qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport, 3000qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
2969 uint16_t *next_loopid) 3001 uint16_t *next_loopid)
2970{ 3002{
2971 int rval; 3003 int rval;
2972 int retry; 3004 int retry;
2973 uint8_t opts; 3005 uint8_t opts;
3006 struct qla_hw_data *ha = vha->hw;
2974 3007
2975 rval = QLA_SUCCESS; 3008 rval = QLA_SUCCESS;
2976 retry = 0; 3009 retry = 0;
2977 3010
2978 rval = qla2x00_fabric_login(ha, fcport, next_loopid); 3011 rval = qla2x00_fabric_login(vha, fcport, next_loopid);
2979 if (rval == QLA_SUCCESS) { 3012 if (rval == QLA_SUCCESS) {
2980 /* Send an ADISC to tape devices.*/ 3013 /* Send an ADISC to tape devices.*/
2981 opts = 0; 3014 opts = 0;
2982 if (fcport->flags & FCF_TAPE_PRESENT) 3015 if (fcport->flags & FCF_TAPE_PRESENT)
2983 opts |= BIT_1; 3016 opts |= BIT_1;
2984 rval = qla2x00_get_port_database(ha, fcport, opts); 3017 rval = qla2x00_get_port_database(vha, fcport, opts);
2985 if (rval != QLA_SUCCESS) { 3018 if (rval != QLA_SUCCESS) {
2986 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3019 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
2987 fcport->d_id.b.domain, fcport->d_id.b.area, 3020 fcport->d_id.b.domain, fcport->d_id.b.area,
2988 fcport->d_id.b.al_pa); 3021 fcport->d_id.b.al_pa);
2989 qla2x00_mark_device_lost(ha, fcport, 1, 0); 3022 qla2x00_mark_device_lost(vha, fcport, 1, 0);
2990 } else { 3023 } else {
2991 qla2x00_update_fcport(ha, fcport); 3024 qla2x00_update_fcport(vha, fcport);
2992 } 3025 }
2993 } 3026 }
2994 3027
@@ -3010,13 +3043,14 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3010 * 3 - Fatal error 3043 * 3 - Fatal error
3011 */ 3044 */
3012int 3045int
3013qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport, 3046qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3014 uint16_t *next_loopid) 3047 uint16_t *next_loopid)
3015{ 3048{
3016 int rval; 3049 int rval;
3017 int retry; 3050 int retry;
3018 uint16_t tmp_loopid; 3051 uint16_t tmp_loopid;
3019 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3052 uint16_t mb[MAILBOX_REGISTER_COUNT];
3053 struct qla_hw_data *ha = vha->hw;
3020 3054
3021 retry = 0; 3055 retry = 0;
3022 tmp_loopid = 0; 3056 tmp_loopid = 0;
@@ -3024,11 +3058,11 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3024 for (;;) { 3058 for (;;) {
3025 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x " 3059 DEBUG(printk("scsi(%ld): Trying Fabric Login w/loop id 0x%04x "
3026 "for port %02x%02x%02x.\n", 3060 "for port %02x%02x%02x.\n",
3027 ha->host_no, fcport->loop_id, fcport->d_id.b.domain, 3061 vha->host_no, fcport->loop_id, fcport->d_id.b.domain,
3028 fcport->d_id.b.area, fcport->d_id.b.al_pa)); 3062 fcport->d_id.b.area, fcport->d_id.b.al_pa));
3029 3063
3030 /* Login fcport on switch. */ 3064 /* Login fcport on switch. */
3031 ha->isp_ops->fabric_login(ha, fcport->loop_id, 3065 ha->isp_ops->fabric_login(vha, fcport->loop_id,
3032 fcport->d_id.b.domain, fcport->d_id.b.area, 3066 fcport->d_id.b.domain, fcport->d_id.b.area,
3033 fcport->d_id.b.al_pa, mb, BIT_0); 3067 fcport->d_id.b.al_pa, mb, BIT_0);
3034 if (mb[0] == MBS_PORT_ID_USED) { 3068 if (mb[0] == MBS_PORT_ID_USED) {
@@ -3084,7 +3118,7 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3084 * Loop ID already used, try next loop ID. 3118 * Loop ID already used, try next loop ID.
3085 */ 3119 */
3086 fcport->loop_id++; 3120 fcport->loop_id++;
3087 rval = qla2x00_find_new_loop_id(ha, fcport); 3121 rval = qla2x00_find_new_loop_id(vha, fcport);
3088 if (rval != QLA_SUCCESS) { 3122 if (rval != QLA_SUCCESS) {
3089 /* Ran out of loop IDs to use */ 3123 /* Ran out of loop IDs to use */
3090 break; 3124 break;
@@ -3096,10 +3130,10 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3096 * dead. 3130 * dead.
3097 */ 3131 */
3098 *next_loopid = fcport->loop_id; 3132 *next_loopid = fcport->loop_id;
3099 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3133 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3100 fcport->d_id.b.domain, fcport->d_id.b.area, 3134 fcport->d_id.b.domain, fcport->d_id.b.area,
3101 fcport->d_id.b.al_pa); 3135 fcport->d_id.b.al_pa);
3102 qla2x00_mark_device_lost(ha, fcport, 1, 0); 3136 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3103 3137
3104 rval = 1; 3138 rval = 1;
3105 break; 3139 break;
@@ -3109,12 +3143,12 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3109 */ 3143 */
3110 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x " 3144 DEBUG2(printk("%s(%ld): failed=%x port_id=%02x%02x%02x "
3111 "loop_id=%x jiffies=%lx.\n", 3145 "loop_id=%x jiffies=%lx.\n",
3112 __func__, ha->host_no, mb[0], 3146 __func__, vha->host_no, mb[0],
3113 fcport->d_id.b.domain, fcport->d_id.b.area, 3147 fcport->d_id.b.domain, fcport->d_id.b.area,
3114 fcport->d_id.b.al_pa, fcport->loop_id, jiffies)); 3148 fcport->d_id.b.al_pa, fcport->loop_id, jiffies));
3115 3149
3116 *next_loopid = fcport->loop_id; 3150 *next_loopid = fcport->loop_id;
3117 ha->isp_ops->fabric_logout(ha, fcport->loop_id, 3151 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
3118 fcport->d_id.b.domain, fcport->d_id.b.area, 3152 fcport->d_id.b.domain, fcport->d_id.b.area,
3119 fcport->d_id.b.al_pa); 3153 fcport->d_id.b.al_pa);
3120 fcport->loop_id = FC_NO_LOOP_ID; 3154 fcport->loop_id = FC_NO_LOOP_ID;
@@ -3142,13 +3176,13 @@ qla2x00_fabric_login(scsi_qla_host_t *ha, fc_port_t *fcport,
3142 * 3 - Fatal error 3176 * 3 - Fatal error
3143 */ 3177 */
3144int 3178int
3145qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport) 3179qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
3146{ 3180{
3147 int rval; 3181 int rval;
3148 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3182 uint16_t mb[MAILBOX_REGISTER_COUNT];
3149 3183
3150 memset(mb, 0, sizeof(mb)); 3184 memset(mb, 0, sizeof(mb));
3151 rval = qla2x00_login_local_device(ha, fcport, mb, BIT_0); 3185 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
3152 if (rval == QLA_SUCCESS) { 3186 if (rval == QLA_SUCCESS) {
3153 /* Interrogate mailbox registers for any errors */ 3187 /* Interrogate mailbox registers for any errors */
3154 if (mb[0] == MBS_COMMAND_ERROR) 3188 if (mb[0] == MBS_COMMAND_ERROR)
@@ -3172,57 +3206,57 @@ qla2x00_local_device_login(scsi_qla_host_t *ha, fc_port_t *fcport)
3172 * 0 = success 3206 * 0 = success
3173 */ 3207 */
3174int 3208int
3175qla2x00_loop_resync(scsi_qla_host_t *ha) 3209qla2x00_loop_resync(scsi_qla_host_t *vha)
3176{ 3210{
3177 int rval; 3211 int rval = QLA_SUCCESS;
3178 uint32_t wait_time; 3212 uint32_t wait_time;
3179 3213 struct qla_hw_data *ha = vha->hw;
3180 rval = QLA_SUCCESS; 3214 struct req_que *req = ha->req_q_map[0];
3181 3215 struct rsp_que *rsp = ha->rsp_q_map[0];
3182 atomic_set(&ha->loop_state, LOOP_UPDATE); 3216
3183 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3217 atomic_set(&vha->loop_state, LOOP_UPDATE);
3184 if (ha->flags.online) { 3218 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3185 if (!(rval = qla2x00_fw_ready(ha))) { 3219 if (vha->flags.online) {
3220 if (!(rval = qla2x00_fw_ready(vha))) {
3186 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3221 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3187 wait_time = 256; 3222 wait_time = 256;
3188 do { 3223 do {
3189 atomic_set(&ha->loop_state, LOOP_UPDATE); 3224 atomic_set(&vha->loop_state, LOOP_UPDATE);
3190 3225
3191 /* Issue a marker after FW becomes ready. */ 3226 /* Issue a marker after FW becomes ready. */
3192 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3227 qla2x00_marker(vha, req, rsp, 0, 0,
3193 ha->marker_needed = 0; 3228 MK_SYNC_ALL);
3229 vha->marker_needed = 0;
3194 3230
3195 /* Remap devices on Loop. */ 3231 /* Remap devices on Loop. */
3196 clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3232 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3197 3233
3198 qla2x00_configure_loop(ha); 3234 qla2x00_configure_loop(vha);
3199 wait_time--; 3235 wait_time--;
3200 } while (!atomic_read(&ha->loop_down_timer) && 3236 } while (!atomic_read(&vha->loop_down_timer) &&
3201 !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) && 3237 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3202 wait_time && 3238 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3203 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 3239 &vha->dpc_flags)));
3204 } 3240 }
3205 } 3241 }
3206 3242
3207 if (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 3243 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3208 return (QLA_FUNCTION_FAILED); 3244 return (QLA_FUNCTION_FAILED);
3209 }
3210 3245
3211 if (rval) { 3246 if (rval)
3212 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__)); 3247 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
3213 }
3214 3248
3215 return (rval); 3249 return (rval);
3216} 3250}
3217 3251
3218void 3252void
3219qla2x00_update_fcports(scsi_qla_host_t *ha) 3253qla2x00_update_fcports(scsi_qla_host_t *vha)
3220{ 3254{
3221 fc_port_t *fcport; 3255 fc_port_t *fcport;
3222 3256
3223 /* Go with deferred removal of rport references. */ 3257 /* Go with deferred removal of rport references. */
3224 list_for_each_entry(fcport, &ha->fcports, list) 3258 list_for_each_entry(fcport, &vha->vp_fcports, list)
3225 if (fcport->drport && 3259 if (fcport && fcport->drport &&
3226 atomic_read(&fcport->state) != FCS_UNCONFIGURED) 3260 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3227 qla2x00_rport_del(fcport); 3261 qla2x00_rport_del(fcport);
3228} 3262}
@@ -3238,63 +3272,65 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3238* 0 = success 3272* 0 = success
3239*/ 3273*/
3240int 3274int
3241qla2x00_abort_isp(scsi_qla_host_t *ha) 3275qla2x00_abort_isp(scsi_qla_host_t *vha)
3242{ 3276{
3243 int rval; 3277 int rval;
3244 uint8_t status = 0; 3278 uint8_t status = 0;
3245 scsi_qla_host_t *vha; 3279 struct qla_hw_data *ha = vha->hw;
3280 struct scsi_qla_host *vp;
3281 struct req_que *req = ha->req_q_map[0];
3246 3282
3247 if (ha->flags.online) { 3283 if (vha->flags.online) {
3248 ha->flags.online = 0; 3284 vha->flags.online = 0;
3249 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 3285 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3250 ha->qla_stats.total_isp_aborts++; 3286 ha->qla_stats.total_isp_aborts++;
3251 3287
3252 qla_printk(KERN_INFO, ha, 3288 qla_printk(KERN_INFO, ha,
3253 "Performing ISP error recovery - ha= %p.\n", ha); 3289 "Performing ISP error recovery - ha= %p.\n", ha);
3254 ha->isp_ops->reset_chip(ha); 3290 ha->isp_ops->reset_chip(vha);
3255 3291
3256 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 3292 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
3257 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 3293 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
3258 atomic_set(&ha->loop_state, LOOP_DOWN); 3294 atomic_set(&vha->loop_state, LOOP_DOWN);
3259 qla2x00_mark_all_devices_lost(ha, 0); 3295 qla2x00_mark_all_devices_lost(vha, 0);
3260 list_for_each_entry(vha, &ha->vp_list, vp_list) 3296 list_for_each_entry(vp, &ha->vp_list, list)
3261 qla2x00_mark_all_devices_lost(vha, 0); 3297 qla2x00_mark_all_devices_lost(vp, 0);
3262 } else { 3298 } else {
3263 if (!atomic_read(&ha->loop_down_timer)) 3299 if (!atomic_read(&vha->loop_down_timer))
3264 atomic_set(&ha->loop_down_timer, 3300 atomic_set(&vha->loop_down_timer,
3265 LOOP_DOWN_TIME); 3301 LOOP_DOWN_TIME);
3266 } 3302 }
3267 3303
3268 /* Requeue all commands in outstanding command list. */ 3304 /* Requeue all commands in outstanding command list. */
3269 qla2x00_abort_all_cmds(ha, DID_RESET << 16); 3305 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
3270 3306
3271 ha->isp_ops->get_flash_version(ha, ha->request_ring); 3307 ha->isp_ops->get_flash_version(vha, req->ring);
3272 3308
3273 ha->isp_ops->nvram_config(ha); 3309 ha->isp_ops->nvram_config(vha);
3274 3310
3275 if (!qla2x00_restart_isp(ha)) { 3311 if (!qla2x00_restart_isp(vha)) {
3276 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3312 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3277 3313
3278 if (!atomic_read(&ha->loop_down_timer)) { 3314 if (!atomic_read(&vha->loop_down_timer)) {
3279 /* 3315 /*
3280 * Issue marker command only when we are going 3316 * Issue marker command only when we are going
 3281 * to start the I/O. 3317 * to start the I/O.
3282 */ 3318 */
3283 ha->marker_needed = 1; 3319 vha->marker_needed = 1;
3284 } 3320 }
3285 3321
3286 ha->flags.online = 1; 3322 vha->flags.online = 1;
3287 3323
3288 ha->isp_ops->enable_intrs(ha); 3324 ha->isp_ops->enable_intrs(ha);
3289 3325
3290 ha->isp_abort_cnt = 0; 3326 ha->isp_abort_cnt = 0;
3291 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3327 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3292 3328
3293 if (ha->fce) { 3329 if (ha->fce) {
3294 ha->flags.fce_enabled = 1; 3330 ha->flags.fce_enabled = 1;
3295 memset(ha->fce, 0, 3331 memset(ha->fce, 0,
3296 fce_calc_size(ha->fce_bufs)); 3332 fce_calc_size(ha->fce_bufs));
3297 rval = qla2x00_enable_fce_trace(ha, 3333 rval = qla2x00_enable_fce_trace(vha,
3298 ha->fce_dma, ha->fce_bufs, ha->fce_mb, 3334 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
3299 &ha->fce_bufs); 3335 &ha->fce_bufs);
3300 if (rval) { 3336 if (rval) {
@@ -3307,7 +3343,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3307 3343
3308 if (ha->eft) { 3344 if (ha->eft) {
3309 memset(ha->eft, 0, EFT_SIZE); 3345 memset(ha->eft, 0, EFT_SIZE);
3310 rval = qla2x00_enable_eft_trace(ha, 3346 rval = qla2x00_enable_eft_trace(vha,
3311 ha->eft_dma, EFT_NUM_BUFFERS); 3347 ha->eft_dma, EFT_NUM_BUFFERS);
3312 if (rval) { 3348 if (rval) {
3313 qla_printk(KERN_WARNING, ha, 3349 qla_printk(KERN_WARNING, ha,
@@ -3316,8 +3352,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3316 } 3352 }
3317 } 3353 }
3318 } else { /* failed the ISP abort */ 3354 } else { /* failed the ISP abort */
3319 ha->flags.online = 1; 3355 vha->flags.online = 1;
3320 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3356 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
3321 if (ha->isp_abort_cnt == 0) { 3357 if (ha->isp_abort_cnt == 0) {
3322 qla_printk(KERN_WARNING, ha, 3358 qla_printk(KERN_WARNING, ha,
3323 "ISP error recovery failed - " 3359 "ISP error recovery failed - "
@@ -3326,37 +3362,41 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3326 * The next call disables the board 3362 * The next call disables the board
3327 * completely. 3363 * completely.
3328 */ 3364 */
3329 ha->isp_ops->reset_adapter(ha); 3365 ha->isp_ops->reset_adapter(vha);
3330 ha->flags.online = 0; 3366 vha->flags.online = 0;
3331 clear_bit(ISP_ABORT_RETRY, 3367 clear_bit(ISP_ABORT_RETRY,
3332 &ha->dpc_flags); 3368 &vha->dpc_flags);
3333 status = 0; 3369 status = 0;
3334 } else { /* schedule another ISP abort */ 3370 } else { /* schedule another ISP abort */
3335 ha->isp_abort_cnt--; 3371 ha->isp_abort_cnt--;
3336 DEBUG(printk("qla%ld: ISP abort - " 3372 DEBUG(printk("qla%ld: ISP abort - "
3337 "retry remaining %d\n", 3373 "retry remaining %d\n",
3338 ha->host_no, ha->isp_abort_cnt)); 3374 vha->host_no, ha->isp_abort_cnt));
3339 status = 1; 3375 status = 1;
3340 } 3376 }
3341 } else { 3377 } else {
3342 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT; 3378 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
3343 DEBUG(printk("qla2x00(%ld): ISP error recovery " 3379 DEBUG(printk("qla2x00(%ld): ISP error recovery "
3344 "- retrying (%d) more times\n", 3380 "- retrying (%d) more times\n",
3345 ha->host_no, ha->isp_abort_cnt)); 3381 vha->host_no, ha->isp_abort_cnt));
3346 set_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3382 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
3347 status = 1; 3383 status = 1;
3348 } 3384 }
3349 } 3385 }
3350 3386
3351 } 3387 }
3352 3388
3353 if (status) { 3389 if (!status) {
3390 DEBUG(printk(KERN_INFO
3391 "qla2x00_abort_isp(%ld): succeeded.\n",
3392 vha->host_no));
3393 list_for_each_entry(vp, &ha->vp_list, list) {
3394 if (vp->vp_idx)
3395 qla2x00_vp_abort_isp(vp);
3396 }
3397 } else {
3354 qla_printk(KERN_INFO, ha, 3398 qla_printk(KERN_INFO, ha,
3355 "qla2x00_abort_isp: **** FAILED ****\n"); 3399 "qla2x00_abort_isp: **** FAILED ****\n");
3356 } else {
3357 DEBUG(printk(KERN_INFO
3358 "qla2x00_abort_isp(%ld): exiting.\n",
3359 ha->host_no));
3360 } 3400 }
3361 3401
3362 return(status); 3402 return(status);
@@ -3373,42 +3413,50 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3373* 0 = success 3413* 0 = success
3374*/ 3414*/
3375static int 3415static int
3376qla2x00_restart_isp(scsi_qla_host_t *ha) 3416qla2x00_restart_isp(scsi_qla_host_t *vha)
3377{ 3417{
3378 uint8_t status = 0; 3418 uint8_t status = 0;
3379 uint32_t wait_time; 3419 uint32_t wait_time;
3420 struct qla_hw_data *ha = vha->hw;
3421 struct req_que *req = ha->req_q_map[0];
3422 struct rsp_que *rsp = ha->rsp_q_map[0];
3380 3423
3381 /* If firmware needs to be loaded */ 3424 /* If firmware needs to be loaded */
3382 if (qla2x00_isp_firmware(ha)) { 3425 if (qla2x00_isp_firmware(vha)) {
3383 ha->flags.online = 0; 3426 vha->flags.online = 0;
3384 if (!(status = ha->isp_ops->chip_diag(ha))) 3427 status = ha->isp_ops->chip_diag(vha);
3385 status = qla2x00_setup_chip(ha); 3428 if (!status)
3429 status = qla2x00_setup_chip(vha);
3386 } 3430 }
3387 3431
3388 if (!status && !(status = qla2x00_init_rings(ha))) { 3432 if (!status && !(status = qla2x00_init_rings(vha))) {
3389 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 3433 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
3390 if (!(status = qla2x00_fw_ready(ha))) { 3434 /* Initialize the queues in use */
3435 qla25xx_init_queues(ha);
3436
3437 status = qla2x00_fw_ready(vha);
3438 if (!status) {
3391 DEBUG(printk("%s(): Start configure loop, " 3439 DEBUG(printk("%s(): Start configure loop, "
3392 "status = %d\n", __func__, status)); 3440 "status = %d\n", __func__, status));
3393 3441
3394 /* Issue a marker after FW becomes ready. */ 3442 /* Issue a marker after FW becomes ready. */
3395 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 3443 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
3396 3444
3397 ha->flags.online = 1; 3445 vha->flags.online = 1;
3398 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 3446 /* Wait at most MAX_TARGET RSCNs for a stable link. */
3399 wait_time = 256; 3447 wait_time = 256;
3400 do { 3448 do {
3401 clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 3449 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3402 qla2x00_configure_loop(ha); 3450 qla2x00_configure_loop(vha);
3403 wait_time--; 3451 wait_time--;
3404 } while (!atomic_read(&ha->loop_down_timer) && 3452 } while (!atomic_read(&vha->loop_down_timer) &&
3405 !(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) && 3453 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
3406 wait_time && 3454 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
3407 (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))); 3455 &vha->dpc_flags)));
3408 } 3456 }
3409 3457
3410 /* if no cable then assume it's good */ 3458 /* if no cable then assume it's good */
3411 if ((ha->device_flags & DFLG_NO_CABLE)) 3459 if ((vha->device_flags & DFLG_NO_CABLE))
3412 status = 0; 3460 status = 0;
3413 3461
3414 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n", 3462 DEBUG(printk("%s(): Configure loop done, status = 0x%x\n",
@@ -3418,6 +3466,46 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3418 return (status); 3466 return (status);
3419} 3467}
3420 3468
3469static int
3470qla25xx_init_queues(struct qla_hw_data *ha)
3471{
3472 struct rsp_que *rsp = NULL;
3473 struct req_que *req = NULL;
3474 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3475 int ret = -1;
3476 int i;
3477
3478 for (i = 1; i < ha->max_queues; i++) {
3479 rsp = ha->rsp_q_map[i];
3480 if (rsp) {
3481 rsp->options &= ~BIT_0;
3482 ret = qla25xx_init_rsp_que(base_vha, rsp, rsp->options);
3483 if (ret != QLA_SUCCESS)
3484 DEBUG2_17(printk(KERN_WARNING
3485 "%s Rsp que:%d init failed\n", __func__,
3486 rsp->id));
3487 else
3488 DEBUG2_17(printk(KERN_INFO
3489 "%s Rsp que:%d inited\n", __func__,
3490 rsp->id));
3491 }
3492 req = ha->req_q_map[i];
3493 if (req) {
3494 req->options &= ~BIT_0;
3495 ret = qla25xx_init_req_que(base_vha, req, req->options);
3496 if (ret != QLA_SUCCESS)
3497 DEBUG2_17(printk(KERN_WARNING
3498 "%s Req que:%d init failed\n", __func__,
3499 req->id));
3500 else
 3501 DEBUG2_17(printk(KERN_INFO
 3502 "%s Req que:%d inited\n", __func__,
3503 req->id));
3504 }
3505 }
3506 return ret;
3507}
3508
3421/* 3509/*
3422* qla2x00_reset_adapter 3510* qla2x00_reset_adapter
3423* Reset adapter. 3511* Reset adapter.
@@ -3426,12 +3514,13 @@ qla2x00_restart_isp(scsi_qla_host_t *ha)
3426* ha = adapter block pointer. 3514* ha = adapter block pointer.
3427*/ 3515*/
3428void 3516void
3429qla2x00_reset_adapter(scsi_qla_host_t *ha) 3517qla2x00_reset_adapter(scsi_qla_host_t *vha)
3430{ 3518{
3431 unsigned long flags = 0; 3519 unsigned long flags = 0;
3520 struct qla_hw_data *ha = vha->hw;
3432 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3521 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3433 3522
3434 ha->flags.online = 0; 3523 vha->flags.online = 0;
3435 ha->isp_ops->disable_intrs(ha); 3524 ha->isp_ops->disable_intrs(ha);
3436 3525
3437 spin_lock_irqsave(&ha->hardware_lock, flags); 3526 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3443,12 +3532,13 @@ qla2x00_reset_adapter(scsi_qla_host_t *ha)
3443} 3532}
3444 3533
3445void 3534void
3446qla24xx_reset_adapter(scsi_qla_host_t *ha) 3535qla24xx_reset_adapter(scsi_qla_host_t *vha)
3447{ 3536{
3448 unsigned long flags = 0; 3537 unsigned long flags = 0;
3538 struct qla_hw_data *ha = vha->hw;
3449 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3539 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3450 3540
3451 ha->flags.online = 0; 3541 vha->flags.online = 0;
3452 ha->isp_ops->disable_intrs(ha); 3542 ha->isp_ops->disable_intrs(ha);
3453 3543
3454 spin_lock_irqsave(&ha->hardware_lock, flags); 3544 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3462,9 +3552,11 @@ qla24xx_reset_adapter(scsi_qla_host_t *ha)
3462/* On sparc systems, obtain port and node WWN from firmware 3552/* On sparc systems, obtain port and node WWN from firmware
3463 * properties. 3553 * properties.
3464 */ 3554 */
3465static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *nv) 3555static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
3556 struct nvram_24xx *nv)
3466{ 3557{
3467#ifdef CONFIG_SPARC 3558#ifdef CONFIG_SPARC
3559 struct qla_hw_data *ha = vha->hw;
3468 struct pci_dev *pdev = ha->pdev; 3560 struct pci_dev *pdev = ha->pdev;
3469 struct device_node *dp = pci_device_to_OF_node(pdev); 3561 struct device_node *dp = pci_device_to_OF_node(pdev);
3470 const u8 *val; 3562 const u8 *val;
@@ -3481,7 +3573,7 @@ static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *ha, struct nvram_24xx *n
3481} 3573}
3482 3574
3483int 3575int
3484qla24xx_nvram_config(scsi_qla_host_t *ha) 3576qla24xx_nvram_config(scsi_qla_host_t *vha)
3485{ 3577{
3486 int rval; 3578 int rval;
3487 struct init_cb_24xx *icb; 3579 struct init_cb_24xx *icb;
@@ -3490,6 +3582,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3490 uint8_t *dptr1, *dptr2; 3582 uint8_t *dptr1, *dptr2;
3491 uint32_t chksum; 3583 uint32_t chksum;
3492 uint16_t cnt; 3584 uint16_t cnt;
3585 struct qla_hw_data *ha = vha->hw;
3493 3586
3494 rval = QLA_SUCCESS; 3587 rval = QLA_SUCCESS;
3495 icb = (struct init_cb_24xx *)ha->init_cb; 3588 icb = (struct init_cb_24xx *)ha->init_cb;
@@ -3507,12 +3600,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3507 3600
3508 /* Get VPD data into cache */ 3601 /* Get VPD data into cache */
3509 ha->vpd = ha->nvram + VPD_OFFSET; 3602 ha->vpd = ha->nvram + VPD_OFFSET;
3510 ha->isp_ops->read_nvram(ha, (uint8_t *)ha->vpd, 3603 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd,
3511 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); 3604 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
3512 3605
3513 /* Get NVRAM data into cache and calculate checksum. */ 3606 /* Get NVRAM data into cache and calculate checksum. */
3514 dptr = (uint32_t *)nv; 3607 dptr = (uint32_t *)nv;
3515 ha->isp_ops->read_nvram(ha, (uint8_t *)dptr, ha->nvram_base, 3608 ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base,
3516 ha->nvram_size); 3609 ha->nvram_size);
3517 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) 3610 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
3518 chksum += le32_to_cpu(*dptr++); 3611 chksum += le32_to_cpu(*dptr++);
@@ -3557,7 +3650,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3557 nv->node_name[5] = 0x1c; 3650 nv->node_name[5] = 0x1c;
3558 nv->node_name[6] = 0x55; 3651 nv->node_name[6] = 0x55;
3559 nv->node_name[7] = 0x86; 3652 nv->node_name[7] = 0x86;
3560 qla24xx_nvram_wwn_from_ofw(ha, nv); 3653 qla24xx_nvram_wwn_from_ofw(vha, nv);
3561 nv->login_retry_count = __constant_cpu_to_le16(8); 3654 nv->login_retry_count = __constant_cpu_to_le16(8);
3562 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 3655 nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
3563 nv->login_timeout = __constant_cpu_to_le16(0); 3656 nv->login_timeout = __constant_cpu_to_le16(0);
@@ -3577,7 +3670,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3577 } 3670 }
3578 3671
3579 /* Reset Initialization control block */ 3672 /* Reset Initialization control block */
3580 memset(icb, 0, sizeof(struct init_cb_24xx)); 3673 memset(icb, 0, ha->init_cb_size);
3581 3674
3582 /* Copy 1st segment. */ 3675 /* Copy 1st segment. */
3583 dptr1 = (uint8_t *)icb; 3676 dptr1 = (uint8_t *)icb;
@@ -3600,7 +3693,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3600 /* 3693 /*
3601 * Setup driver NVRAM options. 3694 * Setup driver NVRAM options.
3602 */ 3695 */
3603 qla2x00_set_model_info(ha, nv->model_name, sizeof(nv->model_name), 3696 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
3604 "QLA2462"); 3697 "QLA2462");
3605 3698
3606 /* Use alternate WWN? */ 3699 /* Use alternate WWN? */
@@ -3639,8 +3732,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3639 ha->serial0 = icb->port_name[5]; 3732 ha->serial0 = icb->port_name[5];
3640 ha->serial1 = icb->port_name[6]; 3733 ha->serial1 = icb->port_name[6];
3641 ha->serial2 = icb->port_name[7]; 3734 ha->serial2 = icb->port_name[7];
3642 ha->node_name = icb->node_name; 3735 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
3643 ha->port_name = icb->port_name; 3736 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
3644 3737
3645 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 3738 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
3646 3739
@@ -3695,7 +3788,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3695 ha->login_retry_count = ql2xloginretrycount; 3788 ha->login_retry_count = ql2xloginretrycount;
3696 3789
3697 /* Enable ZIO. */ 3790 /* Enable ZIO. */
3698 if (!ha->flags.init_done) { 3791 if (!vha->flags.init_done) {
3699 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 3792 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
3700 (BIT_3 | BIT_2 | BIT_1 | BIT_0); 3793 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
3701 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 3794 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
@@ -3703,12 +3796,12 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3703 } 3796 }
3704 icb->firmware_options_2 &= __constant_cpu_to_le32( 3797 icb->firmware_options_2 &= __constant_cpu_to_le32(
3705 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 3798 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
3706 ha->flags.process_response_queue = 0; 3799 vha->flags.process_response_queue = 0;
3707 if (ha->zio_mode != QLA_ZIO_DISABLED) { 3800 if (ha->zio_mode != QLA_ZIO_DISABLED) {
3708 ha->zio_mode = QLA_ZIO_MODE_6; 3801 ha->zio_mode = QLA_ZIO_MODE_6;
3709 3802
3710 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay " 3803 DEBUG2(printk("scsi(%ld): ZIO mode %d enabled; timer delay "
3711 "(%d us).\n", ha->host_no, ha->zio_mode, 3804 "(%d us).\n", vha->host_no, ha->zio_mode,
3712 ha->zio_timer * 100)); 3805 ha->zio_timer * 100));
3713 qla_printk(KERN_INFO, ha, 3806 qla_printk(KERN_INFO, ha,
3714 "ZIO mode %d enabled; timer delay (%d us).\n", 3807 "ZIO mode %d enabled; timer delay (%d us).\n",
@@ -3717,36 +3810,37 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
3717 icb->firmware_options_2 |= cpu_to_le32( 3810 icb->firmware_options_2 |= cpu_to_le32(
3718 (uint32_t)ha->zio_mode); 3811 (uint32_t)ha->zio_mode);
3719 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer); 3812 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
3720 ha->flags.process_response_queue = 1; 3813 vha->flags.process_response_queue = 1;
3721 } 3814 }
3722 3815
3723 if (rval) { 3816 if (rval) {
3724 DEBUG2_3(printk(KERN_WARNING 3817 DEBUG2_3(printk(KERN_WARNING
3725 "scsi(%ld): NVRAM configuration failed!\n", ha->host_no)); 3818 "scsi(%ld): NVRAM configuration failed!\n", vha->host_no));
3726 } 3819 }
3727 return (rval); 3820 return (rval);
3728} 3821}
3729 3822
3730static int 3823static int
3731qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3824qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3732{ 3825{
3733 int rval; 3826 int rval = QLA_SUCCESS;
3734 int segments, fragment; 3827 int segments, fragment;
3735 uint32_t faddr; 3828 uint32_t faddr;
3736 uint32_t *dcode, dlen; 3829 uint32_t *dcode, dlen;
3737 uint32_t risc_addr; 3830 uint32_t risc_addr;
3738 uint32_t risc_size; 3831 uint32_t risc_size;
3739 uint32_t i; 3832 uint32_t i;
3740 3833 struct qla_hw_data *ha = vha->hw;
3834 struct req_que *req = ha->req_q_map[0];
3741 rval = QLA_SUCCESS; 3835 rval = QLA_SUCCESS;
3742 3836
3743 segments = FA_RISC_CODE_SEGMENTS; 3837 segments = FA_RISC_CODE_SEGMENTS;
3744 faddr = ha->flt_region_fw; 3838 faddr = ha->flt_region_fw;
3745 dcode = (uint32_t *)ha->request_ring; 3839 dcode = (uint32_t *)req->ring;
3746 *srisc_addr = 0; 3840 *srisc_addr = 0;
3747 3841
3748 /* Validate firmware image by checking version. */ 3842 /* Validate firmware image by checking version. */
3749 qla24xx_read_flash_data(ha, dcode, faddr + 4, 4); 3843 qla24xx_read_flash_data(vha, dcode, faddr + 4, 4);
3750 for (i = 0; i < 4; i++) 3844 for (i = 0; i < 4; i++)
3751 dcode[i] = be32_to_cpu(dcode[i]); 3845 dcode[i] = be32_to_cpu(dcode[i]);
3752 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && 3846 if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff &&
@@ -3764,7 +3858,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3764 3858
3765 while (segments && rval == QLA_SUCCESS) { 3859 while (segments && rval == QLA_SUCCESS) {
3766 /* Read segment's load information. */ 3860 /* Read segment's load information. */
3767 qla24xx_read_flash_data(ha, dcode, faddr, 4); 3861 qla24xx_read_flash_data(vha, dcode, faddr, 4);
3768 3862
3769 risc_addr = be32_to_cpu(dcode[2]); 3863 risc_addr = be32_to_cpu(dcode[2]);
3770 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; 3864 *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
@@ -3778,17 +3872,17 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3778 3872
3779 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3873 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3780 "addr %x, number of dwords 0x%x, offset 0x%x.\n", 3874 "addr %x, number of dwords 0x%x, offset 0x%x.\n",
3781 ha->host_no, risc_addr, dlen, faddr)); 3875 vha->host_no, risc_addr, dlen, faddr));
3782 3876
3783 qla24xx_read_flash_data(ha, dcode, faddr, dlen); 3877 qla24xx_read_flash_data(vha, dcode, faddr, dlen);
3784 for (i = 0; i < dlen; i++) 3878 for (i = 0; i < dlen; i++)
3785 dcode[i] = swab32(dcode[i]); 3879 dcode[i] = swab32(dcode[i]);
3786 3880
3787 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 3881 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3788 dlen); 3882 dlen);
3789 if (rval) { 3883 if (rval) {
3790 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3884 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3791 "segment %d of firmware\n", ha->host_no, 3885 "segment %d of firmware\n", vha->host_no,
3792 fragment)); 3886 fragment));
3793 qla_printk(KERN_WARNING, ha, 3887 qla_printk(KERN_WARNING, ha,
3794 "[ERROR] Failed to load segment %d of " 3888 "[ERROR] Failed to load segment %d of "
@@ -3812,16 +3906,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3812#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/" 3906#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
3813 3907
3814int 3908int
3815qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr) 3909qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3816{ 3910{
3817 int rval; 3911 int rval;
3818 int i, fragment; 3912 int i, fragment;
3819 uint16_t *wcode, *fwcode; 3913 uint16_t *wcode, *fwcode;
3820 uint32_t risc_addr, risc_size, fwclen, wlen, *seg; 3914 uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
3821 struct fw_blob *blob; 3915 struct fw_blob *blob;
3916 struct qla_hw_data *ha = vha->hw;
3917 struct req_que *req = ha->req_q_map[0];
3822 3918
3823 /* Load firmware blob. */ 3919 /* Load firmware blob. */
3824 blob = qla2x00_request_firmware(ha); 3920 blob = qla2x00_request_firmware(vha);
3825 if (!blob) { 3921 if (!blob) {
3826 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 3922 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3827 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 3923 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3831,7 +3927,7 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3831 3927
3832 rval = QLA_SUCCESS; 3928 rval = QLA_SUCCESS;
3833 3929
3834 wcode = (uint16_t *)ha->request_ring; 3930 wcode = (uint16_t *)req->ring;
3835 *srisc_addr = 0; 3931 *srisc_addr = 0;
3836 fwcode = (uint16_t *)blob->fw->data; 3932 fwcode = (uint16_t *)blob->fw->data;
3837 fwclen = 0; 3933 fwclen = 0;
@@ -3878,17 +3974,17 @@ qla2x00_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3878 wlen = risc_size; 3974 wlen = risc_size;
3879 3975
3880 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 3976 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3881 "addr %x, number of words 0x%x.\n", ha->host_no, 3977 "addr %x, number of words 0x%x.\n", vha->host_no,
3882 risc_addr, wlen)); 3978 risc_addr, wlen));
3883 3979
3884 for (i = 0; i < wlen; i++) 3980 for (i = 0; i < wlen; i++)
3885 wcode[i] = swab16(fwcode[i]); 3981 wcode[i] = swab16(fwcode[i]);
3886 3982
3887 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 3983 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3888 wlen); 3984 wlen);
3889 if (rval) { 3985 if (rval) {
3890 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 3986 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
3891 "segment %d of firmware\n", ha->host_no, 3987 "segment %d of firmware\n", vha->host_no,
3892 fragment)); 3988 fragment));
3893 qla_printk(KERN_WARNING, ha, 3989 qla_printk(KERN_WARNING, ha,
3894 "[ERROR] Failed to load segment %d of " 3990 "[ERROR] Failed to load segment %d of "
@@ -3912,7 +4008,7 @@ fail_fw_integrity:
3912} 4008}
3913 4009
3914int 4010int
3915qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr) 4011qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3916{ 4012{
3917 int rval; 4013 int rval;
3918 int segments, fragment; 4014 int segments, fragment;
@@ -3922,9 +4018,11 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3922 uint32_t i; 4018 uint32_t i;
3923 struct fw_blob *blob; 4019 struct fw_blob *blob;
3924 uint32_t *fwcode, fwclen; 4020 uint32_t *fwcode, fwclen;
4021 struct qla_hw_data *ha = vha->hw;
4022 struct req_que *req = ha->req_q_map[0];
3925 4023
3926 /* Load firmware blob. */ 4024 /* Load firmware blob. */
3927 blob = qla2x00_request_firmware(ha); 4025 blob = qla2x00_request_firmware(vha);
3928 if (!blob) { 4026 if (!blob) {
3929 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n"); 4027 qla_printk(KERN_ERR, ha, "Firmware image unavailable.\n");
3930 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved " 4028 qla_printk(KERN_ERR, ha, "Firmware images can be retrieved "
@@ -3933,13 +4031,13 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3933 /* Try to load RISC code from flash. */ 4031 /* Try to load RISC code from flash. */
3934 qla_printk(KERN_ERR, ha, "Attempting to load (potentially " 4032 qla_printk(KERN_ERR, ha, "Attempting to load (potentially "
3935 "outdated) firmware from flash.\n"); 4033 "outdated) firmware from flash.\n");
3936 return qla24xx_load_risc_flash(ha, srisc_addr); 4034 return qla24xx_load_risc_flash(vha, srisc_addr);
3937 } 4035 }
3938 4036
3939 rval = QLA_SUCCESS; 4037 rval = QLA_SUCCESS;
3940 4038
3941 segments = FA_RISC_CODE_SEGMENTS; 4039 segments = FA_RISC_CODE_SEGMENTS;
3942 dcode = (uint32_t *)ha->request_ring; 4040 dcode = (uint32_t *)req->ring;
3943 *srisc_addr = 0; 4041 *srisc_addr = 0;
3944 fwcode = (uint32_t *)blob->fw->data; 4042 fwcode = (uint32_t *)blob->fw->data;
3945 fwclen = 0; 4043 fwclen = 0;
@@ -3987,17 +4085,17 @@ qla24xx_load_risc(scsi_qla_host_t *ha, uint32_t *srisc_addr)
3987 dlen = risc_size; 4085 dlen = risc_size;
3988 4086
3989 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc " 4087 DEBUG7(printk("scsi(%ld): Loading risc segment@ risc "
3990 "addr %x, number of dwords 0x%x.\n", ha->host_no, 4088 "addr %x, number of dwords 0x%x.\n", vha->host_no,
3991 risc_addr, dlen)); 4089 risc_addr, dlen));
3992 4090
3993 for (i = 0; i < dlen; i++) 4091 for (i = 0; i < dlen; i++)
3994 dcode[i] = swab32(fwcode[i]); 4092 dcode[i] = swab32(fwcode[i]);
3995 4093
3996 rval = qla2x00_load_ram(ha, ha->request_dma, risc_addr, 4094 rval = qla2x00_load_ram(vha, req->dma, risc_addr,
3997 dlen); 4095 dlen);
3998 if (rval) { 4096 if (rval) {
3999 DEBUG(printk("scsi(%ld):[ERROR] Failed to load " 4097 DEBUG(printk("scsi(%ld):[ERROR] Failed to load "
4000 "segment %d of firmware\n", ha->host_no, 4098 "segment %d of firmware\n", vha->host_no,
4001 fragment)); 4099 fragment));
4002 qla_printk(KERN_WARNING, ha, 4100 qla_printk(KERN_WARNING, ha,
4003 "[ERROR] Failed to load segment %d of " 4101 "[ERROR] Failed to load segment %d of "
@@ -4021,49 +4119,53 @@ fail_fw_integrity:
4021} 4119}
4022 4120
4023void 4121void
4024qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha) 4122qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
4025{ 4123{
4026 int ret, retries; 4124 int ret, retries;
4125 struct qla_hw_data *ha = vha->hw;
4027 4126
4028 if (!IS_FWI2_CAPABLE(ha)) 4127 if (!IS_FWI2_CAPABLE(ha))
4029 return; 4128 return;
4030 if (!ha->fw_major_version) 4129 if (!ha->fw_major_version)
4031 return; 4130 return;
4032 4131
4033 ret = qla2x00_stop_firmware(ha); 4132 ret = qla2x00_stop_firmware(vha);
4034 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4133 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4035 retries ; retries--) { 4134 retries ; retries--) {
4036 ha->isp_ops->reset_chip(ha); 4135 ha->isp_ops->reset_chip(vha);
4037 if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS) 4136 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
4038 continue; 4137 continue;
4039 if (qla2x00_setup_chip(ha) != QLA_SUCCESS) 4138 if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
4040 continue; 4139 continue;
4041 qla_printk(KERN_INFO, ha, 4140 qla_printk(KERN_INFO, ha,
4042 "Attempting retry of stop-firmware command...\n"); 4141 "Attempting retry of stop-firmware command...\n");
4043 ret = qla2x00_stop_firmware(ha); 4142 ret = qla2x00_stop_firmware(vha);
4044 } 4143 }
4045} 4144}
4046 4145
4047int 4146int
4048qla24xx_configure_vhba(scsi_qla_host_t *ha) 4147qla24xx_configure_vhba(scsi_qla_host_t *vha)
4049{ 4148{
4050 int rval = QLA_SUCCESS; 4149 int rval = QLA_SUCCESS;
4051 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4150 uint16_t mb[MAILBOX_REGISTER_COUNT];
4151 struct qla_hw_data *ha = vha->hw;
4152 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4153 struct req_que *req = ha->req_q_map[0];
4154 struct rsp_que *rsp = ha->rsp_q_map[0];
4052 4155
4053 if (!ha->parent) 4156 if (!vha->vp_idx)
4054 return -EINVAL; 4157 return -EINVAL;
4055 4158
4056 rval = qla2x00_fw_ready(ha->parent); 4159 rval = qla2x00_fw_ready(base_vha);
4057 if (rval == QLA_SUCCESS) { 4160 if (rval == QLA_SUCCESS) {
4058 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 4161 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
4059 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL); 4162 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4060 } 4163 }
4061 4164
4062 ha->flags.management_server_logged_in = 0; 4165 vha->flags.management_server_logged_in = 0;
4063 4166
4064 /* Login to SNS first */ 4167 /* Login to SNS first */
4065 qla24xx_login_fabric(ha->parent, NPH_SNS, 0xff, 0xff, 0xfc, 4168 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1);
4066 mb, BIT_1);
4067 if (mb[0] != MBS_COMMAND_COMPLETE) { 4169 if (mb[0] != MBS_COMMAND_COMPLETE) {
4068 DEBUG15(qla_printk(KERN_INFO, ha, 4170 DEBUG15(qla_printk(KERN_INFO, ha,
4069 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x " 4171 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
@@ -4072,11 +4174,11 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
4072 return (QLA_FUNCTION_FAILED); 4174 return (QLA_FUNCTION_FAILED);
4073 } 4175 }
4074 4176
4075 atomic_set(&ha->loop_down_timer, 0); 4177 atomic_set(&vha->loop_down_timer, 0);
4076 atomic_set(&ha->loop_state, LOOP_UP); 4178 atomic_set(&vha->loop_state, LOOP_UP);
4077 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 4179 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4078 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 4180 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4079 rval = qla2x00_loop_resync(ha->parent); 4181 rval = qla2x00_loop_resync(base_vha);
4080 4182
4081 return rval; 4183 return rval;
4082} 4184}
@@ -4087,9 +4189,10 @@ static LIST_HEAD(qla_cs84xx_list);
4087static DEFINE_MUTEX(qla_cs84xx_mutex); 4189static DEFINE_MUTEX(qla_cs84xx_mutex);
4088 4190
4089static struct qla_chip_state_84xx * 4191static struct qla_chip_state_84xx *
4090qla84xx_get_chip(struct scsi_qla_host *ha) 4192qla84xx_get_chip(struct scsi_qla_host *vha)
4091{ 4193{
4092 struct qla_chip_state_84xx *cs84xx; 4194 struct qla_chip_state_84xx *cs84xx;
4195 struct qla_hw_data *ha = vha->hw;
4093 4196
4094 mutex_lock(&qla_cs84xx_mutex); 4197 mutex_lock(&qla_cs84xx_mutex);
4095 4198
@@ -4129,21 +4232,23 @@ __qla84xx_chip_release(struct kref *kref)
4129} 4232}
4130 4233
4131void 4234void
4132qla84xx_put_chip(struct scsi_qla_host *ha) 4235qla84xx_put_chip(struct scsi_qla_host *vha)
4133{ 4236{
4237 struct qla_hw_data *ha = vha->hw;
4134 if (ha->cs84xx) 4238 if (ha->cs84xx)
4135 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release); 4239 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
4136} 4240}
4137 4241
4138static int 4242static int
4139qla84xx_init_chip(scsi_qla_host_t *ha) 4243qla84xx_init_chip(scsi_qla_host_t *vha)
4140{ 4244{
4141 int rval; 4245 int rval;
4142 uint16_t status[2]; 4246 uint16_t status[2];
4247 struct qla_hw_data *ha = vha->hw;
4143 4248
4144 mutex_lock(&ha->cs84xx->fw_update_mutex); 4249 mutex_lock(&ha->cs84xx->fw_update_mutex);
4145 4250
4146 rval = qla84xx_verify_chip(ha, status); 4251 rval = qla84xx_verify_chip(vha, status);
4147 4252
4148 mutex_unlock(&ha->cs84xx->fw_update_mutex); 4253 mutex_unlock(&ha->cs84xx->fw_update_mutex);
4149 4254
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index e90afad120ee..5e0a7095c9f2 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -32,47 +32,15 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr)
32} 32}
33 33
34static inline void 34static inline void
35qla2x00_poll(scsi_qla_host_t *ha) 35qla2x00_poll(struct rsp_que *rsp)
36{ 36{
37 unsigned long flags; 37 unsigned long flags;
38 38 struct qla_hw_data *ha = rsp->hw;
39 local_irq_save(flags); 39 local_irq_save(flags);
40 ha->isp_ops->intr_handler(0, ha); 40 ha->isp_ops->intr_handler(0, rsp);
41 local_irq_restore(flags); 41 local_irq_restore(flags);
42} 42}
43 43
44static __inline__ scsi_qla_host_t *
45to_qla_parent(scsi_qla_host_t *ha)
46{
47 return ha->parent ? ha->parent : ha;
48}
49
50/**
51 * qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
52 * @ha: HA context
53 * @ha_locked: is function called with the hardware lock
54 *
55 * Returns non-zero if a failure occurred, else zero.
56 */
57static inline int
58qla2x00_issue_marker(scsi_qla_host_t *ha, int ha_locked)
59{
60 /* Send marker if required */
61 if (ha->marker_needed != 0) {
62 if (ha_locked) {
63 if (__qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
64 QLA_SUCCESS)
65 return (QLA_FUNCTION_FAILED);
66 } else {
67 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) !=
68 QLA_SUCCESS)
69 return (QLA_FUNCTION_FAILED);
70 }
71 ha->marker_needed = 0;
72 }
73 return (QLA_SUCCESS);
74}
75
76static inline uint8_t * 44static inline uint8_t *
77host_to_fcp_swap(uint8_t *fcp, uint32_t bsize) 45host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
78{ 46{
@@ -87,11 +55,12 @@ host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
87} 55}
88 56
89static inline int 57static inline int
90qla2x00_is_reserved_id(scsi_qla_host_t *ha, uint16_t loop_id) 58qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
91{ 59{
60 struct qla_hw_data *ha = vha->hw;
92 if (IS_FWI2_CAPABLE(ha)) 61 if (IS_FWI2_CAPABLE(ha))
93 return (loop_id > NPH_LAST_HANDLE); 62 return (loop_id > NPH_LAST_HANDLE);
94 63
95 return ((loop_id > ha->last_loop_id && loop_id < SNS_FIRST_LOOP_ID) || 64 return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
96 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST); 65 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
97}; 66}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 85bc0a48598b..5bedc9d05942 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,9 @@
11 11
12#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
13 13
14static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha); 14static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
15static void qla2x00_isp_cmd(scsi_qla_host_t *ha); 15 struct rsp_que *rsp);
16static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
16 17
17/** 18/**
18 * qla2x00_get_cmd_direction() - Determine control_flag data direction. 19 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -30,11 +31,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
30 /* Set transfer direction */ 31 /* Set transfer direction */
31 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE; 33 cflags = CF_WRITE;
33 sp->fcport->ha->qla_stats.output_bytes += 34 sp->fcport->vha->hw->qla_stats.output_bytes +=
34 scsi_bufflen(sp->cmd); 35 scsi_bufflen(sp->cmd);
35 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 36 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ; 37 cflags = CF_READ;
37 sp->fcport->ha->qla_stats.input_bytes += 38 sp->fcport->vha->hw->qla_stats.input_bytes +=
38 scsi_bufflen(sp->cmd); 39 scsi_bufflen(sp->cmd);
39 } 40 }
40 return (cflags); 41 return (cflags);
@@ -91,20 +92,19 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
91 * Returns a pointer to the Continuation Type 0 IOCB packet. 92 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */ 93 */
93static inline cont_entry_t * 94static inline cont_entry_t *
94qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha) 95qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
95{ 96{
96 cont_entry_t *cont_pkt; 97 cont_entry_t *cont_pkt;
97
98 /* Adjust ring index. */ 98 /* Adjust ring index. */
99 ha->req_ring_index++; 99 req->ring_index++;
100 if (ha->req_ring_index == ha->request_q_length) { 100 if (req->ring_index == req->length) {
101 ha->req_ring_index = 0; 101 req->ring_index = 0;
102 ha->request_ring_ptr = ha->request_ring; 102 req->ring_ptr = req->ring;
103 } else { 103 } else {
104 ha->request_ring_ptr++; 104 req->ring_ptr++;
105 } 105 }
106 106
107 cont_pkt = (cont_entry_t *)ha->request_ring_ptr; 107 cont_pkt = (cont_entry_t *)req->ring_ptr;
108 108
109 /* Load packet defaults. */ 109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = 110 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -120,20 +120,20 @@ qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
120 * Returns a pointer to the continuation type 1 IOCB packet. 120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */ 121 */
122static inline cont_a64_entry_t * 122static inline cont_a64_entry_t *
123qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha) 123qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
124{ 124{
125 cont_a64_entry_t *cont_pkt; 125 cont_a64_entry_t *cont_pkt;
126 126
127 /* Adjust ring index. */ 127 /* Adjust ring index. */
128 ha->req_ring_index++; 128 req->ring_index++;
129 if (ha->req_ring_index == ha->request_q_length) { 129 if (req->ring_index == req->length) {
130 ha->req_ring_index = 0; 130 req->ring_index = 0;
131 ha->request_ring_ptr = ha->request_ring; 131 req->ring_ptr = req->ring;
132 } else { 132 } else {
133 ha->request_ring_ptr++; 133 req->ring_ptr++;
134 } 134 }
135 135
136 cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr; 136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137 137
138 /* Load packet defaults. */ 138 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt->entry_type)) = 139 *((uint32_t *)(&cont_pkt->entry_type)) =
@@ -155,10 +155,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
155{ 155{
156 uint16_t avail_dsds; 156 uint16_t avail_dsds;
157 uint32_t *cur_dsd; 157 uint32_t *cur_dsd;
158 scsi_qla_host_t *ha; 158 scsi_qla_host_t *vha;
159 struct scsi_cmnd *cmd; 159 struct scsi_cmnd *cmd;
160 struct scatterlist *sg; 160 struct scatterlist *sg;
161 int i; 161 int i;
162 struct req_que *req;
162 163
163 cmd = sp->cmd; 164 cmd = sp->cmd;
164 165
@@ -172,7 +173,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
172 return; 173 return;
173 } 174 }
174 175
175 ha = sp->ha; 176 vha = sp->vha;
177 req = sp->que;
176 178
177 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 179 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
178 180
@@ -190,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
190 * Seven DSDs are available in the Continuation 192 * Seven DSDs are available in the Continuation
191 * Type 0 IOCB. 193 * Type 0 IOCB.
192 */ 194 */
193 cont_pkt = qla2x00_prep_cont_type0_iocb(ha); 195 cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
194 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; 196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
195 avail_dsds = 7; 197 avail_dsds = 7;
196 } 198 }
@@ -214,10 +216,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
214{ 216{
215 uint16_t avail_dsds; 217 uint16_t avail_dsds;
216 uint32_t *cur_dsd; 218 uint32_t *cur_dsd;
217 scsi_qla_host_t *ha; 219 scsi_qla_host_t *vha;
218 struct scsi_cmnd *cmd; 220 struct scsi_cmnd *cmd;
219 struct scatterlist *sg; 221 struct scatterlist *sg;
220 int i; 222 int i;
223 struct req_que *req;
221 224
222 cmd = sp->cmd; 225 cmd = sp->cmd;
223 226
@@ -231,7 +234,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
231 return; 234 return;
232 } 235 }
233 236
234 ha = sp->ha; 237 vha = sp->vha;
238 req = sp->que;
235 239
236 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); 240 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
237 241
@@ -250,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 * Five DSDs are available in the Continuation 254 * Five DSDs are available in the Continuation
251 * Type 1 IOCB. 255 * Type 1 IOCB.
252 */ 256 */
253 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 257 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
254 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 258 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
255 avail_dsds = 5; 259 avail_dsds = 5;
256 } 260 }
@@ -274,7 +278,7 @@ qla2x00_start_scsi(srb_t *sp)
274{ 278{
275 int ret, nseg; 279 int ret, nseg;
276 unsigned long flags; 280 unsigned long flags;
277 scsi_qla_host_t *ha; 281 scsi_qla_host_t *vha;
278 struct scsi_cmnd *cmd; 282 struct scsi_cmnd *cmd;
279 uint32_t *clr_ptr; 283 uint32_t *clr_ptr;
280 uint32_t index; 284 uint32_t index;
@@ -284,33 +288,39 @@ qla2x00_start_scsi(srb_t *sp)
284 uint16_t req_cnt; 288 uint16_t req_cnt;
285 uint16_t tot_dsds; 289 uint16_t tot_dsds;
286 struct device_reg_2xxx __iomem *reg; 290 struct device_reg_2xxx __iomem *reg;
291 struct qla_hw_data *ha;
292 struct req_que *req;
293 struct rsp_que *rsp;
287 294
288 /* Setup device pointers. */ 295 /* Setup device pointers. */
289 ret = 0; 296 ret = 0;
290 ha = sp->ha; 297 vha = sp->vha;
298 ha = vha->hw;
291 reg = &ha->iobase->isp; 299 reg = &ha->iobase->isp;
292 cmd = sp->cmd; 300 cmd = sp->cmd;
301 req = ha->req_q_map[0];
302 rsp = ha->rsp_q_map[0];
293 /* So we know we haven't pci_map'ed anything yet */ 303 /* So we know we haven't pci_map'ed anything yet */
294 tot_dsds = 0; 304 tot_dsds = 0;
295 305
296 /* Send marker if required */ 306 /* Send marker if required */
297 if (ha->marker_needed != 0) { 307 if (vha->marker_needed != 0) {
298 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 308 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
309 != QLA_SUCCESS)
299 return (QLA_FUNCTION_FAILED); 310 return (QLA_FUNCTION_FAILED);
300 } 311 vha->marker_needed = 0;
301 ha->marker_needed = 0;
302 } 312 }
303 313
304 /* Acquire ring specific lock */ 314 /* Acquire ring specific lock */
305 spin_lock_irqsave(&ha->hardware_lock, flags); 315 spin_lock_irqsave(&ha->hardware_lock, flags);
306 316
307 /* Check for room in outstanding command list. */ 317 /* Check for room in outstanding command list. */
308 handle = ha->current_outstanding_cmd; 318 handle = req->current_outstanding_cmd;
309 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 319 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
310 handle++; 320 handle++;
311 if (handle == MAX_OUTSTANDING_COMMANDS) 321 if (handle == MAX_OUTSTANDING_COMMANDS)
312 handle = 1; 322 handle = 1;
313 if (!ha->outstanding_cmds[handle]) 323 if (!req->outstanding_cmds[handle])
314 break; 324 break;
315 } 325 }
316 if (index == MAX_OUTSTANDING_COMMANDS) 326 if (index == MAX_OUTSTANDING_COMMANDS)
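The outstanding-command table moves from the hw structure onto the per-request-queue structure, but the slot search itself is untouched: start one past the last handle issued, scan circularly, and never hand out handle 0. A small sketch of that search with an illustrative table size:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_OUTSTANDING_COMMANDS 32     /* table size is illustrative */

    struct fake_req {
        void     *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
        uint32_t  current_outstanding_cmd;
    };

    /* Returns a free handle in 1..MAX-1, or 0 when the table is full;
     * handle 0 is reserved and never handed out. */
    static uint32_t find_free_handle(struct fake_req *req)
    {
        uint32_t handle = req->current_outstanding_cmd;

        for (uint32_t index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
            handle++;
            if (handle == MAX_OUTSTANDING_COMMANDS)
                handle = 1;             /* wrap past the reserved slot 0 */
            if (!req->outstanding_cmds[handle])
                return handle;
        }
        return 0;
    }

    int main(void)
    {
        struct fake_req req = { .current_outstanding_cmd = 30 };

        printf("next free handle: %u\n", find_free_handle(&req));
        return 0;
    }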
@@ -329,25 +339,26 @@ qla2x00_start_scsi(srb_t *sp)
329 339
330 /* Calculate the number of request entries needed. */ 340 /* Calculate the number of request entries needed. */
331 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); 341 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
332 if (ha->req_q_cnt < (req_cnt + 2)) { 342 if (req->cnt < (req_cnt + 2)) {
333 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); 343 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
334 if (ha->req_ring_index < cnt) 344 if (req->ring_index < cnt)
335 ha->req_q_cnt = cnt - ha->req_ring_index; 345 req->cnt = cnt - req->ring_index;
336 else 346 else
337 ha->req_q_cnt = ha->request_q_length - 347 req->cnt = req->length -
338 (ha->req_ring_index - cnt); 348 (req->ring_index - cnt);
339 } 349 }
340 if (ha->req_q_cnt < (req_cnt + 2)) 350 if (req->cnt < (req_cnt + 2))
341 goto queuing_error; 351 goto queuing_error;
342 352
343 /* Build command packet */ 353 /* Build command packet */
344 ha->current_outstanding_cmd = handle; 354 req->current_outstanding_cmd = handle;
345 ha->outstanding_cmds[handle] = sp; 355 req->outstanding_cmds[handle] = sp;
346 sp->ha = ha; 356 sp->vha = vha;
357 sp->que = req;
347 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 358 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
348 ha->req_q_cnt -= req_cnt; 359 req->cnt -= req_cnt;
349 360
350 cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr; 361 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
351 cmd_pkt->handle = handle; 362 cmd_pkt->handle = handle;
352 /* Zero out remaining portion of packet. */ 363 /* Zero out remaining portion of packet. */
353 clr_ptr = (uint32_t *)cmd_pkt + 2; 364 clr_ptr = (uint32_t *)cmd_pkt + 2;
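req->cnt caches how many request-ring entries are still free and is only refreshed from the hardware out-pointer when the cached value looks too small; the arithmetic has to cover both the consumer being ahead of the producer and the producer having wrapped past it. A standalone illustration of that computation (ring length and indices are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* Free request-ring entries, given the driver's next write slot
     * (ring_index) and the slot the hardware has consumed up to (out):
     *     if (ring_index < out) cnt = out - ring_index;
     *     else                  cnt = length - (ring_index - out);        */
    static uint16_t ring_free_entries(uint16_t length, uint16_t ring_index,
                                      uint16_t out)
    {
        if (ring_index < out)
            return out - ring_index;
        return length - (ring_index - out);
    }

    int main(void)
    {
        assert(ring_free_entries(128, 10, 50) == 40);   /* consumer ahead       */
        assert(ring_free_entries(128, 100, 20) == 48);  /* producer has wrapped */
        printf("free-entry math checks out\n");
        return 0;
    }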
@@ -373,23 +384,23 @@ qla2x00_start_scsi(srb_t *sp)
373 wmb(); 384 wmb();
374 385
375 /* Adjust ring index. */ 386 /* Adjust ring index. */
376 ha->req_ring_index++; 387 req->ring_index++;
377 if (ha->req_ring_index == ha->request_q_length) { 388 if (req->ring_index == req->length) {
378 ha->req_ring_index = 0; 389 req->ring_index = 0;
379 ha->request_ring_ptr = ha->request_ring; 390 req->ring_ptr = req->ring;
380 } else 391 } else
381 ha->request_ring_ptr++; 392 req->ring_ptr++;
382 393
383 sp->flags |= SRB_DMA_VALID; 394 sp->flags |= SRB_DMA_VALID;
384 395
385 /* Set chip new ring index. */ 396 /* Set chip new ring index. */
386 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index); 397 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
387 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ 398 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
388 399
389 /* Manage unprocessed RIO/ZIO commands in response queue. */ 400 /* Manage unprocessed RIO/ZIO commands in response queue. */
390 if (ha->flags.process_response_queue && 401 if (vha->flags.process_response_queue &&
391 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 402 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
392 qla2x00_process_response_queue(ha); 403 qla2x00_process_response_queue(rsp);
393 404
394 spin_unlock_irqrestore(&ha->hardware_lock, flags); 405 spin_unlock_irqrestore(&ha->hardware_lock, flags);
395 return (QLA_SUCCESS); 406 return (QLA_SUCCESS);
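Submission finishes the same way before and after the hunk above: bump the ring index, wrap back to the ring base when the end is reached, write the new index to the request-queue in-pointer, then read a register back so the write is not left sitting in a PCI posting buffer. A sketch of the software half, with the register accesses reduced to stubs:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_ring {
        char     *ring;         /* first entry                   */
        char     *ring_ptr;     /* next entry to fill            */
        uint16_t  ring_index;   /* producer index                */
        uint16_t  length;       /* number of entries in the ring */
        size_t    entry_size;
    };

    /* Stand-ins for the register write plus read-back used for PCI posting. */
    static void write_req_q_in(uint16_t index) { printf("req_q_in <- %u\n", index); }
    static void flush_posted_write(void)       { /* read the register back */ }

    static void ring_advance_and_notify(struct fake_ring *r)
    {
        r->ring_index++;
        if (r->ring_index == r->length) {   /* wrap back to the ring start */
            r->ring_index = 0;
            r->ring_ptr = r->ring;
        } else {
            r->ring_ptr += r->entry_size;
        }

        write_req_q_in(r->ring_index);      /* tell the chip a new IOCB is queued */
        flush_posted_write();               /* don't leave the write posted on PCI */
    }

    int main(void)
    {
        char storage[4 * 64];
        struct fake_ring r = { storage, storage, 0, 4, 64 };

        for (int i = 0; i < 5; i++)         /* the fourth advance wraps back to 0 */
            ring_advance_and_notify(&r);
        return 0;
    }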
@@ -415,18 +426,20 @@ queuing_error:
415 * Returns non-zero if a failure occurred, else zero. 426 * Returns non-zero if a failure occurred, else zero.
416 */ 427 */
417int 428int
418__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 429__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
419 uint8_t type) 430 struct rsp_que *rsp, uint16_t loop_id,
431 uint16_t lun, uint8_t type)
420{ 432{
421 mrk_entry_t *mrk; 433 mrk_entry_t *mrk;
422 struct mrk_entry_24xx *mrk24; 434 struct mrk_entry_24xx *mrk24;
423 scsi_qla_host_t *pha = to_qla_parent(ha); 435 struct qla_hw_data *ha = vha->hw;
436 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
424 437
425 mrk24 = NULL; 438 mrk24 = NULL;
426 mrk = (mrk_entry_t *)qla2x00_req_pkt(pha); 439 mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
427 if (mrk == NULL) { 440 if (mrk == NULL) {
428 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 441 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
429 __func__, ha->host_no)); 442 __func__, base_vha->host_no));
430 443
431 return (QLA_FUNCTION_FAILED); 444 return (QLA_FUNCTION_FAILED);
432 } 445 }
@@ -440,7 +453,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
440 mrk24->lun[1] = LSB(lun); 453 mrk24->lun[1] = LSB(lun);
441 mrk24->lun[2] = MSB(lun); 454 mrk24->lun[2] = MSB(lun);
442 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 455 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
443 mrk24->vp_index = ha->vp_idx; 456 mrk24->vp_index = vha->vp_idx;
444 } else { 457 } else {
445 SET_TARGET_ID(ha, mrk->target, loop_id); 458 SET_TARGET_ID(ha, mrk->target, loop_id);
446 mrk->lun = cpu_to_le16(lun); 459 mrk->lun = cpu_to_le16(lun);
@@ -448,22 +461,22 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
448 } 461 }
449 wmb(); 462 wmb();
450 463
451 qla2x00_isp_cmd(pha); 464 qla2x00_isp_cmd(vha, req);
452 465
453 return (QLA_SUCCESS); 466 return (QLA_SUCCESS);
454} 467}
455 468
456int 469int
457qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun, 470qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
458 uint8_t type) 471 struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
472 uint8_t type)
459{ 473{
460 int ret; 474 int ret;
461 unsigned long flags = 0; 475 unsigned long flags = 0;
462 scsi_qla_host_t *pha = to_qla_parent(ha);
463 476
464 spin_lock_irqsave(&pha->hardware_lock, flags); 477 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
465 ret = __qla2x00_marker(ha, loop_id, lun, type); 478 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
466 spin_unlock_irqrestore(&pha->hardware_lock, flags); 479 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
467 480
468 return (ret); 481 return (ret);
469} 482}
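After the change, qla2x00_marker() is purely a locking wrapper: it takes the physical adapter's hardware lock (vha->hw->hardware_lock) and calls __qla2x00_marker(), which expects its caller to already hold that lock; the double-underscore prefix is the usual kernel convention for the unlocked variant. A tiny userspace sketch of the same split, with a pthread mutex standing in for hardware_lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Caller must hold hw_lock (the __-prefixed variant). */
    static int __send_marker(int loop_id, int lun)
    {
        printf("marker for loop_id=%d lun=%d\n", loop_id, lun);
        return 0;
    }

    /* Locking wrapper, mirroring the qla2x00_marker() shape. */
    static int send_marker(int loop_id, int lun)
    {
        int ret;

        pthread_mutex_lock(&hw_lock);
        ret = __send_marker(loop_id, lun);
        pthread_mutex_unlock(&hw_lock);
        return ret;
    }

    int main(void)
    {
        return send_marker(0, 0);
    }

Keeping the unlocked body separate lets callers that already hold the lock (here, the IOCB submission paths) issue a marker without a self-deadlock.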
@@ -477,9 +490,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
477 * Returns NULL if function failed, else, a pointer to the request packet. 490 * Returns NULL if function failed, else, a pointer to the request packet.
478 */ 491 */
479static request_t * 492static request_t *
480qla2x00_req_pkt(scsi_qla_host_t *ha) 493qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
494 struct rsp_que *rsp)
481{ 495{
482 device_reg_t __iomem *reg = ha->iobase; 496 struct qla_hw_data *ha = vha->hw;
497 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
483 request_t *pkt = NULL; 498 request_t *pkt = NULL;
484 uint16_t cnt; 499 uint16_t cnt;
485 uint32_t *dword_ptr; 500 uint32_t *dword_ptr;
@@ -488,24 +503,29 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
488 503
489 /* Wait 1 second for slot. */ 504 /* Wait 1 second for slot. */
490 for (timer = HZ; timer; timer--) { 505 for (timer = HZ; timer; timer--) {
491 if ((req_cnt + 2) >= ha->req_q_cnt) { 506 if ((req_cnt + 2) >= req->cnt) {
492 /* Calculate number of free request entries. */ 507 /* Calculate number of free request entries. */
493 if (IS_FWI2_CAPABLE(ha)) 508 if (ha->mqenable)
494 cnt = (uint16_t)RD_REG_DWORD( 509 cnt = (uint16_t)
495 &reg->isp24.req_q_out); 510 RD_REG_DWORD(&reg->isp25mq.req_q_out);
496 else 511 else {
497 cnt = qla2x00_debounce_register( 512 if (IS_FWI2_CAPABLE(ha))
498 ISP_REQ_Q_OUT(ha, &reg->isp)); 513 cnt = (uint16_t)RD_REG_DWORD(
499 if (ha->req_ring_index < cnt) 514 &reg->isp24.req_q_out);
500 ha->req_q_cnt = cnt - ha->req_ring_index; 515 else
516 cnt = qla2x00_debounce_register(
517 ISP_REQ_Q_OUT(ha, &reg->isp));
518 }
519 if (req->ring_index < cnt)
520 req->cnt = cnt - req->ring_index;
501 else 521 else
502 ha->req_q_cnt = ha->request_q_length - 522 req->cnt = req->length -
503 (ha->req_ring_index - cnt); 523 (req->ring_index - cnt);
504 } 524 }
505 /* If room for request in request ring. */ 525 /* If room for request in request ring. */
506 if ((req_cnt + 2) < ha->req_q_cnt) { 526 if ((req_cnt + 2) < req->cnt) {
507 ha->req_q_cnt--; 527 req->cnt--;
508 pkt = ha->request_ring_ptr; 528 pkt = req->ring_ptr;
509 529
510 /* Zero out packet. */ 530 /* Zero out packet. */
511 dword_ptr = (uint32_t *)pkt; 531 dword_ptr = (uint32_t *)pkt;
@@ -513,7 +533,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
513 *dword_ptr++ = 0; 533 *dword_ptr++ = 0;
514 534
515 /* Set system defined field. */ 535 /* Set system defined field. */
516 pkt->sys_define = (uint8_t)ha->req_ring_index; 536 pkt->sys_define = (uint8_t)req->ring_index;
517 537
518 /* Set entry count. */ 538 /* Set entry count. */
519 pkt->entry_count = 1; 539 pkt->entry_count = 1;
@@ -522,15 +542,14 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
522 } 542 }
523 543
524 /* Release ring specific lock */ 544 /* Release ring specific lock */
525 spin_unlock(&ha->hardware_lock); 545 spin_unlock_irq(&ha->hardware_lock);
526 546
527 udelay(2); /* 2 us */ 547 udelay(2); /* 2 us */
528 548
529 /* Check for pending interrupts. */ 549 /* Check for pending interrupts. */
530 /* During init we issue marker directly */ 550 /* During init we issue marker directly */
531 if (!ha->marker_needed && !ha->flags.init_done) 551 if (!vha->marker_needed && !vha->flags.init_done)
532 qla2x00_poll(ha); 552 qla2x00_poll(rsp);
533
534 spin_lock_irq(&ha->hardware_lock); 553 spin_lock_irq(&ha->hardware_lock);
535 } 554 }
536 if (!pkt) { 555 if (!pkt) {
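The retry loop in qla2x00_req_pkt() keeps the same shape across the conversion: budget roughly one second (HZ iterations), recompute the free count, and if the ring is still full drop the hardware lock, delay a couple of microseconds, optionally poll the response queue so completions can free entries, then re-take the lock and try again. A condensed standalone sketch of that pattern, with the lock, delay, and poll reduced to stubs:

    #include <stdbool.h>
    #include <stdio.h>

    #define RETRIES 100                 /* stands in for the ~1 second budget */

    static bool ring_has_room(void)    { return false; }   /* pretend it's full  */
    static void drop_lock(void)        { }
    static void take_lock(void)        { }
    static void tiny_delay(void)       { }                  /* udelay(2) stand-in */
    static void poll_completions(void) { }                  /* qla2x00_poll() role */

    static void *reserve_request_entry(void)
    {
        for (int timer = RETRIES; timer; timer--) {
            if (ring_has_room())
                return (void *)1;       /* a real packet pointer in the driver */

            drop_lock();                /* let the interrupt path make progress */
            tiny_delay();
            poll_completions();         /* reap completions that free entries   */
            take_lock();
        }
        return NULL;                    /* timed out: no slot became free */
    }

    int main(void)
    {
        printf("%s\n", reserve_request_entry() ? "got a slot" : "ring stayed full");
        return 0;
    }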
@@ -547,29 +566,38 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
547 * Note: The caller must hold the hardware lock before calling this routine. 566 * Note: The caller must hold the hardware lock before calling this routine.
548 */ 567 */
549static void 568static void
550qla2x00_isp_cmd(scsi_qla_host_t *ha) 569qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
551{ 570{
552 device_reg_t __iomem *reg = ha->iobase; 571 struct qla_hw_data *ha = vha->hw;
572 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
573 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
553 574
554 DEBUG5(printk("%s(): IOCB data:\n", __func__)); 575 DEBUG5(printk("%s(): IOCB data:\n", __func__));
555 DEBUG5(qla2x00_dump_buffer( 576 DEBUG5(qla2x00_dump_buffer(
556 (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE)); 577 (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
557 578
558 /* Adjust ring index. */ 579 /* Adjust ring index. */
559 ha->req_ring_index++; 580 req->ring_index++;
560 if (ha->req_ring_index == ha->request_q_length) { 581 if (req->ring_index == req->length) {
561 ha->req_ring_index = 0; 582 req->ring_index = 0;
562 ha->request_ring_ptr = ha->request_ring; 583 req->ring_ptr = req->ring;
563 } else 584 } else
564 ha->request_ring_ptr++; 585 req->ring_ptr++;
565 586
566 /* Set chip new ring index. */ 587 /* Set chip new ring index. */
567 if (IS_FWI2_CAPABLE(ha)) { 588 if (ha->mqenable) {
568 WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index); 589 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
569 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 590 RD_REG_DWORD(&ioreg->hccr);
570 } else { 591 }
571 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index); 592 else {
572 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp)); 593 if (IS_FWI2_CAPABLE(ha)) {
594 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
595 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
596 } else {
597 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
598 req->ring_index);
599 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
600 }
573 } 601 }
574 602
575} 603}
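The reworked qla2x00_isp_cmd() shows the three doorbell cases introduced by multi-queue support: hardware with mqenable set writes the per-queue isp25mq in-pointer and flushes through an HCCR read, FWI2-capable parts write the 32-bit isp24 register, and older ISPs use the 16-bit mailbox-mapped in-pointer, each followed by a read so the write actually reaches the chip. A compact sketch of that capability dispatch, with the writes reduced to printouts:

    #include <stdint.h>
    #include <stdio.h>

    enum chip_kind { CHIP_LEGACY, CHIP_FWI2, CHIP_MQ };

    static void ring_doorbell(enum chip_kind kind, uint16_t ring_index)
    {
        switch (kind) {
        case CHIP_MQ:
            printf("isp25mq.req_q_in <- %u, flush via HCCR read\n", ring_index);
            break;
        case CHIP_FWI2:
            printf("isp24.req_q_in   <- %u, relaxed read-back\n", ring_index);
            break;
        default:
            printf("ISP_REQ_Q_IN     <- %u (16-bit), relaxed read-back\n", ring_index);
            break;
        }
    }

    int main(void)
    {
        ring_doorbell(CHIP_MQ, 42);
        ring_doorbell(CHIP_FWI2, 42);
        ring_doorbell(CHIP_LEGACY, 42);
        return 0;
    }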
@@ -610,10 +638,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
610{ 638{
611 uint16_t avail_dsds; 639 uint16_t avail_dsds;
612 uint32_t *cur_dsd; 640 uint32_t *cur_dsd;
613 scsi_qla_host_t *ha; 641 scsi_qla_host_t *vha;
614 struct scsi_cmnd *cmd; 642 struct scsi_cmnd *cmd;
615 struct scatterlist *sg; 643 struct scatterlist *sg;
616 int i; 644 int i;
645 struct req_que *req;
617 646
618 cmd = sp->cmd; 647 cmd = sp->cmd;
619 648
@@ -627,18 +656,19 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
627 return; 656 return;
628 } 657 }
629 658
630 ha = sp->ha; 659 vha = sp->vha;
660 req = sp->que;
631 661
632 /* Set transfer direction */ 662 /* Set transfer direction */
633 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 663 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
634 cmd_pkt->task_mgmt_flags = 664 cmd_pkt->task_mgmt_flags =
635 __constant_cpu_to_le16(TMF_WRITE_DATA); 665 __constant_cpu_to_le16(TMF_WRITE_DATA);
636 sp->fcport->ha->qla_stats.output_bytes += 666 sp->fcport->vha->hw->qla_stats.output_bytes +=
637 scsi_bufflen(sp->cmd); 667 scsi_bufflen(sp->cmd);
638 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 668 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
639 cmd_pkt->task_mgmt_flags = 669 cmd_pkt->task_mgmt_flags =
640 __constant_cpu_to_le16(TMF_READ_DATA); 670 __constant_cpu_to_le16(TMF_READ_DATA);
641 sp->fcport->ha->qla_stats.input_bytes += 671 sp->fcport->vha->hw->qla_stats.input_bytes +=
642 scsi_bufflen(sp->cmd); 672 scsi_bufflen(sp->cmd);
643 } 673 }
644 674
@@ -658,7 +688,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
658 * Five DSDs are available in the Continuation 688 * Five DSDs are available in the Continuation
659 * Type 1 IOCB. 689 * Type 1 IOCB.
660 */ 690 */
661 cont_pkt = qla2x00_prep_cont_type1_iocb(ha); 691 cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
662 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; 692 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
663 avail_dsds = 5; 693 avail_dsds = 5;
664 } 694 }
@@ -683,8 +713,6 @@ qla24xx_start_scsi(srb_t *sp)
683{ 713{
684 int ret, nseg; 714 int ret, nseg;
685 unsigned long flags; 715 unsigned long flags;
686 scsi_qla_host_t *ha, *pha;
687 struct scsi_cmnd *cmd;
688 uint32_t *clr_ptr; 716 uint32_t *clr_ptr;
689 uint32_t index; 717 uint32_t index;
690 uint32_t handle; 718 uint32_t handle;
@@ -692,35 +720,45 @@ qla24xx_start_scsi(srb_t *sp)
692 uint16_t cnt; 720 uint16_t cnt;
693 uint16_t req_cnt; 721 uint16_t req_cnt;
694 uint16_t tot_dsds; 722 uint16_t tot_dsds;
695 struct device_reg_24xx __iomem *reg; 723 struct req_que *req = NULL;
724 struct rsp_que *rsp = NULL;
725 struct scsi_cmnd *cmd = sp->cmd;
726 struct scsi_qla_host *vha = sp->vha;
727 struct qla_hw_data *ha = vha->hw;
728 uint16_t que_id;
696 729
697 /* Setup device pointers. */ 730 /* Setup device pointers. */
698 ret = 0; 731 ret = 0;
699 ha = sp->ha; 732 que_id = vha->req_ques[0];
700 pha = to_qla_parent(ha); 733
701 reg = &ha->iobase->isp24; 734 req = ha->req_q_map[que_id];
702 cmd = sp->cmd; 735 sp->que = req;
736
737 if (req->rsp)
738 rsp = req->rsp;
739 else
740 rsp = ha->rsp_q_map[que_id];
703 /* So we know we haven't pci_map'ed anything yet */ 741 /* So we know we haven't pci_map'ed anything yet */
704 tot_dsds = 0; 742 tot_dsds = 0;
705 743
706 /* Send marker if required */ 744 /* Send marker if required */
707 if (ha->marker_needed != 0) { 745 if (vha->marker_needed != 0) {
708 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { 746 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
747 != QLA_SUCCESS)
709 return QLA_FUNCTION_FAILED; 748 return QLA_FUNCTION_FAILED;
710 } 749 vha->marker_needed = 0;
711 ha->marker_needed = 0;
712 } 750 }
713 751
714 /* Acquire ring specific lock */ 752 /* Acquire ring specific lock */
715 spin_lock_irqsave(&pha->hardware_lock, flags); 753 spin_lock_irqsave(&ha->hardware_lock, flags);
716 754
717 /* Check for room in outstanding command list. */ 755 /* Check for room in outstanding command list. */
718 handle = ha->current_outstanding_cmd; 756 handle = req->current_outstanding_cmd;
719 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) { 757 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
720 handle++; 758 handle++;
721 if (handle == MAX_OUTSTANDING_COMMANDS) 759 if (handle == MAX_OUTSTANDING_COMMANDS)
722 handle = 1; 760 handle = 1;
723 if (!ha->outstanding_cmds[handle]) 761 if (!req->outstanding_cmds[handle])
724 break; 762 break;
725 } 763 }
726 if (index == MAX_OUTSTANDING_COMMANDS) 764 if (index == MAX_OUTSTANDING_COMMANDS)
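In qla24xx_start_scsi() the queues stop being implicit: the vport's first queue index (vha->req_ques[0]) selects an entry in ha->req_q_map[], and the response queue comes either from the request queue's paired rsp pointer or, failing that, from the matching slot in ha->rsp_q_map[]. A small sketch of that lookup with invented table sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define MAX_QUEUES 4                     /* illustrative only */

    struct rsp_q { int id; };
    struct req_q { int id; struct rsp_q *rsp; };   /* rsp may be NULL */

    struct hw {
        struct req_q *req_q_map[MAX_QUEUES];
        struct rsp_q *rsp_q_map[MAX_QUEUES];
    };

    struct vhost { uint16_t req_ques[1]; };        /* first queue index only */

    static void pick_queues(struct hw *hw, struct vhost *vha,
                            struct req_q **req, struct rsp_q **rsp)
    {
        uint16_t que_id = vha->req_ques[0];

        *req = hw->req_q_map[que_id];
        *rsp = (*req)->rsp ? (*req)->rsp : hw->rsp_q_map[que_id];
    }

    int main(void)
    {
        struct rsp_q r0 = { 0 };
        struct req_q q0 = { 0, NULL };
        struct hw hw = { { &q0 }, { &r0 } };
        struct vhost vha = { { 0 } };
        struct req_q *req; struct rsp_q *rsp;

        pick_queues(&hw, &vha, &req, &rsp);
        printf("req %d pairs with rsp %d\n", req->id, rsp->id);
        return 0;
    }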
@@ -738,25 +776,26 @@ qla24xx_start_scsi(srb_t *sp)
738 tot_dsds = nseg; 776 tot_dsds = nseg;
739 777
740 req_cnt = qla24xx_calc_iocbs(tot_dsds); 778 req_cnt = qla24xx_calc_iocbs(tot_dsds);
741 if (ha->req_q_cnt < (req_cnt + 2)) { 779 if (req->cnt < (req_cnt + 2)) {
742 cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out); 780 cnt = ha->isp_ops->rd_req_reg(ha, req->id);
743 if (ha->req_ring_index < cnt) 781
744 ha->req_q_cnt = cnt - ha->req_ring_index; 782 if (req->ring_index < cnt)
783 req->cnt = cnt - req->ring_index;
745 else 784 else
746 ha->req_q_cnt = ha->request_q_length - 785 req->cnt = req->length -
747 (ha->req_ring_index - cnt); 786 (req->ring_index - cnt);
748 } 787 }
749 if (ha->req_q_cnt < (req_cnt + 2)) 788 if (req->cnt < (req_cnt + 2))
750 goto queuing_error; 789 goto queuing_error;
751 790
752 /* Build command packet. */ 791 /* Build command packet. */
753 ha->current_outstanding_cmd = handle; 792 req->current_outstanding_cmd = handle;
754 ha->outstanding_cmds[handle] = sp; 793 req->outstanding_cmds[handle] = sp;
755 sp->ha = ha; 794 sp->vha = vha;
756 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 795 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
757 ha->req_q_cnt -= req_cnt; 796 req->cnt -= req_cnt;
758 797
759 cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr; 798 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
760 cmd_pkt->handle = handle; 799 cmd_pkt->handle = handle;
761 800
762 /* Zero out remaining portion of packet. */ 801 /* Zero out remaining portion of packet. */
@@ -789,32 +828,63 @@ qla24xx_start_scsi(srb_t *sp)
789 wmb(); 828 wmb();
790 829
791 /* Adjust ring index. */ 830 /* Adjust ring index. */
792 ha->req_ring_index++; 831 req->ring_index++;
793 if (ha->req_ring_index == ha->request_q_length) { 832 if (req->ring_index == req->length) {
794 ha->req_ring_index = 0; 833 req->ring_index = 0;
795 ha->request_ring_ptr = ha->request_ring; 834 req->ring_ptr = req->ring;
796 } else 835 } else
797 ha->request_ring_ptr++; 836 req->ring_ptr++;
798 837
799 sp->flags |= SRB_DMA_VALID; 838 sp->flags |= SRB_DMA_VALID;
800 839
801 /* Set chip new ring index. */ 840 /* Set chip new ring index. */
802 WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index); 841 ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);
803 RD_REG_DWORD_RELAXED(&reg->req_q_in); /* PCI Posting. */
804 842
805 /* Manage unprocessed RIO/ZIO commands in response queue. */ 843 /* Manage unprocessed RIO/ZIO commands in response queue. */
806 if (ha->flags.process_response_queue && 844 if (vha->flags.process_response_queue &&
807 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 845 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
808 qla24xx_process_response_queue(ha); 846 qla24xx_process_response_queue(rsp);
809 847
810 spin_unlock_irqrestore(&pha->hardware_lock, flags); 848 spin_unlock_irqrestore(&ha->hardware_lock, flags);
811 return QLA_SUCCESS; 849 return QLA_SUCCESS;
812 850
813queuing_error: 851queuing_error:
814 if (tot_dsds) 852 if (tot_dsds)
815 scsi_dma_unmap(cmd); 853 scsi_dma_unmap(cmd);
816 854
817 spin_unlock_irqrestore(&pha->hardware_lock, flags); 855 spin_unlock_irqrestore(&ha->hardware_lock, flags);
818 856
819 return QLA_FUNCTION_FAILED; 857 return QLA_FUNCTION_FAILED;
820} 858}
859
860uint16_t
861qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
862{
863 device_reg_t __iomem *reg = (void *) ha->iobase;
864 return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
865}
866
867uint16_t
868qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
869{
870 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
871 return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
872}
873
874void
875qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
876{
877 device_reg_t __iomem *reg = (void *) ha->iobase;
878 WRT_REG_DWORD(&reg->isp24.req_q_in, index);
879 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
880}
881
882void
883qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
884{
885 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
886 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
887 WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
888 RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
889}
890
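The new qla24xx/qla25xx rd_req_reg and wrt_req_reg helpers, selected through isp_ops, show how per-queue registers are reached on multi-queue hardware: each queue owns a fixed-size window inside the mapped mqiobase region, so queue id's register block sits at mqiobase + QLA_QUE_PAGE * id. A sketch of that address arithmetic with made-up constants (in the driver this is pointer arithmetic on an __iomem pointer, not integer math):

    #include <stdint.h>
    #include <stdio.h>

    #define QUE_PAGE_SIZE 0x1000u          /* stand-in for QLA_QUE_PAGE */

    /* Base of queue 'id''s register window inside a BAR mapping. */
    static uintptr_t queue_reg_base(uintptr_t mqiobase, uint16_t id)
    {
        return mqiobase + (uintptr_t)QUE_PAGE_SIZE * id;
    }

    int main(void)
    {
        uintptr_t bar = 0xFEB00000u;       /* pretend BAR address, illustrative */

        for (unsigned id = 0; id < 3; id++)
            printf("queue %u registers at %#lx\n", id,
                   (unsigned long)queue_reg_base(bar, (uint16_t)id));
        return 0;
    }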
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a76efd99d007..d5fb79a88001 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -10,10 +10,13 @@
10#include <scsi/scsi_tcq.h> 10#include <scsi/scsi_tcq.h>
11 11
12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 12static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); 13static void qla2x00_process_completed_request(struct scsi_qla_host *,
14static void qla2x00_status_entry(scsi_qla_host_t *, void *); 14 struct req_que *, uint32_t);
15static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
15static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 16static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *); 17static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
18 sts_entry_t *);
19static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
17 20
18/** 21/**
19 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 22 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -27,24 +30,28 @@ static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
27irqreturn_t 30irqreturn_t
28qla2100_intr_handler(int irq, void *dev_id) 31qla2100_intr_handler(int irq, void *dev_id)
29{ 32{
30 scsi_qla_host_t *ha; 33 scsi_qla_host_t *vha;
34 struct qla_hw_data *ha;
31 struct device_reg_2xxx __iomem *reg; 35 struct device_reg_2xxx __iomem *reg;
32 int status; 36 int status;
33 unsigned long iter; 37 unsigned long iter;
34 uint16_t hccr; 38 uint16_t hccr;
35 uint16_t mb[4]; 39 uint16_t mb[4];
40 struct rsp_que *rsp;
36 41
37 ha = (scsi_qla_host_t *) dev_id; 42 rsp = (struct rsp_que *) dev_id;
38 if (!ha) { 43 if (!rsp) {
39 printk(KERN_INFO 44 printk(KERN_INFO
40 "%s(): NULL host pointer\n", __func__); 45 "%s(): NULL response queue pointer\n", __func__);
41 return (IRQ_NONE); 46 return (IRQ_NONE);
42 } 47 }
43 48
49 ha = rsp->hw;
44 reg = &ha->iobase->isp; 50 reg = &ha->iobase->isp;
45 status = 0; 51 status = 0;
46 52
47 spin_lock(&ha->hardware_lock); 53 spin_lock(&ha->hardware_lock);
54 vha = qla2x00_get_rsp_host(rsp);
48 for (iter = 50; iter--; ) { 55 for (iter = 50; iter--; ) {
49 hccr = RD_REG_WORD(&reg->hccr); 56 hccr = RD_REG_WORD(&reg->hccr);
50 if (hccr & HCCR_RISC_PAUSE) { 57 if (hccr & HCCR_RISC_PAUSE) {
@@ -59,8 +66,8 @@ qla2100_intr_handler(int irq, void *dev_id)
59 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 66 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
60 RD_REG_WORD(&reg->hccr); 67 RD_REG_WORD(&reg->hccr);
61 68
62 ha->isp_ops->fw_dump(ha, 1); 69 ha->isp_ops->fw_dump(vha, 1);
63 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 70 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
64 break; 71 break;
65 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) 72 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
66 break; 73 break;
@@ -72,24 +79,24 @@ qla2100_intr_handler(int irq, void *dev_id)
72 /* Get mailbox data. */ 79 /* Get mailbox data. */
73 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 80 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
74 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 81 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
75 qla2x00_mbx_completion(ha, mb[0]); 82 qla2x00_mbx_completion(vha, mb[0]);
76 status |= MBX_INTERRUPT; 83 status |= MBX_INTERRUPT;
77 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 84 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
78 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 85 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
79 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 86 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
80 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 87 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
81 qla2x00_async_event(ha, mb); 88 qla2x00_async_event(vha, rsp, mb);
82 } else { 89 } else {
83 /*EMPTY*/ 90 /*EMPTY*/
84 DEBUG2(printk("scsi(%ld): Unrecognized " 91 DEBUG2(printk("scsi(%ld): Unrecognized "
85 "interrupt type (%d).\n", 92 "interrupt type (%d).\n",
86 ha->host_no, mb[0])); 93 vha->host_no, mb[0]));
87 } 94 }
88 /* Release mailbox registers. */ 95 /* Release mailbox registers. */
89 WRT_REG_WORD(&reg->semaphore, 0); 96 WRT_REG_WORD(&reg->semaphore, 0);
90 RD_REG_WORD(&reg->semaphore); 97 RD_REG_WORD(&reg->semaphore);
91 } else { 98 } else {
92 qla2x00_process_response_queue(ha); 99 qla2x00_process_response_queue(rsp);
93 100
94 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 101 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
95 RD_REG_WORD(&reg->hccr); 102 RD_REG_WORD(&reg->hccr);
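The interrupt handlers now receive a struct rsp_que * as dev_id instead of the scsi_qla_host, so one handler can be registered per response queue and recover both the hardware context (rsp->hw) and the owning host from that cookie. The same dev_id-as-context idiom, reduced to a standalone dispatcher that simply invokes the handler directly:

    #include <stdio.h>

    struct hw_ctx  { const char *name; };
    struct rsp_que { int id; struct hw_ctx *hw; };

    /* Mirrors the handler shape: cast the opaque cookie back to the queue. */
    static int fake_intr_handler(int irq, void *dev_id)
    {
        struct rsp_que *rsp = dev_id;

        if (!rsp)
            return 0;                      /* IRQ_NONE in the driver */
        printf("irq %d: queue %d on %s\n", irq, rsp->id, rsp->hw->name);
        return 1;                          /* IRQ_HANDLED */
    }

    int main(void)
    {
        struct hw_ctx hw = { "isp" };
        struct rsp_que rsp0 = { 0, &hw }, rsp1 = { 1, &hw };

        /* One registration per queue; here the "interrupts" are direct calls. */
        fake_intr_handler(17, &rsp0);
        fake_intr_handler(18, &rsp1);
        return 0;
    }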
@@ -118,25 +125,29 @@ qla2100_intr_handler(int irq, void *dev_id)
118irqreturn_t 125irqreturn_t
119qla2300_intr_handler(int irq, void *dev_id) 126qla2300_intr_handler(int irq, void *dev_id)
120{ 127{
121 scsi_qla_host_t *ha; 128 scsi_qla_host_t *vha;
122 struct device_reg_2xxx __iomem *reg; 129 struct device_reg_2xxx __iomem *reg;
123 int status; 130 int status;
124 unsigned long iter; 131 unsigned long iter;
125 uint32_t stat; 132 uint32_t stat;
126 uint16_t hccr; 133 uint16_t hccr;
127 uint16_t mb[4]; 134 uint16_t mb[4];
135 struct rsp_que *rsp;
136 struct qla_hw_data *ha;
128 137
129 ha = (scsi_qla_host_t *) dev_id; 138 rsp = (struct rsp_que *) dev_id;
130 if (!ha) { 139 if (!rsp) {
131 printk(KERN_INFO 140 printk(KERN_INFO
132 "%s(): NULL host pointer\n", __func__); 141 "%s(): NULL response queue pointer\n", __func__);
133 return (IRQ_NONE); 142 return (IRQ_NONE);
134 } 143 }
135 144
145 ha = rsp->hw;
136 reg = &ha->iobase->isp; 146 reg = &ha->iobase->isp;
137 status = 0; 147 status = 0;
138 148
139 spin_lock(&ha->hardware_lock); 149 spin_lock(&ha->hardware_lock);
150 vha = qla2x00_get_rsp_host(rsp);
140 for (iter = 50; iter--; ) { 151 for (iter = 50; iter--; ) {
141 stat = RD_REG_DWORD(&reg->u.isp2300.host_status); 152 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
142 if (stat & HSR_RISC_PAUSED) { 153 if (stat & HSR_RISC_PAUSED) {
@@ -159,8 +170,8 @@ qla2300_intr_handler(int irq, void *dev_id)
159 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); 170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
160 RD_REG_WORD(&reg->hccr); 171 RD_REG_WORD(&reg->hccr);
161 172
162 ha->isp_ops->fw_dump(ha, 1); 173 ha->isp_ops->fw_dump(vha, 1);
163 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
164 break; 175 break;
165 } else if ((stat & HSR_RISC_INT) == 0) 176 } else if ((stat & HSR_RISC_INT) == 0)
166 break; 177 break;
@@ -170,7 +181,7 @@ qla2300_intr_handler(int irq, void *dev_id)
170 case 0x2: 181 case 0x2:
171 case 0x10: 182 case 0x10:
172 case 0x11: 183 case 0x11:
173 qla2x00_mbx_completion(ha, MSW(stat)); 184 qla2x00_mbx_completion(vha, MSW(stat));
174 status |= MBX_INTERRUPT; 185 status |= MBX_INTERRUPT;
175 186
176 /* Release mailbox registers. */ 187 /* Release mailbox registers. */
@@ -181,26 +192,26 @@ qla2300_intr_handler(int irq, void *dev_id)
181 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
182 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
183 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
184 qla2x00_async_event(ha, mb); 195 qla2x00_async_event(vha, rsp, mb);
185 break; 196 break;
186 case 0x13: 197 case 0x13:
187 qla2x00_process_response_queue(ha); 198 qla2x00_process_response_queue(rsp);
188 break; 199 break;
189 case 0x15: 200 case 0x15:
190 mb[0] = MBA_CMPLT_1_16BIT; 201 mb[0] = MBA_CMPLT_1_16BIT;
191 mb[1] = MSW(stat); 202 mb[1] = MSW(stat);
192 qla2x00_async_event(ha, mb); 203 qla2x00_async_event(vha, rsp, mb);
193 break; 204 break;
194 case 0x16: 205 case 0x16:
195 mb[0] = MBA_SCSI_COMPLETION; 206 mb[0] = MBA_SCSI_COMPLETION;
196 mb[1] = MSW(stat); 207 mb[1] = MSW(stat);
197 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198 qla2x00_async_event(ha, mb); 209 qla2x00_async_event(vha, rsp, mb);
199 break; 210 break;
200 default: 211 default:
201 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 212 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
202 "(%d).\n", 213 "(%d).\n",
203 ha->host_no, stat & 0xff)); 214 vha->host_no, stat & 0xff));
204 break; 215 break;
205 } 216 }
206 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); 217 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
@@ -223,10 +234,11 @@ qla2300_intr_handler(int irq, void *dev_id)
223 * @mb0: Mailbox0 register 234 * @mb0: Mailbox0 register
224 */ 235 */
225static void 236static void
226qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 237qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
227{ 238{
228 uint16_t cnt; 239 uint16_t cnt;
229 uint16_t __iomem *wptr; 240 uint16_t __iomem *wptr;
241 struct qla_hw_data *ha = vha->hw;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 242 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 243
232 /* Load return mailbox registers. */ 244 /* Load return mailbox registers. */
@@ -247,10 +259,10 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
247 259
248 if (ha->mcp) { 260 if (ha->mcp) {
249 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 261 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
250 __func__, ha->host_no, ha->mcp->mb[0])); 262 __func__, vha->host_no, ha->mcp->mb[0]));
251 } else { 263 } else {
252 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 264 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
253 __func__, ha->host_no)); 265 __func__, vha->host_no));
254 } 266 }
255} 267}
256 268
@@ -260,7 +272,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
260 * @mb: Mailbox registers (0 - 3) 272 * @mb: Mailbox registers (0 - 3)
261 */ 273 */
262void 274void
263qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) 275qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
264{ 276{
265#define LS_UNKNOWN 2 277#define LS_UNKNOWN 2
266 static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; 278 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
@@ -268,6 +280,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
268 uint16_t handle_cnt; 280 uint16_t handle_cnt;
269 uint16_t cnt; 281 uint16_t cnt;
270 uint32_t handles[5]; 282 uint32_t handles[5];
283 struct qla_hw_data *ha = vha->hw;
271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 284 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 uint32_t rscn_entry, host_pid; 285 uint32_t rscn_entry, host_pid;
273 uint8_t rscn_queue_index; 286 uint8_t rscn_queue_index;
@@ -329,17 +342,19 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
329 342
330 switch (mb[0]) { 343 switch (mb[0]) {
331 case MBA_SCSI_COMPLETION: /* Fast Post */ 344 case MBA_SCSI_COMPLETION: /* Fast Post */
332 if (!ha->flags.online) 345 if (!vha->flags.online)
333 break; 346 break;
334 347
335 for (cnt = 0; cnt < handle_cnt; cnt++) 348 for (cnt = 0; cnt < handle_cnt; cnt++)
336 qla2x00_process_completed_request(ha, handles[cnt]); 349 qla2x00_process_completed_request(vha, rsp->req,
350 handles[cnt]);
337 break; 351 break;
338 352
339 case MBA_RESET: /* Reset */ 353 case MBA_RESET: /* Reset */
340 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); 354 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
355 vha->host_no));
341 356
342 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 357 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
343 break; 358 break;
344 359
345 case MBA_SYSTEM_ERR: /* System Error */ 360 case MBA_SYSTEM_ERR: /* System Error */
@@ -347,70 +362,70 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
347 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 362 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
348 mb[1], mb[2], mb[3]); 363 mb[1], mb[2], mb[3]);
349 364
350 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 365 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
351 ha->isp_ops->fw_dump(ha, 1); 366 ha->isp_ops->fw_dump(vha, 1);
352 367
353 if (IS_FWI2_CAPABLE(ha)) { 368 if (IS_FWI2_CAPABLE(ha)) {
354 if (mb[1] == 0 && mb[2] == 0) { 369 if (mb[1] == 0 && mb[2] == 0) {
355 qla_printk(KERN_ERR, ha, 370 qla_printk(KERN_ERR, ha,
356 "Unrecoverable Hardware Error: adapter " 371 "Unrecoverable Hardware Error: adapter "
357 "marked OFFLINE!\n"); 372 "marked OFFLINE!\n");
358 ha->flags.online = 0; 373 vha->flags.online = 0;
359 } else 374 } else
360 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 375 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
361 } else if (mb[1] == 0) { 376 } else if (mb[1] == 0) {
362 qla_printk(KERN_INFO, ha, 377 qla_printk(KERN_INFO, ha,
363 "Unrecoverable Hardware Error: adapter marked " 378 "Unrecoverable Hardware Error: adapter marked "
364 "OFFLINE!\n"); 379 "OFFLINE!\n");
365 ha->flags.online = 0; 380 vha->flags.online = 0;
366 } else 381 } else
367 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 382 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break; 383 break;
369 384
370 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 385 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
371 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", 386 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
372 ha->host_no)); 387 vha->host_no));
373 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 388 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
374 389
375 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 390 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
376 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
377 break; 392 break;
378 393
379 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 394 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
380 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 395 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
381 ha->host_no)); 396 vha->host_no));
382 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 397 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
383 398
384 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]); 399 qla2x00_post_hwe_work(vha, mb[0], mb[1], mb[2], mb[3]);
385 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 400 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
386 break; 401 break;
387 402
388 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 403 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
389 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 404 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
390 ha->host_no)); 405 vha->host_no));
391 break; 406 break;
392 407
393 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 408 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
394 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, 409 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
395 mb[1])); 410 mb[1]));
396 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); 411 qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 412
398 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 413 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
399 atomic_set(&ha->loop_state, LOOP_DOWN); 414 atomic_set(&vha->loop_state, LOOP_DOWN);
400 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 415 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
401 qla2x00_mark_all_devices_lost(ha, 1); 416 qla2x00_mark_all_devices_lost(vha, 1);
402 } 417 }
403 418
404 if (ha->parent) { 419 if (vha->vp_idx) {
405 atomic_set(&ha->vp_state, VP_FAILED); 420 atomic_set(&vha->vp_state, VP_FAILED);
406 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 421 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
407 } 422 }
408 423
409 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 424 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
410 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 425 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
411 426
412 ha->flags.management_server_logged_in = 0; 427 vha->flags.management_server_logged_in = 0;
413 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]); 428 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
414 break; 429 break;
415 430
416 case MBA_LOOP_UP: /* Loop Up Event */ 431 case MBA_LOOP_UP: /* Loop Up Event */
@@ -425,59 +440,59 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
425 } 440 }
426 441
427 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 442 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
428 ha->host_no, link_speed)); 443 vha->host_no, link_speed));
429 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", 444 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
430 link_speed); 445 link_speed);
431 446
432 ha->flags.management_server_logged_in = 0; 447 vha->flags.management_server_logged_in = 0;
433 qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate); 448 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
434 break; 449 break;
435 450
436 case MBA_LOOP_DOWN: /* Loop Down Event */ 451 case MBA_LOOP_DOWN: /* Loop Down Event */
437 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 452 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
438 "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3])); 453 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
439 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", 454 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
440 mb[1], mb[2], mb[3]); 455 mb[1], mb[2], mb[3]);
441 456
442 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
443 atomic_set(&ha->loop_state, LOOP_DOWN); 458 atomic_set(&vha->loop_state, LOOP_DOWN);
444 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 459 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
445 ha->device_flags |= DFLG_NO_CABLE; 460 vha->device_flags |= DFLG_NO_CABLE;
446 qla2x00_mark_all_devices_lost(ha, 1); 461 qla2x00_mark_all_devices_lost(vha, 1);
447 } 462 }
448 463
449 if (ha->parent) { 464 if (vha->vp_idx) {
450 atomic_set(&ha->vp_state, VP_FAILED); 465 atomic_set(&vha->vp_state, VP_FAILED);
451 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 466 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
452 } 467 }
453 468
454 ha->flags.management_server_logged_in = 0; 469 vha->flags.management_server_logged_in = 0;
455 ha->link_data_rate = PORT_SPEED_UNKNOWN; 470 ha->link_data_rate = PORT_SPEED_UNKNOWN;
456 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0); 471 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
457 break; 472 break;
458 473
459 case MBA_LIP_RESET: /* LIP reset occurred */ 474 case MBA_LIP_RESET: /* LIP reset occurred */
460 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 475 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 ha->host_no, mb[1])); 476 vha->host_no, mb[1]));
462 qla_printk(KERN_INFO, ha, 477 qla_printk(KERN_INFO, ha,
463 "LIP reset occurred (%x).\n", mb[1]); 478 "LIP reset occurred (%x).\n", mb[1]);
464 479
465 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 480 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
466 atomic_set(&ha->loop_state, LOOP_DOWN); 481 atomic_set(&vha->loop_state, LOOP_DOWN);
467 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 482 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
468 qla2x00_mark_all_devices_lost(ha, 1); 483 qla2x00_mark_all_devices_lost(vha, 1);
469 } 484 }
470 485
471 if (ha->parent) { 486 if (vha->vp_idx) {
472 atomic_set(&ha->vp_state, VP_FAILED); 487 atomic_set(&vha->vp_state, VP_FAILED);
473 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 488 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
474 } 489 }
475 490
476 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 491 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
477 492
478 ha->operating_mode = LOOP; 493 ha->operating_mode = LOOP;
479 ha->flags.management_server_logged_in = 0; 494 vha->flags.management_server_logged_in = 0;
480 qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]); 495 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
481 break; 496 break;
482 497
483 case MBA_POINT_TO_POINT: /* Point-to-Point */ 498 case MBA_POINT_TO_POINT: /* Point-to-Point */
@@ -485,33 +500,33 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
485 break; 500 break;
486 501
487 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", 502 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
488 ha->host_no)); 503 vha->host_no));
489 504
490 /* 505 /*
491 * Until there's a transition from loop down to loop up, treat 506 * Until there's a transition from loop down to loop up, treat
492 * this as loop down only. 507 * this as loop down only.
493 */ 508 */
494 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 509 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
495 atomic_set(&ha->loop_state, LOOP_DOWN); 510 atomic_set(&vha->loop_state, LOOP_DOWN);
496 if (!atomic_read(&ha->loop_down_timer)) 511 if (!atomic_read(&vha->loop_down_timer))
497 atomic_set(&ha->loop_down_timer, 512 atomic_set(&vha->loop_down_timer,
498 LOOP_DOWN_TIME); 513 LOOP_DOWN_TIME);
499 qla2x00_mark_all_devices_lost(ha, 1); 514 qla2x00_mark_all_devices_lost(vha, 1);
500 } 515 }
501 516
502 if (ha->parent) { 517 if (vha->vp_idx) {
503 atomic_set(&ha->vp_state, VP_FAILED); 518 atomic_set(&vha->vp_state, VP_FAILED);
504 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 519 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
505 } 520 }
506 521
507 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 522 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
508 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 523 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
509 } 524
510 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 525 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
511 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); 526 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
512 527
513 ha->flags.gpsc_supported = 1; 528 ha->flags.gpsc_supported = 1;
514 ha->flags.management_server_logged_in = 0; 529 vha->flags.management_server_logged_in = 0;
515 break; 530 break;
516 531
517 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 532 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
@@ -520,134 +535,137 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
520 535
521 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 536 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
522 "received.\n", 537 "received.\n",
523 ha->host_no)); 538 vha->host_no));
524 qla_printk(KERN_INFO, ha, 539 qla_printk(KERN_INFO, ha,
525 "Configuration change detected: value=%x.\n", mb[1]); 540 "Configuration change detected: value=%x.\n", mb[1]);
526 541
527 if (atomic_read(&ha->loop_state) != LOOP_DOWN) { 542 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
528 atomic_set(&ha->loop_state, LOOP_DOWN); 543 atomic_set(&vha->loop_state, LOOP_DOWN);
529 if (!atomic_read(&ha->loop_down_timer)) 544 if (!atomic_read(&vha->loop_down_timer))
530 atomic_set(&ha->loop_down_timer, 545 atomic_set(&vha->loop_down_timer,
531 LOOP_DOWN_TIME); 546 LOOP_DOWN_TIME);
532 qla2x00_mark_all_devices_lost(ha, 1); 547 qla2x00_mark_all_devices_lost(vha, 1);
533 } 548 }
534 549
535 if (ha->parent) { 550 if (vha->vp_idx) {
536 atomic_set(&ha->vp_state, VP_FAILED); 551 atomic_set(&vha->vp_state, VP_FAILED);
537 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED); 552 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
538 } 553 }
539 554
540 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 555 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
541 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 556 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
542 break; 557 break;
543 558
544 case MBA_PORT_UPDATE: /* Port database update */ 559 case MBA_PORT_UPDATE: /* Port database update */
560 /* Only handle SCNs for our Vport index. */
561 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
562 break;
563
545 /* 564 /*
546 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 565 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
547 * event etc. earlier indicating loop is down) then process 566 * event etc. earlier indicating loop is down) then process
548 * it. Otherwise ignore it and Wait for RSCN to come in. 567 * it. Otherwise ignore it and Wait for RSCN to come in.
549 */ 568 */
550 atomic_set(&ha->loop_down_timer, 0); 569 atomic_set(&vha->loop_down_timer, 0);
551 if (atomic_read(&ha->loop_state) != LOOP_DOWN && 570 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
552 atomic_read(&ha->loop_state) != LOOP_DEAD) { 571 atomic_read(&vha->loop_state) != LOOP_DEAD) {
553 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 572 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
554 "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1], 573 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
555 mb[2], mb[3])); 574 mb[2], mb[3]));
556 break; 575 break;
557 } 576 }
558 577
559 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 578 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
560 ha->host_no)); 579 vha->host_no));
561 DEBUG(printk(KERN_INFO 580 DEBUG(printk(KERN_INFO
562 "scsi(%ld): Port database changed %04x %04x %04x.\n", 581 "scsi(%ld): Port database changed %04x %04x %04x.\n",
563 ha->host_no, mb[1], mb[2], mb[3])); 582 vha->host_no, mb[1], mb[2], mb[3]));
564 583
565 /* 584 /*
566 * Mark all devices as missing so we will login again. 585 * Mark all devices as missing so we will login again.
567 */ 586 */
568 atomic_set(&ha->loop_state, LOOP_UP); 587 atomic_set(&vha->loop_state, LOOP_UP);
569 588
570 qla2x00_mark_all_devices_lost(ha, 1); 589 qla2x00_mark_all_devices_lost(vha, 1);
571 590
572 ha->flags.rscn_queue_overflow = 1; 591 vha->flags.rscn_queue_overflow = 1;
573 592
574 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 593 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
575 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 594 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
576 break; 595 break;
577 596
578 case MBA_RSCN_UPDATE: /* State Change Registration */ 597 case MBA_RSCN_UPDATE: /* State Change Registration */
579 /* Check if the Vport has issued a SCR */ 598 /* Check if the Vport has issued a SCR */
580 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags)) 599 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
581 break; 600 break;
582 /* Only handle SCNs for our Vport index. */ 601 /* Only handle SCNs for our Vport index. */
583 if (ha->parent && ha->vp_idx != (mb[3] & 0xff)) 602 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
584 break; 603 break;
585
586 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 604 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
587 ha->host_no)); 605 vha->host_no));
588 DEBUG(printk(KERN_INFO 606 DEBUG(printk(KERN_INFO
589 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 607 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 ha->host_no, mb[1], mb[2], mb[3])); 608 vha->host_no, mb[1], mb[2], mb[3]));
591 609
592 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 610 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
593 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | 611 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
594 ha->d_id.b.al_pa; 612 | vha->d_id.b.al_pa;
595 if (rscn_entry == host_pid) { 613 if (rscn_entry == host_pid) {
596 DEBUG(printk(KERN_INFO 614 DEBUG(printk(KERN_INFO
597 "scsi(%ld): Ignoring RSCN update to local host " 615 "scsi(%ld): Ignoring RSCN update to local host "
598 "port ID (%06x)\n", 616 "port ID (%06x)\n",
599 ha->host_no, host_pid)); 617 vha->host_no, host_pid));
600 break; 618 break;
601 } 619 }
602 620
603 /* Ignore reserved bits from RSCN-payload. */ 621 /* Ignore reserved bits from RSCN-payload. */
604 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 622 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
605 rscn_queue_index = ha->rscn_in_ptr + 1; 623 rscn_queue_index = vha->rscn_in_ptr + 1;
606 if (rscn_queue_index == MAX_RSCN_COUNT) 624 if (rscn_queue_index == MAX_RSCN_COUNT)
607 rscn_queue_index = 0; 625 rscn_queue_index = 0;
608 if (rscn_queue_index != ha->rscn_out_ptr) { 626 if (rscn_queue_index != vha->rscn_out_ptr) {
609 ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; 627 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
610 ha->rscn_in_ptr = rscn_queue_index; 628 vha->rscn_in_ptr = rscn_queue_index;
611 } else { 629 } else {
612 ha->flags.rscn_queue_overflow = 1; 630 vha->flags.rscn_queue_overflow = 1;
613 } 631 }
614 632
615 atomic_set(&ha->loop_state, LOOP_UPDATE); 633 atomic_set(&vha->loop_state, LOOP_UPDATE);
616 atomic_set(&ha->loop_down_timer, 0); 634 atomic_set(&vha->loop_down_timer, 0);
617 ha->flags.management_server_logged_in = 0; 635 vha->flags.management_server_logged_in = 0;
618 636
619 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 637 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
620 set_bit(RSCN_UPDATE, &ha->dpc_flags); 638 set_bit(RSCN_UPDATE, &vha->dpc_flags);
621 qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry); 639 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
622 break; 640 break;
623 641
624 /* case MBA_RIO_RESPONSE: */ 642 /* case MBA_RIO_RESPONSE: */
625 case MBA_ZIO_RESPONSE: 643 case MBA_ZIO_RESPONSE:
626 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", 644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
627 ha->host_no)); 645 vha->host_no));
628 DEBUG(printk(KERN_INFO 646 DEBUG(printk(KERN_INFO
629 "scsi(%ld): [R|Z]IO update completion.\n", 647 "scsi(%ld): [R|Z]IO update completion.\n",
630 ha->host_no)); 648 vha->host_no));
631 649
632 if (IS_FWI2_CAPABLE(ha)) 650 if (IS_FWI2_CAPABLE(ha))
633 qla24xx_process_response_queue(ha); 651 qla24xx_process_response_queue(rsp);
634 else 652 else
635 qla2x00_process_response_queue(ha); 653 qla2x00_process_response_queue(rsp);
636 break; 654 break;
637 655
638 case MBA_DISCARD_RND_FRAME: 656 case MBA_DISCARD_RND_FRAME:
639 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 657 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
640 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); 658 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
641 break; 659 break;
642 660
643 case MBA_TRACE_NOTIFICATION: 661 case MBA_TRACE_NOTIFICATION:
644 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 662 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
645 ha->host_no, mb[1], mb[2])); 663 vha->host_no, mb[1], mb[2]));
646 break; 664 break;
647 665
648 case MBA_ISP84XX_ALERT: 666 case MBA_ISP84XX_ALERT:
649 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 667 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
650 "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3])); 668 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
651 669
652 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 670 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
653 switch (mb[1]) { 671 switch (mb[1]) {
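The RSCN handling in the hunk above keeps each 24-bit port-ID entry in a small circular queue on the vport: the slot after rscn_in_ptr is computed with wrap at MAX_RSCN_COUNT, and if it would collide with rscn_out_ptr the entry is not stored and the rscn_queue_overflow flag is set instead. A minimal sketch of that insert, with an illustrative queue depth:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_RSCN 8                     /* MAX_RSCN_COUNT stand-in */

    struct rscn_q {
        uint32_t entries[MAX_RSCN];
        uint8_t  in, out;
        bool     overflow;
    };

    /* Returns true if queued, false if the queue was full (overflow flagged). */
    static bool rscn_queue(struct rscn_q *q, uint32_t entry)
    {
        uint8_t next = q->in + 1;

        if (next == MAX_RSCN)
            next = 0;
        if (next == q->out) {              /* full: one slot always stays empty */
            q->overflow = true;
            return false;
        }
        q->entries[q->in] = entry;
        q->in = next;
        return true;
    }

    int main(void)
    {
        struct rscn_q q = { { 0 }, 0, 0, false };

        for (int i = 0; i < 10; i++)
            printf("entry %d %s\n", i,
                   rscn_queue(&q, 0x010203u + i) ? "queued" : "dropped (overflow)");
        return 0;
    }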
@@ -682,16 +700,22 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
682 break; 700 break;
683 } 701 }
684 702
685 if (!ha->parent && ha->num_vhosts) 703 if (!vha->vp_idx && ha->num_vhosts)
686 qla2x00_alert_all_vps(ha, mb); 704 qla2x00_alert_all_vps(rsp, mb);
687} 705}
688 706
689static void 707static void
690qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) 708qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
691{ 709{
692 fc_port_t *fcport = data; 710 fc_port_t *fcport = data;
711 struct scsi_qla_host *vha = fcport->vha;
712 struct qla_hw_data *ha = vha->hw;
713 struct req_que *req = NULL;
693 714
694 if (fcport->ha->max_q_depth <= sdev->queue_depth) 715 req = ha->req_q_map[vha->req_ques[0]];
716 if (!req)
717 return;
718 if (req->max_q_depth <= sdev->queue_depth)
695 return; 719 return;
696 720
697 if (sdev->ordered_tags) 721 if (sdev->ordered_tags)
@@ -703,9 +727,9 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
703 727
704 fcport->last_ramp_up = jiffies; 728 fcport->last_ramp_up = jiffies;
705 729
706 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 730 DEBUG2(qla_printk(KERN_INFO, ha,
707 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 731 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
708 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 732 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
709 sdev->queue_depth)); 733 sdev->queue_depth));
710} 734}
711 735
@@ -717,20 +741,21 @@ qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
717 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1)) 741 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
718 return; 742 return;
719 743
720 DEBUG2(qla_printk(KERN_INFO, fcport->ha, 744 DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
721 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n", 745 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
722 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun, 746 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
723 sdev->queue_depth)); 747 sdev->queue_depth));
724} 748}
725 749
726static inline void 750static inline void
727qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp) 751qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
752 srb_t *sp)
728{ 753{
729 fc_port_t *fcport; 754 fc_port_t *fcport;
730 struct scsi_device *sdev; 755 struct scsi_device *sdev;
731 756
732 sdev = sp->cmd->device; 757 sdev = sp->cmd->device;
733 if (sdev->queue_depth >= ha->max_q_depth) 758 if (sdev->queue_depth >= req->max_q_depth)
734 return; 759 return;
735 760
736 fcport = sp->fcport; 761 fcport = sp->fcport;
@@ -751,25 +776,27 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
751 * @index: SRB index 776 * @index: SRB index
752 */ 777 */
753static void 778static void
754qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index) 779qla2x00_process_completed_request(struct scsi_qla_host *vha,
780 struct req_que *req, uint32_t index)
755{ 781{
756 srb_t *sp; 782 srb_t *sp;
783 struct qla_hw_data *ha = vha->hw;
757 784
758 /* Validate handle. */ 785 /* Validate handle. */
759 if (index >= MAX_OUTSTANDING_COMMANDS) { 786 if (index >= MAX_OUTSTANDING_COMMANDS) {
760 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n", 787 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
761 ha->host_no, index)); 788 vha->host_no, index));
762 qla_printk(KERN_WARNING, ha, 789 qla_printk(KERN_WARNING, ha,
763 "Invalid SCSI completion handle %d.\n", index); 790 "Invalid SCSI completion handle %d.\n", index);
764 791
765 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 792 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
766 return; 793 return;
767 } 794 }
768 795
769 sp = ha->outstanding_cmds[index]; 796 sp = req->outstanding_cmds[index];
770 if (sp) { 797 if (sp) {
771 /* Free outstanding command slot. */ 798 /* Free outstanding command slot. */
772 ha->outstanding_cmds[index] = NULL; 799 req->outstanding_cmds[index] = NULL;
773 800
774 CMD_COMPL_STATUS(sp->cmd) = 0L; 801 CMD_COMPL_STATUS(sp->cmd) = 0L;
775 CMD_SCSI_STATUS(sp->cmd) = 0L; 802 CMD_SCSI_STATUS(sp->cmd) = 0L;
@@ -777,15 +804,15 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
777 /* Save ISP completion status */ 804 /* Save ISP completion status */
778 sp->cmd->result = DID_OK << 16; 805 sp->cmd->result = DID_OK << 16;
779 806
780 qla2x00_ramp_up_queue_depth(ha, sp); 807 qla2x00_ramp_up_queue_depth(vha, req, sp);
781 qla2x00_sp_compl(ha, sp); 808 qla2x00_sp_compl(ha, sp);
782 } else { 809 } else {
783 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", 810 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
784 ha->host_no)); 811 vha->host_no));
785 qla_printk(KERN_WARNING, ha, 812 qla_printk(KERN_WARNING, ha,
786 "Invalid ISP SCSI completion handle\n"); 813 "Invalid ISP SCSI completion handle\n");
787 814
788 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 815 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
789 } 816 }
790} 817}
791 818
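Fast-post completions hand back only a handle, so qla2x00_process_completed_request() validates it before touching anything: a handle outside the outstanding-command table, or one whose slot is already empty, means firmware and driver have lost sync, and the code schedules an ISP abort instead of completing a command it no longer owns. A compact sketch of that validation with an illustrative table size:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define MAX_CMDS 32                    /* table size is illustrative */

    struct cmd { int tag; };

    static struct cmd *outstanding[MAX_CMDS];
    static int need_isp_abort;

    static void complete_by_handle(uint32_t handle)
    {
        struct cmd *sp;

        if (handle >= MAX_CMDS) {          /* handle out of range */
            need_isp_abort = 1;
            return;
        }
        sp = outstanding[handle];
        if (!sp) {                         /* slot already empty: stale handle */
            need_isp_abort = 1;
            return;
        }
        outstanding[handle] = NULL;        /* free the slot, then complete sp */
        printf("completed tag %d\n", sp->tag);
    }

    int main(void)
    {
        struct cmd c = { 7 };

        outstanding[3] = &c;
        complete_by_handle(3);             /* normal completion        */
        complete_by_handle(3);             /* stale: flags an abort    */
        complete_by_handle(99);            /* out of range: flags too  */
        printf("need_isp_abort=%d\n", need_isp_abort);
        return 0;
    }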
@@ -794,32 +821,36 @@ qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
794 * @ha: SCSI driver HA context 821 * @ha: SCSI driver HA context
795 */ 822 */
796void 823void
797qla2x00_process_response_queue(struct scsi_qla_host *ha) 824qla2x00_process_response_queue(struct rsp_que *rsp)
798{ 825{
826 struct scsi_qla_host *vha;
827 struct qla_hw_data *ha = rsp->hw;
799 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 828 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
800 sts_entry_t *pkt; 829 sts_entry_t *pkt;
801 uint16_t handle_cnt; 830 uint16_t handle_cnt;
802 uint16_t cnt; 831 uint16_t cnt;
803 832
804 if (!ha->flags.online) 833 vha = qla2x00_get_rsp_host(rsp);
834
835 if (!vha->flags.online)
805 return; 836 return;
806 837
807 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 838 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
808 pkt = (sts_entry_t *)ha->response_ring_ptr; 839 pkt = (sts_entry_t *)rsp->ring_ptr;
809 840
810 ha->rsp_ring_index++; 841 rsp->ring_index++;
811 if (ha->rsp_ring_index == ha->response_q_length) { 842 if (rsp->ring_index == rsp->length) {
812 ha->rsp_ring_index = 0; 843 rsp->ring_index = 0;
813 ha->response_ring_ptr = ha->response_ring; 844 rsp->ring_ptr = rsp->ring;
814 } else { 845 } else {
815 ha->response_ring_ptr++; 846 rsp->ring_ptr++;
816 } 847 }
817 848
818 if (pkt->entry_status != 0) { 849 if (pkt->entry_status != 0) {
819 DEBUG3(printk(KERN_INFO 850 DEBUG3(printk(KERN_INFO
820 "scsi(%ld): Process error entry.\n", ha->host_no)); 851 "scsi(%ld): Process error entry.\n", vha->host_no));
821 852
822 qla2x00_error_entry(ha, pkt); 853 qla2x00_error_entry(vha, rsp, pkt);
823 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 854 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
824 wmb(); 855 wmb();
825 continue; 856 continue;
@@ -827,31 +858,31 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
827 858
828 switch (pkt->entry_type) { 859 switch (pkt->entry_type) {
829 case STATUS_TYPE: 860 case STATUS_TYPE:
830 qla2x00_status_entry(ha, pkt); 861 qla2x00_status_entry(vha, rsp, pkt);
831 break; 862 break;
832 case STATUS_TYPE_21: 863 case STATUS_TYPE_21:
833 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 864 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
834 for (cnt = 0; cnt < handle_cnt; cnt++) { 865 for (cnt = 0; cnt < handle_cnt; cnt++) {
835 qla2x00_process_completed_request(ha, 866 qla2x00_process_completed_request(vha, rsp->req,
836 ((sts21_entry_t *)pkt)->handle[cnt]); 867 ((sts21_entry_t *)pkt)->handle[cnt]);
837 } 868 }
838 break; 869 break;
839 case STATUS_TYPE_22: 870 case STATUS_TYPE_22:
840 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 871 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
841 for (cnt = 0; cnt < handle_cnt; cnt++) { 872 for (cnt = 0; cnt < handle_cnt; cnt++) {
842 qla2x00_process_completed_request(ha, 873 qla2x00_process_completed_request(vha, rsp->req,
843 ((sts22_entry_t *)pkt)->handle[cnt]); 874 ((sts22_entry_t *)pkt)->handle[cnt]);
844 } 875 }
845 break; 876 break;
846 case STATUS_CONT_TYPE: 877 case STATUS_CONT_TYPE:
847 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 878 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
848 break; 879 break;
849 default: 880 default:
850 /* Type Not Supported. */ 881 /* Type Not Supported. */
851 DEBUG4(printk(KERN_WARNING 882 DEBUG4(printk(KERN_WARNING
852 "scsi(%ld): Received unknown response pkt type %x " 883 "scsi(%ld): Received unknown response pkt type %x "
853 "entry status=%x.\n", 884 "entry status=%x.\n",
854 ha->host_no, pkt->entry_type, pkt->entry_status)); 885 vha->host_no, pkt->entry_type, pkt->entry_status));
855 break; 886 break;
856 } 887 }
857 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 888 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -859,7 +890,7 @@ qla2x00_process_response_queue(struct scsi_qla_host *ha)
859 } 890 }
860 891
861 /* Adjust ring index */ 892 /* Adjust ring index */
862 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); 893 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
863} 894}
864 895
865static inline void 896static inline void
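The hunks above convert qla2x00_process_response_queue() from the per-host ring fields (ha->response_ring_ptr, ha->rsp_ring_index, ha->response_q_length) to fields carried by struct rsp_que, and resolve the owning vha from the queue before dispatching each entry. As a reading aid only, here is a condensed, self-contained sketch of that per-queue ring walk; the struct layout and the handle() callback are simplified stand-ins, and the RESPONSE_PROCESSED value is assumed from qla_def.h rather than taken from this diff.

/* Condensed sketch of the per-queue response-ring walk (simplified types). */
#include <stdint.h>

#define RESPONSE_PROCESSED 0xDEADDEAD   /* sentinel value assumed from qla_def.h */

typedef struct { uint32_t signature; } response_sketch_t;

struct rsp_que_sketch {
        response_sketch_t *ring;        /* base of the response ring */
        response_sketch_t *ring_ptr;    /* next entry to consume     */
        uint16_t length;                /* entries in the ring       */
        uint16_t ring_index;            /* index of ring_ptr         */
};

static void sketch_process_rsp(struct rsp_que_sketch *rsp,
                               void (*handle)(response_sketch_t *))
{
        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
                response_sketch_t *pkt = rsp->ring_ptr;

                /* Advance the consumer pointer, wrapping at the ring end. */
                if (++rsp->ring_index == rsp->length) {
                        rsp->ring_index = 0;
                        rsp->ring_ptr = rsp->ring;
                } else {
                        rsp->ring_ptr++;
                }

                handle(pkt);                            /* per-entry dispatch     */
                pkt->signature = RESPONSE_PROCESSED;    /* hand the slot back     */
        }
}

The driver additionally issues a write barrier after marking each slot and, once the loop drains, writes the new ring_index to the queue's out-pointer register (WRT_REG_WORD above, or isp_ops->wrt_rsp_reg in the 24xx path).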
@@ -881,10 +912,10 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
881 sp->request_sense_ptr += sense_len; 912 sp->request_sense_ptr += sense_len;
882 sp->request_sense_length -= sense_len; 913 sp->request_sense_length -= sense_len;
883 if (sp->request_sense_length != 0) 914 if (sp->request_sense_length != 0)
884 sp->fcport->ha->status_srb = sp; 915 sp->fcport->vha->status_srb = sp;
885 916
886 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " 917 DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
887 "cmd=%p pid=%ld\n", __func__, sp->fcport->ha->host_no, 918 "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
888 cp->device->channel, cp->device->id, cp->device->lun, cp, 919 cp->device->channel, cp->device->id, cp->device->lun, cp,
889 cp->serial_number)); 920 cp->serial_number));
890 if (sense_len) 921 if (sense_len)
@@ -898,7 +929,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
898 * @pkt: Entry pointer 929 * @pkt: Entry pointer
899 */ 930 */
900static void 931static void
901qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) 932qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
902{ 933{
903 srb_t *sp; 934 srb_t *sp;
904 fc_port_t *fcport; 935 fc_port_t *fcport;
@@ -911,6 +942,8 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
911 int32_t resid; 942 int32_t resid;
912 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 943 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
913 uint8_t *rsp_info, *sense_data; 944 uint8_t *rsp_info, *sense_data;
945 struct qla_hw_data *ha = vha->hw;
946 struct req_que *req = rsp->req;
914 947
915 sts = (sts_entry_t *) pkt; 948 sts = (sts_entry_t *) pkt;
916 sts24 = (struct sts_entry_24xx *) pkt; 949 sts24 = (struct sts_entry_24xx *) pkt;
@@ -924,31 +957,31 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
924 957
925 /* Fast path completion. */ 958 /* Fast path completion. */
926 if (comp_status == CS_COMPLETE && scsi_status == 0) { 959 if (comp_status == CS_COMPLETE && scsi_status == 0) {
927 qla2x00_process_completed_request(ha, sts->handle); 960 qla2x00_process_completed_request(vha, req, sts->handle);
928 961
929 return; 962 return;
930 } 963 }
931 964
932 /* Validate handle. */ 965 /* Validate handle. */
933 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 966 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
934 sp = ha->outstanding_cmds[sts->handle]; 967 sp = req->outstanding_cmds[sts->handle];
935 ha->outstanding_cmds[sts->handle] = NULL; 968 req->outstanding_cmds[sts->handle] = NULL;
936 } else 969 } else
937 sp = NULL; 970 sp = NULL;
938 971
939 if (sp == NULL) { 972 if (sp == NULL) {
940 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", 973 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
941 ha->host_no)); 974 vha->host_no));
942 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); 975 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
943 976
944 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 977 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
945 qla2xxx_wake_dpc(ha); 978 qla2xxx_wake_dpc(vha);
946 return; 979 return;
947 } 980 }
948 cp = sp->cmd; 981 cp = sp->cmd;
949 if (cp == NULL) { 982 if (cp == NULL) {
950 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 983 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
951 "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp)); 984 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
952 qla_printk(KERN_WARNING, ha, 985 qla_printk(KERN_WARNING, ha,
953 "Command is NULL: already returned to OS (sp=%p)\n", sp); 986 "Command is NULL: already returned to OS (sp=%p)\n", sp);
954 987
@@ -987,7 +1020,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
987 if (rsp_info_len > 3 && rsp_info[3]) { 1020 if (rsp_info_len > 3 && rsp_info[3]) {
988 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 1021 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
989 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." 1022 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
990 "retrying command\n", ha->host_no, 1023 "retrying command\n", vha->host_no,
991 cp->device->channel, cp->device->id, 1024 cp->device->channel, cp->device->id,
992 cp->device->lun, rsp_info_len, rsp_info[0], 1025 cp->device->lun, rsp_info_len, rsp_info[0],
993 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], 1026 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
@@ -1025,7 +1058,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1025 qla_printk(KERN_INFO, ha, 1058 qla_printk(KERN_INFO, ha,
1026 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1059 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1027 "detected (%x of %x bytes)...returning " 1060 "detected (%x of %x bytes)...returning "
1028 "error status.\n", ha->host_no, 1061 "error status.\n", vha->host_no,
1029 cp->device->channel, cp->device->id, 1062 cp->device->channel, cp->device->id,
1030 cp->device->lun, resid, 1063 cp->device->lun, resid,
1031 scsi_bufflen(cp)); 1064 scsi_bufflen(cp));
@@ -1039,7 +1072,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1039 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1072 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1040 DEBUG2(printk(KERN_INFO 1073 DEBUG2(printk(KERN_INFO
1041 "scsi(%ld): QUEUE FULL status detected " 1074 "scsi(%ld): QUEUE FULL status detected "
1042 "0x%x-0x%x.\n", ha->host_no, comp_status, 1075 "0x%x-0x%x.\n", vha->host_no, comp_status,
1043 scsi_status)); 1076 scsi_status));
1044 1077
1045 /* Adjust queue depth for all luns on the port. */ 1078 /* Adjust queue depth for all luns on the port. */
@@ -1078,7 +1111,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1078 DEBUG2(printk(KERN_INFO 1111 DEBUG2(printk(KERN_INFO
1079 "scsi(%ld:%d:%d) UNDERRUN status detected " 1112 "scsi(%ld:%d:%d) UNDERRUN status detected "
1080 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " 1113 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1081 "os_underflow=0x%x\n", ha->host_no, 1114 "os_underflow=0x%x\n", vha->host_no,
1082 cp->device->id, cp->device->lun, comp_status, 1115 cp->device->id, cp->device->lun, comp_status,
1083 scsi_status, resid_len, resid, cp->cmnd[0], 1116 scsi_status, resid_len, resid, cp->cmnd[0],
1084 cp->underflow)); 1117 cp->underflow));
@@ -1095,7 +1128,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1095 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1128 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1096 DEBUG2(printk(KERN_INFO 1129 DEBUG2(printk(KERN_INFO
1097 "scsi(%ld): QUEUE FULL status detected " 1130 "scsi(%ld): QUEUE FULL status detected "
1098 "0x%x-0x%x.\n", ha->host_no, comp_status, 1131 "0x%x-0x%x.\n", vha->host_no, comp_status,
1099 scsi_status)); 1132 scsi_status));
1100 1133
1101 /* 1134 /*
@@ -1125,10 +1158,10 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1125 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1158 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1126 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1159 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1127 "frame(s) detected (%x of %x bytes)..." 1160 "frame(s) detected (%x of %x bytes)..."
1128 "retrying command.\n", ha->host_no, 1161 "retrying command.\n",
1129 cp->device->channel, cp->device->id, 1162 vha->host_no, cp->device->channel,
1130 cp->device->lun, resid, 1163 cp->device->id, cp->device->lun, resid,
1131 scsi_bufflen(cp))); 1164 scsi_bufflen(cp)));
1132 1165
1133 cp->result = DID_BUS_BUSY << 16; 1166 cp->result = DID_BUS_BUSY << 16;
1134 break; 1167 break;
@@ -1140,7 +1173,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1140 qla_printk(KERN_INFO, ha, 1173 qla_printk(KERN_INFO, ha,
1141 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1174 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1142 "detected (%x of %x bytes)...returning " 1175 "detected (%x of %x bytes)...returning "
1143 "error status.\n", ha->host_no, 1176 "error status.\n", vha->host_no,
1144 cp->device->channel, cp->device->id, 1177 cp->device->channel, cp->device->id,
1145 cp->device->lun, resid, 1178 cp->device->lun, resid,
1146 scsi_bufflen(cp)); 1179 scsi_bufflen(cp));
@@ -1157,7 +1190,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1157 case CS_DATA_OVERRUN: 1190 case CS_DATA_OVERRUN:
1158 DEBUG2(printk(KERN_INFO 1191 DEBUG2(printk(KERN_INFO
1159 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", 1192 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1160 ha->host_no, cp->device->id, cp->device->lun, comp_status, 1193 vha->host_no, cp->device->id, cp->device->lun, comp_status,
1161 scsi_status)); 1194 scsi_status));
1162 DEBUG2(printk(KERN_INFO 1195 DEBUG2(printk(KERN_INFO
1163 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1196 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
@@ -1183,7 +1216,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1183 */ 1216 */
1184 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " 1217 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1185 "pid=%ld, compl status=0x%x, port state=0x%x\n", 1218 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1186 ha->host_no, cp->device->id, cp->device->lun, 1219 vha->host_no, cp->device->id, cp->device->lun,
1187 cp->serial_number, comp_status, 1220 cp->serial_number, comp_status,
1188 atomic_read(&fcport->state))); 1221 atomic_read(&fcport->state)));
1189 1222
@@ -1194,13 +1227,13 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1194 */ 1227 */
1195 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1228 cp->result = DID_TRANSPORT_DISRUPTED << 16;
1196 if (atomic_read(&fcport->state) == FCS_ONLINE) 1229 if (atomic_read(&fcport->state) == FCS_ONLINE)
1197 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1230 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1198 break; 1231 break;
1199 1232
1200 case CS_RESET: 1233 case CS_RESET:
1201 DEBUG2(printk(KERN_INFO 1234 DEBUG2(printk(KERN_INFO
1202 "scsi(%ld): RESET status detected 0x%x-0x%x.\n", 1235 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1203 ha->host_no, comp_status, scsi_status)); 1236 vha->host_no, comp_status, scsi_status));
1204 1237
1205 cp->result = DID_RESET << 16; 1238 cp->result = DID_RESET << 16;
1206 break; 1239 break;
@@ -1213,7 +1246,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1213 */ 1246 */
1214 DEBUG2(printk(KERN_INFO 1247 DEBUG2(printk(KERN_INFO
1215 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", 1248 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1216 ha->host_no, comp_status, scsi_status)); 1249 vha->host_no, comp_status, scsi_status));
1217 1250
1218 cp->result = DID_RESET << 16; 1251 cp->result = DID_RESET << 16;
1219 break; 1252 break;
@@ -1229,25 +1262,25 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1229 if (IS_FWI2_CAPABLE(ha)) { 1262 if (IS_FWI2_CAPABLE(ha)) {
1230 DEBUG2(printk(KERN_INFO 1263 DEBUG2(printk(KERN_INFO
1231 "scsi(%ld:%d:%d:%d): TIMEOUT status detected " 1264 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1232 "0x%x-0x%x\n", ha->host_no, cp->device->channel, 1265 "0x%x-0x%x\n", vha->host_no, cp->device->channel,
1233 cp->device->id, cp->device->lun, comp_status, 1266 cp->device->id, cp->device->lun, comp_status,
1234 scsi_status)); 1267 scsi_status));
1235 break; 1268 break;
1236 } 1269 }
1237 DEBUG2(printk(KERN_INFO 1270 DEBUG2(printk(KERN_INFO
1238 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " 1271 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1239 "sflags=%x.\n", ha->host_no, cp->device->channel, 1272 "sflags=%x.\n", vha->host_no, cp->device->channel,
1240 cp->device->id, cp->device->lun, comp_status, scsi_status, 1273 cp->device->id, cp->device->lun, comp_status, scsi_status,
1241 le16_to_cpu(sts->status_flags))); 1274 le16_to_cpu(sts->status_flags)));
1242 1275
1243 /* Check to see if logout occurred. */ 1276 /* Check to see if logout occurred. */
1244 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1277 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1245 qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1); 1278 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1246 break; 1279 break;
1247 1280
1248 default: 1281 default:
1249 DEBUG3(printk("scsi(%ld): Error detected (unknown status) " 1282 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1250 "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status)); 1283 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
1251 qla_printk(KERN_INFO, ha, 1284 qla_printk(KERN_INFO, ha,
1252 "Unknown status detected 0x%x-0x%x.\n", 1285 "Unknown status detected 0x%x-0x%x.\n",
1253 comp_status, scsi_status); 1286 comp_status, scsi_status);
@@ -1257,7 +1290,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1257 } 1290 }
1258 1291
1259 /* Place command on done queue. */ 1292 /* Place command on done queue. */
1260 if (ha->status_srb == NULL) 1293 if (vha->status_srb == NULL)
1261 qla2x00_sp_compl(ha, sp); 1294 qla2x00_sp_compl(ha, sp);
1262} 1295}
1263 1296
@@ -1269,10 +1302,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1269 * Extended sense data. 1302 * Extended sense data.
1270 */ 1303 */
1271static void 1304static void
1272qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt) 1305qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
1273{ 1306{
1274 uint8_t sense_sz = 0; 1307 uint8_t sense_sz = 0;
1275 srb_t *sp = ha->status_srb; 1308 struct qla_hw_data *ha = vha->hw;
1309 srb_t *sp = vha->status_srb;
1276 struct scsi_cmnd *cp; 1310 struct scsi_cmnd *cp;
1277 1311
1278 if (sp != NULL && sp->request_sense_length != 0) { 1312 if (sp != NULL && sp->request_sense_length != 0) {
@@ -1284,7 +1318,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1284 "cmd is NULL: already returned to OS (sp=%p)\n", 1318 "cmd is NULL: already returned to OS (sp=%p)\n",
1285 sp); 1319 sp);
1286 1320
1287 ha->status_srb = NULL; 1321 vha->status_srb = NULL;
1288 return; 1322 return;
1289 } 1323 }
1290 1324
@@ -1305,7 +1339,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1305 1339
1306 /* Place command on done queue. */ 1340 /* Place command on done queue. */
1307 if (sp->request_sense_length == 0) { 1341 if (sp->request_sense_length == 0) {
1308 ha->status_srb = NULL; 1342 vha->status_srb = NULL;
1309 qla2x00_sp_compl(ha, sp); 1343 qla2x00_sp_compl(ha, sp);
1310 } 1344 }
1311 } 1345 }
@@ -1317,10 +1351,11 @@ qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1317 * @pkt: Entry pointer 1351 * @pkt: Entry pointer
1318 */ 1352 */
1319static void 1353static void
1320qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt) 1354qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1321{ 1355{
1322 srb_t *sp; 1356 srb_t *sp;
1323 1357 struct qla_hw_data *ha = vha->hw;
1358 struct req_que *req = rsp->req;
1324#if defined(QL_DEBUG_LEVEL_2) 1359#if defined(QL_DEBUG_LEVEL_2)
1325 if (pkt->entry_status & RF_INV_E_ORDER) 1360 if (pkt->entry_status & RF_INV_E_ORDER)
1326 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); 1361 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1339,13 +1374,13 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1339 1374
1340 /* Validate handle. */ 1375 /* Validate handle. */
1341 if (pkt->handle < MAX_OUTSTANDING_COMMANDS) 1376 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1342 sp = ha->outstanding_cmds[pkt->handle]; 1377 sp = req->outstanding_cmds[pkt->handle];
1343 else 1378 else
1344 sp = NULL; 1379 sp = NULL;
1345 1380
1346 if (sp) { 1381 if (sp) {
1347 /* Free outstanding command slot. */ 1382 /* Free outstanding command slot. */
1348 ha->outstanding_cmds[pkt->handle] = NULL; 1383 req->outstanding_cmds[pkt->handle] = NULL;
1349 1384
1350 /* Bad payload or header */ 1385 /* Bad payload or header */
1351 if (pkt->entry_status & 1386 if (pkt->entry_status &
@@ -1362,12 +1397,12 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1362 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type == 1397 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1363 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) { 1398 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1364 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n", 1399 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1365 ha->host_no)); 1400 vha->host_no));
1366 qla_printk(KERN_WARNING, ha, 1401 qla_printk(KERN_WARNING, ha,
1367 "Error entry - invalid handle\n"); 1402 "Error entry - invalid handle\n");
1368 1403
1369 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1404 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1370 qla2xxx_wake_dpc(ha); 1405 qla2xxx_wake_dpc(vha);
1371 } 1406 }
1372} 1407}
1373 1408
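A second pattern repeated in qla2x00_process_completed_request(), qla2x00_status_entry() and qla2x00_error_entry() above is that the outstanding-command table moves from the host to the request queue: the IOCB handle is bounds-checked against MAX_OUTSTANDING_COMMANDS, the srb is looked up in req->outstanding_cmds[], and the slot is cleared before the command is completed, while an out-of-range or empty slot triggers ISP_ABORT_NEEDED. A minimal sketch of that lookup, with simplified types and an assumed table size, is shown here.

/* Sketch of the per-request-queue handle lookup (simplified; table size assumed). */
#include <stdint.h>

#define SKETCH_MAX_OUTSTANDING 1024     /* stand-in for MAX_OUTSTANDING_COMMANDS */

struct srb_sketch;                      /* opaque command wrapper */

struct req_que_sketch {
        struct srb_sketch *outstanding_cmds[SKETCH_MAX_OUTSTANDING];
};

/* Returns the srb owning @handle and frees its slot, or NULL when the handle
 * is out of range or already completed. */
static struct srb_sketch *
sketch_claim_handle(struct req_que_sketch *req, uint32_t handle)
{
        struct srb_sketch *sp;

        if (handle >= SKETCH_MAX_OUTSTANDING)
                return NULL;

        sp = req->outstanding_cmds[handle];
        req->outstanding_cmds[handle] = NULL;   /* free the slot for reuse */
        return sp;
}

In the hunks above a NULL result corresponds to the invalid-handle branches, which log the condition, set ISP_ABORT_NEEDED on the vha and wake the DPC thread.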
@@ -1377,10 +1412,11 @@ qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1377 * @mb0: Mailbox0 register 1412 * @mb0: Mailbox0 register
1378 */ 1413 */
1379static void 1414static void
1380qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0) 1415qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
1381{ 1416{
1382 uint16_t cnt; 1417 uint16_t cnt;
1383 uint16_t __iomem *wptr; 1418 uint16_t __iomem *wptr;
1419 struct qla_hw_data *ha = vha->hw;
1384 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1420 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1385 1421
1386 /* Load return mailbox registers. */ 1422 /* Load return mailbox registers. */
@@ -1395,10 +1431,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1395 1431
1396 if (ha->mcp) { 1432 if (ha->mcp) {
1397 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n", 1433 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1398 __func__, ha->host_no, ha->mcp->mb[0])); 1434 __func__, vha->host_no, ha->mcp->mb[0]));
1399 } else { 1435 } else {
1400 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n", 1436 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1401 __func__, ha->host_no)); 1437 __func__, vha->host_no));
1402 } 1438 }
1403} 1439}
1404 1440
@@ -1407,30 +1443,33 @@ qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1407 * @ha: SCSI driver HA context 1443 * @ha: SCSI driver HA context
1408 */ 1444 */
1409void 1445void
1410qla24xx_process_response_queue(struct scsi_qla_host *ha) 1446qla24xx_process_response_queue(struct rsp_que *rsp)
1411{ 1447{
1412 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1448 struct qla_hw_data *ha = rsp->hw;
1413 struct sts_entry_24xx *pkt; 1449 struct sts_entry_24xx *pkt;
1450 struct scsi_qla_host *vha;
1451
1452 vha = qla2x00_get_rsp_host(rsp);
1414 1453
1415 if (!ha->flags.online) 1454 if (!vha->flags.online)
1416 return; 1455 return;
1417 1456
1418 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { 1457 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1419 pkt = (struct sts_entry_24xx *)ha->response_ring_ptr; 1458 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
1420 1459
1421 ha->rsp_ring_index++; 1460 rsp->ring_index++;
1422 if (ha->rsp_ring_index == ha->response_q_length) { 1461 if (rsp->ring_index == rsp->length) {
1423 ha->rsp_ring_index = 0; 1462 rsp->ring_index = 0;
1424 ha->response_ring_ptr = ha->response_ring; 1463 rsp->ring_ptr = rsp->ring;
1425 } else { 1464 } else {
1426 ha->response_ring_ptr++; 1465 rsp->ring_ptr++;
1427 } 1466 }
1428 1467
1429 if (pkt->entry_status != 0) { 1468 if (pkt->entry_status != 0) {
1430 DEBUG3(printk(KERN_INFO 1469 DEBUG3(printk(KERN_INFO
1431 "scsi(%ld): Process error entry.\n", ha->host_no)); 1470 "scsi(%ld): Process error entry.\n", vha->host_no));
1432 1471
1433 qla2x00_error_entry(ha, (sts_entry_t *) pkt); 1472 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
1434 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1473 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1435 wmb(); 1474 wmb();
1436 continue; 1475 continue;
@@ -1438,13 +1477,13 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1438 1477
1439 switch (pkt->entry_type) { 1478 switch (pkt->entry_type) {
1440 case STATUS_TYPE: 1479 case STATUS_TYPE:
1441 qla2x00_status_entry(ha, pkt); 1480 qla2x00_status_entry(vha, rsp, pkt);
1442 break; 1481 break;
1443 case STATUS_CONT_TYPE: 1482 case STATUS_CONT_TYPE:
1444 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); 1483 qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
1445 break; 1484 break;
1446 case VP_RPT_ID_IOCB_TYPE: 1485 case VP_RPT_ID_IOCB_TYPE:
1447 qla24xx_report_id_acquisition(ha, 1486 qla24xx_report_id_acquisition(vha,
1448 (struct vp_rpt_id_entry_24xx *)pkt); 1487 (struct vp_rpt_id_entry_24xx *)pkt);
1449 break; 1488 break;
1450 default: 1489 default:
@@ -1452,7 +1491,7 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1452 DEBUG4(printk(KERN_WARNING 1491 DEBUG4(printk(KERN_WARNING
1453 "scsi(%ld): Received unknown response pkt type %x " 1492 "scsi(%ld): Received unknown response pkt type %x "
1454 "entry status=%x.\n", 1493 "entry status=%x.\n",
1455 ha->host_no, pkt->entry_type, pkt->entry_status)); 1494 vha->host_no, pkt->entry_type, pkt->entry_status));
1456 break; 1495 break;
1457 } 1496 }
1458 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1497 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -1460,14 +1499,15 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1460 } 1499 }
1461 1500
1462 /* Adjust ring index */ 1501 /* Adjust ring index */
1463 WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index); 1502 ha->isp_ops->wrt_rsp_reg(ha, rsp->id, rsp->ring_index);
1464} 1503}
1465 1504
1466static void 1505static void
1467qla2xxx_check_risc_status(scsi_qla_host_t *ha) 1506qla2xxx_check_risc_status(scsi_qla_host_t *vha)
1468{ 1507{
1469 int rval; 1508 int rval;
1470 uint32_t cnt; 1509 uint32_t cnt;
1510 struct qla_hw_data *ha = vha->hw;
1471 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1511 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1472 1512
1473 if (!IS_QLA25XX(ha)) 1513 if (!IS_QLA25XX(ha))
@@ -1521,25 +1561,29 @@ done:
1521irqreturn_t 1561irqreturn_t
1522qla24xx_intr_handler(int irq, void *dev_id) 1562qla24xx_intr_handler(int irq, void *dev_id)
1523{ 1563{
1524 scsi_qla_host_t *ha; 1564 scsi_qla_host_t *vha;
1565 struct qla_hw_data *ha;
1525 struct device_reg_24xx __iomem *reg; 1566 struct device_reg_24xx __iomem *reg;
1526 int status; 1567 int status;
1527 unsigned long iter; 1568 unsigned long iter;
1528 uint32_t stat; 1569 uint32_t stat;
1529 uint32_t hccr; 1570 uint32_t hccr;
1530 uint16_t mb[4]; 1571 uint16_t mb[4];
1572 struct rsp_que *rsp;
1531 1573
1532 ha = (scsi_qla_host_t *) dev_id; 1574 rsp = (struct rsp_que *) dev_id;
1533 if (!ha) { 1575 if (!rsp) {
1534 printk(KERN_INFO 1576 printk(KERN_INFO
1535 "%s(): NULL host pointer\n", __func__); 1577 "%s(): NULL response queue pointer\n", __func__);
1536 return IRQ_NONE; 1578 return IRQ_NONE;
1537 } 1579 }
1538 1580
1581 ha = rsp->hw;
1539 reg = &ha->iobase->isp24; 1582 reg = &ha->iobase->isp24;
1540 status = 0; 1583 status = 0;
1541 1584
1542 spin_lock(&ha->hardware_lock); 1585 spin_lock(&ha->hardware_lock);
1586 vha = qla2x00_get_rsp_host(rsp);
1543 for (iter = 50; iter--; ) { 1587 for (iter = 50; iter--; ) {
1544 stat = RD_REG_DWORD(&reg->host_status); 1588 stat = RD_REG_DWORD(&reg->host_status);
1545 if (stat & HSRX_RISC_PAUSED) { 1589 if (stat & HSRX_RISC_PAUSED) {
@@ -1547,7 +1591,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1547 break; 1591 break;
1548 1592
1549 if (ha->hw_event_pause_errors == 0) 1593 if (ha->hw_event_pause_errors == 0)
1550 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1594 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1551 0, MSW(stat), LSW(stat)); 1595 0, MSW(stat), LSW(stat));
1552 else if (ha->hw_event_pause_errors < 0xffffffff) 1596 else if (ha->hw_event_pause_errors < 0xffffffff)
1553 ha->hw_event_pause_errors++; 1597 ha->hw_event_pause_errors++;
@@ -1557,10 +1601,10 @@ qla24xx_intr_handler(int irq, void *dev_id)
1557 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1601 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1558 "Dumping firmware!\n", hccr); 1602 "Dumping firmware!\n", hccr);
1559 1603
1560 qla2xxx_check_risc_status(ha); 1604 qla2xxx_check_risc_status(vha);
1561 1605
1562 ha->isp_ops->fw_dump(ha, 1); 1606 ha->isp_ops->fw_dump(vha, 1);
1563 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1607 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1564 break; 1608 break;
1565 } else if ((stat & HSRX_RISC_INT) == 0) 1609 } else if ((stat & HSRX_RISC_INT) == 0)
1566 break; 1610 break;
@@ -1570,7 +1614,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
1570 case 0x2: 1614 case 0x2:
1571 case 0x10: 1615 case 0x10:
1572 case 0x11: 1616 case 0x11:
1573 qla24xx_mbx_completion(ha, MSW(stat)); 1617 qla24xx_mbx_completion(vha, MSW(stat));
1574 status |= MBX_INTERRUPT; 1618 status |= MBX_INTERRUPT;
1575 1619
1576 break; 1620 break;
@@ -1579,15 +1623,16 @@ qla24xx_intr_handler(int irq, void *dev_id)
1579 mb[1] = RD_REG_WORD(&reg->mailbox1); 1623 mb[1] = RD_REG_WORD(&reg->mailbox1);
1580 mb[2] = RD_REG_WORD(&reg->mailbox2); 1624 mb[2] = RD_REG_WORD(&reg->mailbox2);
1581 mb[3] = RD_REG_WORD(&reg->mailbox3); 1625 mb[3] = RD_REG_WORD(&reg->mailbox3);
1582 qla2x00_async_event(ha, mb); 1626 qla2x00_async_event(vha, rsp, mb);
1583 break; 1627 break;
1584 case 0x13: 1628 case 0x13:
1585 qla24xx_process_response_queue(ha); 1629 case 0x14:
1630 qla24xx_process_response_queue(rsp);
1586 break; 1631 break;
1587 default: 1632 default:
1588 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1633 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1589 "(%d).\n", 1634 "(%d).\n",
1590 ha->host_no, stat & 0xff)); 1635 vha->host_no, stat & 0xff));
1591 break; 1636 break;
1592 } 1637 }
1593 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1638 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1607,15 +1652,22 @@ qla24xx_intr_handler(int irq, void *dev_id)
1607static irqreturn_t 1652static irqreturn_t
1608qla24xx_msix_rsp_q(int irq, void *dev_id) 1653qla24xx_msix_rsp_q(int irq, void *dev_id)
1609{ 1654{
1610 scsi_qla_host_t *ha; 1655 struct qla_hw_data *ha;
1656 struct rsp_que *rsp;
1611 struct device_reg_24xx __iomem *reg; 1657 struct device_reg_24xx __iomem *reg;
1612 1658
1613 ha = dev_id; 1659 rsp = (struct rsp_que *) dev_id;
1660 if (!rsp) {
1661 printk(KERN_INFO
1662 "%s(): NULL response queue pointer\n", __func__);
1663 return IRQ_NONE;
1664 }
1665 ha = rsp->hw;
1614 reg = &ha->iobase->isp24; 1666 reg = &ha->iobase->isp24;
1615 1667
1616 spin_lock_irq(&ha->hardware_lock); 1668 spin_lock_irq(&ha->hardware_lock);
1617 1669
1618 qla24xx_process_response_queue(ha); 1670 qla24xx_process_response_queue(rsp);
1619 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1671 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1620 1672
1621 spin_unlock_irq(&ha->hardware_lock); 1673 spin_unlock_irq(&ha->hardware_lock);
@@ -1624,20 +1676,64 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
1624} 1676}
1625 1677
1626static irqreturn_t 1678static irqreturn_t
1679qla25xx_msix_rsp_q(int irq, void *dev_id)
1680{
1681 struct qla_hw_data *ha;
1682 struct rsp_que *rsp;
1683 struct device_reg_24xx __iomem *reg;
1684 uint16_t msix_disabled_hccr = 0;
1685
1686 rsp = (struct rsp_que *) dev_id;
1687 if (!rsp) {
1688 printk(KERN_INFO
1689 "%s(): NULL response queue pointer\n", __func__);
1690 return IRQ_NONE;
1691 }
1692 ha = rsp->hw;
1693 reg = &ha->iobase->isp24;
1694
1695 spin_lock_irq(&ha->hardware_lock);
1696
1697 msix_disabled_hccr = rsp->options;
1698 if (!rsp->id)
1699 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_22);
1700 else
1701 msix_disabled_hccr &= __constant_cpu_to_le32(BIT_6);
1702
1703 qla24xx_process_response_queue(rsp);
1704
1705 if (!msix_disabled_hccr)
1706 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1707
1708 spin_unlock_irq(&ha->hardware_lock);
1709
1710 return IRQ_HANDLED;
1711}
1712
1713static irqreturn_t
1627qla24xx_msix_default(int irq, void *dev_id) 1714qla24xx_msix_default(int irq, void *dev_id)
1628{ 1715{
1629 scsi_qla_host_t *ha; 1716 scsi_qla_host_t *vha;
1717 struct qla_hw_data *ha;
1718 struct rsp_que *rsp;
1630 struct device_reg_24xx __iomem *reg; 1719 struct device_reg_24xx __iomem *reg;
1631 int status; 1720 int status;
1632 uint32_t stat; 1721 uint32_t stat;
1633 uint32_t hccr; 1722 uint32_t hccr;
1634 uint16_t mb[4]; 1723 uint16_t mb[4];
1635 1724
1636 ha = dev_id; 1725 rsp = (struct rsp_que *) dev_id;
1726 if (!rsp) {
1727 DEBUG(printk(
1728 "%s(): NULL response queue pointer\n", __func__));
1729 return IRQ_NONE;
1730 }
1731 ha = rsp->hw;
1637 reg = &ha->iobase->isp24; 1732 reg = &ha->iobase->isp24;
1638 status = 0; 1733 status = 0;
1639 1734
1640 spin_lock_irq(&ha->hardware_lock); 1735 spin_lock_irq(&ha->hardware_lock);
1736 vha = qla2x00_get_rsp_host(rsp);
1641 do { 1737 do {
1642 stat = RD_REG_DWORD(&reg->host_status); 1738 stat = RD_REG_DWORD(&reg->host_status);
1643 if (stat & HSRX_RISC_PAUSED) { 1739 if (stat & HSRX_RISC_PAUSED) {
@@ -1645,7 +1741,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1645 break; 1741 break;
1646 1742
1647 if (ha->hw_event_pause_errors == 0) 1743 if (ha->hw_event_pause_errors == 0)
1648 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR, 1744 qla2x00_post_hwe_work(vha, HW_EVENT_PARITY_ERR,
1649 0, MSW(stat), LSW(stat)); 1745 0, MSW(stat), LSW(stat));
1650 else if (ha->hw_event_pause_errors < 0xffffffff) 1746 else if (ha->hw_event_pause_errors < 0xffffffff)
1651 ha->hw_event_pause_errors++; 1747 ha->hw_event_pause_errors++;
@@ -1655,10 +1751,10 @@ qla24xx_msix_default(int irq, void *dev_id)
1655 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, " 1751 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1656 "Dumping firmware!\n", hccr); 1752 "Dumping firmware!\n", hccr);
1657 1753
1658 qla2xxx_check_risc_status(ha); 1754 qla2xxx_check_risc_status(vha);
1659 1755
1660 ha->isp_ops->fw_dump(ha, 1); 1756 ha->isp_ops->fw_dump(vha, 1);
1661 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1662 break; 1758 break;
1663 } else if ((stat & HSRX_RISC_INT) == 0) 1759 } else if ((stat & HSRX_RISC_INT) == 0)
1664 break; 1760 break;
@@ -1668,7 +1764,7 @@ qla24xx_msix_default(int irq, void *dev_id)
1668 case 0x2: 1764 case 0x2:
1669 case 0x10: 1765 case 0x10:
1670 case 0x11: 1766 case 0x11:
1671 qla24xx_mbx_completion(ha, MSW(stat)); 1767 qla24xx_mbx_completion(vha, MSW(stat));
1672 status |= MBX_INTERRUPT; 1768 status |= MBX_INTERRUPT;
1673 1769
1674 break; 1770 break;
@@ -1677,15 +1773,16 @@ qla24xx_msix_default(int irq, void *dev_id)
1677 mb[1] = RD_REG_WORD(&reg->mailbox1); 1773 mb[1] = RD_REG_WORD(&reg->mailbox1);
1678 mb[2] = RD_REG_WORD(&reg->mailbox2); 1774 mb[2] = RD_REG_WORD(&reg->mailbox2);
1679 mb[3] = RD_REG_WORD(&reg->mailbox3); 1775 mb[3] = RD_REG_WORD(&reg->mailbox3);
1680 qla2x00_async_event(ha, mb); 1776 qla2x00_async_event(vha, rsp, mb);
1681 break; 1777 break;
1682 case 0x13: 1778 case 0x13:
1683 qla24xx_process_response_queue(ha); 1779 case 0x14:
1780 qla24xx_process_response_queue(rsp);
1684 break; 1781 break;
1685 default: 1782 default:
1686 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " 1783 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1687 "(%d).\n", 1784 "(%d).\n",
1688 ha->host_no, stat & 0xff)); 1785 vha->host_no, stat & 0xff));
1689 break; 1786 break;
1690 } 1787 }
1691 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 1788 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
@@ -1710,70 +1807,138 @@ struct qla_init_msix_entry {
1710 irq_handler_t handler; 1807 irq_handler_t handler;
1711}; 1808};
1712 1809
1713static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { 1810static struct qla_init_msix_entry base_queue = {
1714 { QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT, 1811 .entry = 0,
1715 "qla2xxx (default)", qla24xx_msix_default }, 1812 .index = 0,
1813 .name = "qla2xxx (default)",
1814 .handler = qla24xx_msix_default,
1815};
1816
1817static struct qla_init_msix_entry base_rsp_queue = {
1818 .entry = 1,
1819 .index = 1,
1820 .name = "qla2xxx (rsp_q)",
1821 .handler = qla24xx_msix_rsp_q,
1822};
1716 1823
1717 { QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q, 1824static struct qla_init_msix_entry multi_rsp_queue = {
1718 "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 1825 .entry = 1,
1826 .index = 1,
1827 .name = "qla2xxx (multi_q)",
1828 .handler = qla25xx_msix_rsp_q,
1719}; 1829};
1720 1830
1721static void 1831static void
1722qla24xx_disable_msix(scsi_qla_host_t *ha) 1832qla24xx_disable_msix(struct qla_hw_data *ha)
1723{ 1833{
1724 int i; 1834 int i;
1725 struct qla_msix_entry *qentry; 1835 struct qla_msix_entry *qentry;
1726 1836
1727 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1837 for (i = 0; i < ha->msix_count; i++) {
1728 qentry = &ha->msix_entries[imsix_entries[i].index]; 1838 qentry = &ha->msix_entries[i];
1729 if (qentry->have_irq) 1839 if (qentry->have_irq)
1730 free_irq(qentry->msix_vector, ha); 1840 free_irq(qentry->vector, qentry->rsp);
1731 } 1841 }
1732 pci_disable_msix(ha->pdev); 1842 pci_disable_msix(ha->pdev);
1843 kfree(ha->msix_entries);
1844 ha->msix_entries = NULL;
1845 ha->flags.msix_enabled = 0;
1733} 1846}
1734 1847
1735static int 1848static int
1736qla24xx_enable_msix(scsi_qla_host_t *ha) 1849qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
1737{ 1850{
1738 int i, ret; 1851 int i, ret;
1739 struct msix_entry entries[QLA_MSIX_ENTRIES]; 1852 struct msix_entry *entries;
1740 struct qla_msix_entry *qentry; 1853 struct qla_msix_entry *qentry;
1854 struct qla_init_msix_entry *msix_queue;
1741 1855
1742 for (i = 0; i < QLA_MSIX_ENTRIES; i++) 1856 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
1743 entries[i].entry = imsix_entries[i].entry; 1857 GFP_KERNEL);
1858 if (!entries)
1859 return -ENOMEM;
1744 1860
1745 ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries)); 1861 for (i = 0; i < ha->msix_count; i++)
1862 entries[i].entry = i;
1863
1864 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1746 if (ret) { 1865 if (ret) {
1747 qla_printk(KERN_WARNING, ha, 1866 qla_printk(KERN_WARNING, ha,
1748 "MSI-X: Failed to enable support -- %d/%d\n", 1867 "MSI-X: Failed to enable support -- %d/%d\n"
1749 QLA_MSIX_ENTRIES, ret); 1868 " Retry with %d vectors\n", ha->msix_count, ret, ret);
1869 ha->msix_count = ret;
1870 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
1871 if (ret) {
1872 qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
1873 " support, giving up -- %d/%d\n",
1874 ha->msix_count, ret);
1875 goto msix_out;
1876 }
1877 ha->max_queues = ha->msix_count - 1;
1878 }
1879 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
1880 ha->msix_count, GFP_KERNEL);
1881 if (!ha->msix_entries) {
1882 ret = -ENOMEM;
1750 goto msix_out; 1883 goto msix_out;
1751 } 1884 }
1752 ha->flags.msix_enabled = 1; 1885 ha->flags.msix_enabled = 1;
1753 1886
1754 for (i = 0; i < QLA_MSIX_ENTRIES; i++) { 1887 for (i = 0; i < ha->msix_count; i++) {
1755 qentry = &ha->msix_entries[imsix_entries[i].index]; 1888 qentry = &ha->msix_entries[i];
1756 qentry->msix_vector = entries[i].vector; 1889 qentry->vector = entries[i].vector;
1757 qentry->msix_entry = entries[i].entry; 1890 qentry->entry = entries[i].entry;
1758 qentry->have_irq = 0; 1891 qentry->have_irq = 0;
1759 ret = request_irq(qentry->msix_vector, 1892 qentry->rsp = NULL;
1760 imsix_entries[i].handler, 0, imsix_entries[i].name, ha); 1893 }
1761 if (ret) { 1894
1762 qla_printk(KERN_WARNING, ha, 1895 /* Enable MSI-X for AENs for queue 0 */
1763 "MSI-X: Unable to register handler -- %x/%d.\n", 1896 qentry = &ha->msix_entries[0];
1764 imsix_entries[i].index, ret); 1897 ret = request_irq(qentry->vector, base_queue.handler, 0,
1765 qla24xx_disable_msix(ha); 1898 base_queue.name, rsp);
1766 goto msix_out; 1899 if (ret) {
1767 } 1900 qla_printk(KERN_WARNING, ha,
1768 qentry->have_irq = 1; 1901 "MSI-X: Unable to register handler -- %x/%d.\n",
1902 qentry->vector, ret);
1903 qla24xx_disable_msix(ha);
1904 goto msix_out;
1769 } 1905 }
1906 qentry->have_irq = 1;
1907 qentry->rsp = rsp;
1908
1909 /* Enable MSI-X vector for response queue update for queue 0 */
1910 if (ha->max_queues > 1 && ha->mqiobase) {
1911 ha->mqenable = 1;
1912 msix_queue = &multi_rsp_queue;
1913 qla_printk(KERN_INFO, ha,
1914 "MQ enabled, Number of Queue Resources: %d \n",
1915 ha->max_queues);
1916 } else {
1917 ha->mqenable = 0;
1918 msix_queue = &base_rsp_queue;
1919 }
1920
1921 qentry = &ha->msix_entries[1];
1922 ret = request_irq(qentry->vector, msix_queue->handler, 0,
1923 msix_queue->name, rsp);
1924 if (ret) {
1925 qla_printk(KERN_WARNING, ha,
1926 "MSI-X: Unable to register handler -- %x/%d.\n",
1927 qentry->vector, ret);
1928 qla24xx_disable_msix(ha);
1929 ha->mqenable = 0;
1930 goto msix_out;
1931 }
1932 qentry->have_irq = 1;
1933 qentry->rsp = rsp;
1770 1934
1771msix_out: 1935msix_out:
1936 kfree(entries);
1772 return ret; 1937 return ret;
1773} 1938}
1774 1939
1775int 1940int
1776qla2x00_request_irqs(scsi_qla_host_t *ha) 1941qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
1777{ 1942{
1778 int ret; 1943 int ret;
1779 device_reg_t __iomem *reg = ha->iobase; 1944 device_reg_t __iomem *reg = ha->iobase;
@@ -1782,11 +1947,11 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1782 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha)) 1947 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1783 goto skip_msix; 1948 goto skip_msix;
1784 1949
1785 if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX || 1950 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
1786 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) { 1951 !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1787 DEBUG2(qla_printk(KERN_WARNING, ha, 1952 DEBUG2(qla_printk(KERN_WARNING, ha,
1788 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 1953 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1789 ha->chip_revision, ha->fw_attributes)); 1954 ha->pdev->revision, ha->fw_attributes));
1790 1955
1791 goto skip_msix; 1956 goto skip_msix;
1792 } 1957 }
@@ -1803,7 +1968,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
1803 goto skip_msi; 1968 goto skip_msi;
1804 } 1969 }
1805 1970
1806 ret = qla24xx_enable_msix(ha); 1971 ret = qla24xx_enable_msix(ha, rsp);
1807 if (!ret) { 1972 if (!ret) {
1808 DEBUG2(qla_printk(KERN_INFO, ha, 1973 DEBUG2(qla_printk(KERN_INFO, ha,
1809 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision, 1974 "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
@@ -1825,7 +1990,7 @@ skip_msix:
1825skip_msi: 1990skip_msi:
1826 1991
1827 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 1992 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1828 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha); 1993 IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
1829 if (ret) { 1994 if (ret) {
1830 qla_printk(KERN_WARNING, ha, 1995 qla_printk(KERN_WARNING, ha,
1831 "Failed to reserve interrupt %d already in use.\n", 1996 "Failed to reserve interrupt %d already in use.\n",
@@ -1833,10 +1998,8 @@ skip_msi:
1833 goto fail; 1998 goto fail;
1834 } 1999 }
1835 ha->flags.inta_enabled = 1; 2000 ha->flags.inta_enabled = 1;
1836 ha->host->irq = ha->pdev->irq;
1837clear_risc_ints: 2001clear_risc_ints:
1838 2002
1839 ha->isp_ops->disable_intrs(ha);
1840 spin_lock_irq(&ha->hardware_lock); 2003 spin_lock_irq(&ha->hardware_lock);
1841 if (IS_FWI2_CAPABLE(ha)) { 2004 if (IS_FWI2_CAPABLE(ha)) {
1842 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT); 2005 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
@@ -1853,13 +2016,74 @@ fail:
1853} 2016}
1854 2017
1855void 2018void
1856qla2x00_free_irqs(scsi_qla_host_t *ha) 2019qla2x00_free_irqs(scsi_qla_host_t *vha)
1857{ 2020{
2021 struct qla_hw_data *ha = vha->hw;
2022 struct rsp_que *rsp = ha->rsp_q_map[0];
1858 2023
1859 if (ha->flags.msix_enabled) 2024 if (ha->flags.msix_enabled)
1860 qla24xx_disable_msix(ha); 2025 qla24xx_disable_msix(ha);
1861 else if (ha->flags.inta_enabled) { 2026 else if (ha->flags.inta_enabled) {
1862 free_irq(ha->host->irq, ha); 2027 free_irq(ha->pdev->irq, rsp);
1863 pci_disable_msi(ha->pdev); 2028 pci_disable_msi(ha->pdev);
1864 } 2029 }
1865} 2030}
2031
2032static struct scsi_qla_host *
2033qla2x00_get_rsp_host(struct rsp_que *rsp)
2034{
2035 srb_t *sp;
2036 struct qla_hw_data *ha = rsp->hw;
2037 struct scsi_qla_host *vha = NULL;
2038 struct sts_entry_24xx *pkt;
2039 struct req_que *req;
2040
2041 if (rsp->id) {
2042 pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
2043 req = rsp->req;
2044 if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
2045 sp = req->outstanding_cmds[pkt->handle];
2046 if (sp)
2047 vha = sp->vha;
2048 }
2049 }
2050 if (!vha)
2051 /* handle it in base queue */
2052 vha = pci_get_drvdata(ha->pdev);
2053
2054 return vha;
2055}
2056
2057int qla25xx_request_irq(struct rsp_que *rsp)
2058{
2059 struct qla_hw_data *ha = rsp->hw;
2060 struct qla_init_msix_entry *intr = &multi_rsp_queue;
2061 struct qla_msix_entry *msix = rsp->msix;
2062 int ret;
2063
2064 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
2065 if (ret) {
2066 qla_printk(KERN_WARNING, ha,
2067 "MSI-X: Unable to register handler -- %x/%d.\n",
2068 msix->vector, ret);
2069 return ret;
2070 }
2071 msix->have_irq = 1;
2072 msix->rsp = rsp;
2073 return ret;
2074}
2075
2076void
2077qla25xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2078{
2079 device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
2080 WRT_REG_DWORD(&reg->isp25mq.rsp_q_out, index);
2081}
2082
2083void
2084qla24xx_wrt_rsp_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
2085{
2086 device_reg_t __iomem *reg = (void *) ha->iobase;
2087 WRT_REG_DWORD(&reg->isp24.rsp_q_out, index);
2088}
2089
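Closing out the qla_isr.c changes: the MSI-X setup above asks pci_enable_msix() for ha->msix_count vectors and, when the call returns a smaller positive count, retries with that count and caps ha->max_queues at msix_count - 1; vector 0 then serves mailbox/AEN completions and vector 1 the (multi-)response queue. A hedged sketch of just the vector-count negotiation follows; try_enable() is a hypothetical stand-in for pci_enable_msix() with the same return convention (0 on success, a positive count of available vectors on partial support, negative on error).

/* Sketch of the vector-count negotiation used by qla24xx_enable_msix(). */
static int sketch_negotiate_vectors(int wanted, int (*try_enable)(int))
{
        int ret = try_enable(wanted);

        if (ret > 0) {                  /* partial support: retry with what we got */
                wanted = ret;
                ret = try_enable(wanted);
        }
        return ret ? -1 : wanted;       /* -1 = give up, else vectors now enabled  */
}

Every handler registered in this path receives the struct rsp_que as its dev_id, which is also why qla2x00_free_irqs() above frees the legacy interrupt against ha->rsp_q_map[0] rather than against the host structure.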
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3402746ec128..a99976f5fabd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -29,7 +29,7 @@
29 * Kernel context. 29 * Kernel context.
30 */ 30 */
31static int 31static int
32qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp) 32qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
33{ 33{
34 int rval; 34 int rval;
35 unsigned long flags = 0; 35 unsigned long flags = 0;
@@ -42,15 +42,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
42 uint32_t cnt; 42 uint32_t cnt;
43 uint32_t mboxes; 43 uint32_t mboxes;
44 unsigned long wait_time; 44 unsigned long wait_time;
45 scsi_qla_host_t *ha = to_qla_parent(pvha); 45 struct qla_hw_data *ha = vha->hw;
46 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
46 47
47 reg = ha->iobase; 48 reg = ha->iobase;
48 io_lock_on = ha->flags.init_done; 49 io_lock_on = base_vha->flags.init_done;
49 50
50 rval = QLA_SUCCESS; 51 rval = QLA_SUCCESS;
51 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 52 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
52 53
53 DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no)); 54 DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
54 55
55 /* 56 /*
56 * Wait for active mailbox commands to finish by waiting at most tov 57 * Wait for active mailbox commands to finish by waiting at most tov
@@ -62,7 +63,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
62 mcp->tov * HZ)) { 63 mcp->tov * HZ)) {
63 /* Timeout occurred. Return error. */ 64 /* Timeout occurred. Return error. */
64 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. " 65 DEBUG2_3_11(printk("%s(%ld): cmd access timeout. "
65 "Exiting.\n", __func__, ha->host_no)); 66 "Exiting.\n", __func__, base_vha->host_no));
66 return QLA_FUNCTION_TIMEOUT; 67 return QLA_FUNCTION_TIMEOUT;
67 } 68 }
68 } 69 }
@@ -72,7 +73,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
72 ha->mcp = mcp; 73 ha->mcp = mcp;
73 74
74 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n", 75 DEBUG11(printk("scsi(%ld): prepare to issue mbox cmd=0x%x.\n",
75 ha->host_no, mcp->mb[0])); 76 base_vha->host_no, mcp->mb[0]));
76 77
77 spin_lock_irqsave(&ha->hardware_lock, flags); 78 spin_lock_irqsave(&ha->hardware_lock, flags);
78 79
@@ -100,15 +101,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
100 101
101#if defined(QL_DEBUG_LEVEL_1) 102#if defined(QL_DEBUG_LEVEL_1)
102 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n", 103 printk("%s(%ld): Loaded MBX registers (displayed in bytes) = \n",
103 __func__, ha->host_no); 104 __func__, base_vha->host_no);
104 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16); 105 qla2x00_dump_buffer((uint8_t *)mcp->mb, 16);
105 printk("\n"); 106 printk("\n");
106 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16); 107 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x10), 16);
107 printk("\n"); 108 printk("\n");
108 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8); 109 qla2x00_dump_buffer(((uint8_t *)mcp->mb + 0x20), 8);
109 printk("\n"); 110 printk("\n");
110 printk("%s(%ld): I/O address = %p.\n", __func__, ha->host_no, optr); 111 printk("%s(%ld): I/O address = %p.\n", __func__, base_vha->host_no,
111 qla2x00_dump_regs(ha); 112 optr);
113 qla2x00_dump_regs(base_vha);
112#endif 114#endif
113 115
114 /* Issue set host interrupt command to send cmd out. */ 116 /* Issue set host interrupt command to send cmd out. */
@@ -117,7 +119,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
117 119
118 /* Unlock mbx registers and wait for interrupt */ 120 /* Unlock mbx registers and wait for interrupt */
119 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. " 121 DEBUG11(printk("%s(%ld): going to unlock irq & waiting for interrupt. "
120 "jiffies=%lx.\n", __func__, ha->host_no, jiffies)); 122 "jiffies=%lx.\n", __func__, base_vha->host_no, jiffies));
121 123
122 /* Wait for mbx cmd completion until timeout */ 124 /* Wait for mbx cmd completion until timeout */
123 125
@@ -137,7 +139,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
137 139
138 } else { 140 } else {
139 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__, 141 DEBUG3_11(printk("%s(%ld): cmd=%x POLLING MODE.\n", __func__,
140 ha->host_no, command)); 142 base_vha->host_no, command));
141 143
142 if (IS_FWI2_CAPABLE(ha)) 144 if (IS_FWI2_CAPABLE(ha))
143 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT); 145 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
@@ -151,7 +153,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
151 break; 153 break;
152 154
153 /* Check for pending interrupts. */ 155 /* Check for pending interrupts. */
154 qla2x00_poll(ha); 156 qla2x00_poll(ha->rsp_q_map[0]);
155 157
156 if (command != MBC_LOAD_RISC_RAM_EXTENDED && 158 if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
157 !ha->flags.mbox_int) 159 !ha->flags.mbox_int)
@@ -164,7 +166,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
164 uint16_t *iptr2; 166 uint16_t *iptr2;
165 167
166 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__, 168 DEBUG3_11(printk("%s(%ld): cmd %x completed.\n", __func__,
167 ha->host_no, command)); 169 base_vha->host_no, command));
168 170
169 /* Got interrupt. Clear the flag. */ 171 /* Got interrupt. Clear the flag. */
170 ha->flags.mbox_int = 0; 172 ha->flags.mbox_int = 0;
@@ -200,12 +202,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
200 ictrl = RD_REG_WORD(&reg->isp.ictrl); 202 ictrl = RD_REG_WORD(&reg->isp.ictrl);
201 } 203 }
202 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n", 204 printk("%s(%ld): **** MB Command Timeout for cmd %x ****\n",
203 __func__, ha->host_no, command); 205 __func__, base_vha->host_no, command);
204 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__, 206 printk("%s(%ld): icontrol=%x jiffies=%lx\n", __func__,
205 ha->host_no, ictrl, jiffies); 207 base_vha->host_no, ictrl, jiffies);
206 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__, 208 printk("%s(%ld): *** mailbox[0] = 0x%x ***\n", __func__,
207 ha->host_no, mb0); 209 base_vha->host_no, mb0);
208 qla2x00_dump_regs(ha); 210 qla2x00_dump_regs(base_vha);
209#endif 211#endif
210 212
211 rval = QLA_FUNCTION_TIMEOUT; 213 rval = QLA_FUNCTION_TIMEOUT;
@@ -218,10 +220,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
218 220
219 if (abort_active || !io_lock_on) { 221 if (abort_active || !io_lock_on) {
220 DEBUG11(printk("%s(%ld): checking for additional resp " 222 DEBUG11(printk("%s(%ld): checking for additional resp "
221 "interrupt.\n", __func__, ha->host_no)); 223 "interrupt.\n", __func__, base_vha->host_no));
222 224
223 /* polling mode for non isp_abort commands. */ 225 /* polling mode for non isp_abort commands. */
224 qla2x00_poll(ha); 226 qla2x00_poll(ha->rsp_q_map[0]);
225 } 227 }
226 228
227 if (rval == QLA_FUNCTION_TIMEOUT && 229 if (rval == QLA_FUNCTION_TIMEOUT &&
@@ -229,35 +231,37 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
229 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) { 231 if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
230 /* not in dpc. schedule it for dpc to take over. */ 232 /* not in dpc. schedule it for dpc to take over. */
231 DEBUG(printk("%s(%ld): timeout schedule " 233 DEBUG(printk("%s(%ld): timeout schedule "
232 "isp_abort_needed.\n", __func__, ha->host_no)); 234 "isp_abort_needed.\n", __func__,
235 base_vha->host_no));
233 DEBUG2_3_11(printk("%s(%ld): timeout schedule " 236 DEBUG2_3_11(printk("%s(%ld): timeout schedule "
234 "isp_abort_needed.\n", __func__, ha->host_no)); 237 "isp_abort_needed.\n", __func__,
238 base_vha->host_no));
235 qla_printk(KERN_WARNING, ha, 239 qla_printk(KERN_WARNING, ha,
236 "Mailbox command timeout occurred. Scheduling ISP " 240 "Mailbox command timeout occurred. Scheduling ISP "
237 "abort.\n"); 241 "abort.\n");
238 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 242 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
239 qla2xxx_wake_dpc(ha); 243 qla2xxx_wake_dpc(vha);
240 } else if (!abort_active) { 244 } else if (!abort_active) {
241 /* call abort directly since we are in the DPC thread */ 245 /* call abort directly since we are in the DPC thread */
242 DEBUG(printk("%s(%ld): timeout calling abort_isp\n", 246 DEBUG(printk("%s(%ld): timeout calling abort_isp\n",
243 __func__, ha->host_no)); 247 __func__, base_vha->host_no));
244 DEBUG2_3_11(printk("%s(%ld): timeout calling " 248 DEBUG2_3_11(printk("%s(%ld): timeout calling "
245 "abort_isp\n", __func__, ha->host_no)); 249 "abort_isp\n", __func__, base_vha->host_no));
246 qla_printk(KERN_WARNING, ha, 250 qla_printk(KERN_WARNING, ha,
247 "Mailbox command timeout occurred. Issuing ISP " 251 "Mailbox command timeout occurred. Issuing ISP "
248 "abort.\n"); 252 "abort.\n");
249 253
250 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 254 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
251 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 255 clear_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
252 if (qla2x00_abort_isp(ha)) { 256 if (qla2x00_abort_isp(base_vha)) {
253 /* Failed. retry later. */ 257 /* Failed. retry later. */
254 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 258 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
255 } 259 }
256 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 260 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
257 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__, 261 DEBUG(printk("%s(%ld): finished abort_isp\n", __func__,
258 ha->host_no)); 262 base_vha->host_no));
259 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n", 263 DEBUG2_3_11(printk("%s(%ld): finished abort_isp\n",
260 __func__, ha->host_no)); 264 __func__, base_vha->host_no));
261 } 265 }
262 } 266 }
263 267
@@ -267,24 +271,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
267 271
268 if (rval) { 272 if (rval) {
269 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, " 273 DEBUG2_3_11(printk("%s(%ld): **** FAILED. mbx0=%x, mbx1=%x, "
270 "mbx2=%x, cmd=%x ****\n", __func__, ha->host_no, 274 "mbx2=%x, cmd=%x ****\n", __func__, base_vha->host_no,
271 mcp->mb[0], mcp->mb[1], mcp->mb[2], command)); 275 mcp->mb[0], mcp->mb[1], mcp->mb[2], command));
272 } else { 276 } else {
273 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 277 DEBUG11(printk("%s(%ld): done.\n", __func__,
278 base_vha->host_no));
274 } 279 }
275 280
276 return rval; 281 return rval;
277} 282}
278 283
279int 284int
280qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr, 285qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
281 uint32_t risc_code_size) 286 uint32_t risc_code_size)
282{ 287{
283 int rval; 288 int rval;
289 struct qla_hw_data *ha = vha->hw;
284 mbx_cmd_t mc; 290 mbx_cmd_t mc;
285 mbx_cmd_t *mcp = &mc; 291 mbx_cmd_t *mcp = &mc;
286 292
287 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 293 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
288 294
289 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 295 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
290 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 296 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -312,13 +318,13 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
312 mcp->in_mb = MBX_0; 318 mcp->in_mb = MBX_0;
313 mcp->tov = MBX_TOV_SECONDS; 319 mcp->tov = MBX_TOV_SECONDS;
314 mcp->flags = 0; 320 mcp->flags = 0;
315 rval = qla2x00_mailbox_command(ha, mcp); 321 rval = qla2x00_mailbox_command(vha, mcp);
316 322
317 if (rval != QLA_SUCCESS) { 323 if (rval != QLA_SUCCESS) {
318 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 324 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
319 ha->host_no, rval, mcp->mb[0])); 325 vha->host_no, rval, mcp->mb[0]));
320 } else { 326 } else {
321 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 327 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
322 } 328 }
323 329
324 return rval; 330 return rval;
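
This hunk shows the conversion pattern repeated through the rest of the file: each mailbox routine now takes the (possibly virtual) port, scsi_qla_host_t *vha, and opens with struct qla_hw_data *ha = vha->hw to reach adapter-wide state, while per-port fields such as host_no and vp_idx stay on vha. A reduced sketch of that split, with simplified struct and field names that are illustrative only:

#include <linux/types.h>

struct sketch_hw_data {			/* one per physical adapter */
	uint16_t login_timeout;
	uint8_t  loop_reset_delay;
};

struct sketch_host {			/* one per (virtual) port */
	unsigned long host_no;
	uint16_t vp_idx;
	struct sketch_hw_data *hw;
};

static uint16_t sketch_login_tov(struct sketch_host *vha)
{
	struct sketch_hw_data *ha = vha->hw;	/* the opening line each routine gains */

	/* vha->host_no / vha->vp_idx identify the port for logging and
	 * addressing; ha->... holds settings shared by all ports on the
	 * adapter, e.g. the login timeout used to size the mailbox tov. */
	return (ha->login_timeout * 2) + (ha->login_timeout / 2);
}
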
@@ -340,13 +346,14 @@ qla2x00_load_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t risc_addr,
340 * Kernel context. 346 * Kernel context.
341 */ 347 */
342int 348int
343qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr) 349qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
344{ 350{
345 int rval; 351 int rval;
352 struct qla_hw_data *ha = vha->hw;
346 mbx_cmd_t mc; 353 mbx_cmd_t mc;
347 mbx_cmd_t *mcp = &mc; 354 mbx_cmd_t *mcp = &mc;
348 355
349 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 356 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
350 357
351 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 358 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
352 mcp->out_mb = MBX_0; 359 mcp->out_mb = MBX_0;
@@ -369,18 +376,18 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
369 376
370 mcp->tov = MBX_TOV_SECONDS; 377 mcp->tov = MBX_TOV_SECONDS;
371 mcp->flags = 0; 378 mcp->flags = 0;
372 rval = qla2x00_mailbox_command(ha, mcp); 379 rval = qla2x00_mailbox_command(vha, mcp);
373 380
374 if (rval != QLA_SUCCESS) { 381 if (rval != QLA_SUCCESS) {
375 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 382 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
376 ha->host_no, rval, mcp->mb[0])); 383 vha->host_no, rval, mcp->mb[0]));
377 } else { 384 } else {
378 if (IS_FWI2_CAPABLE(ha)) { 385 if (IS_FWI2_CAPABLE(ha)) {
379 DEBUG11(printk("%s(%ld): done exchanges=%x.\n", 386 DEBUG11(printk("%s(%ld): done exchanges=%x.\n",
380 __func__, ha->host_no, mcp->mb[1])); 387 __func__, vha->host_no, mcp->mb[1]));
381 } else { 388 } else {
382 DEBUG11(printk("%s(%ld): done.\n", __func__, 389 DEBUG11(printk("%s(%ld): done.\n", __func__,
383 ha->host_no)); 390 vha->host_no));
384 } 391 }
385 } 392 }
386 393
@@ -404,28 +411,28 @@ qla2x00_execute_fw(scsi_qla_host_t *ha, uint32_t risc_addr)
404 * Kernel context. 411 * Kernel context.
405 */ 412 */
406void 413void
407qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor, 414qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
408 uint16_t *subminor, uint16_t *attributes, uint32_t *memory) 415 uint16_t *subminor, uint16_t *attributes, uint32_t *memory)
409{ 416{
410 int rval; 417 int rval;
411 mbx_cmd_t mc; 418 mbx_cmd_t mc;
412 mbx_cmd_t *mcp = &mc; 419 mbx_cmd_t *mcp = &mc;
413 420
414 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 421 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
415 422
416 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 423 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
417 mcp->out_mb = MBX_0; 424 mcp->out_mb = MBX_0;
418 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 425 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
419 mcp->flags = 0; 426 mcp->flags = 0;
420 mcp->tov = MBX_TOV_SECONDS; 427 mcp->tov = MBX_TOV_SECONDS;
421 rval = qla2x00_mailbox_command(ha, mcp); 428 rval = qla2x00_mailbox_command(vha, mcp);
422 429
423 /* Return mailbox data. */ 430 /* Return mailbox data. */
424 *major = mcp->mb[1]; 431 *major = mcp->mb[1];
425 *minor = mcp->mb[2]; 432 *minor = mcp->mb[2];
426 *subminor = mcp->mb[3]; 433 *subminor = mcp->mb[3];
427 *attributes = mcp->mb[6]; 434 *attributes = mcp->mb[6];
428 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 435 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
429 *memory = 0x1FFFF; /* Defaults to 128KB. */ 436 *memory = 0x1FFFF; /* Defaults to 128KB. */
430 else 437 else
431 *memory = (mcp->mb[5] << 16) | mcp->mb[4]; 438 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
@@ -433,10 +440,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
433 if (rval != QLA_SUCCESS) { 440 if (rval != QLA_SUCCESS) {
434 /*EMPTY*/ 441 /*EMPTY*/
435 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 442 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
436 ha->host_no, rval)); 443 vha->host_no, rval));
437 } else { 444 } else {
438 /*EMPTY*/ 445 /*EMPTY*/
439 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 446 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
440 } 447 }
441} 448}
442 449
@@ -455,32 +462,32 @@ qla2x00_get_fw_version(scsi_qla_host_t *ha, uint16_t *major, uint16_t *minor,
455 * Kernel context. 462 * Kernel context.
456 */ 463 */
457int 464int
458qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) 465qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
459{ 466{
460 int rval; 467 int rval;
461 mbx_cmd_t mc; 468 mbx_cmd_t mc;
462 mbx_cmd_t *mcp = &mc; 469 mbx_cmd_t *mcp = &mc;
463 470
464 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 471 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
465 472
466 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 473 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
467 mcp->out_mb = MBX_0; 474 mcp->out_mb = MBX_0;
468 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 475 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
469 mcp->tov = MBX_TOV_SECONDS; 476 mcp->tov = MBX_TOV_SECONDS;
470 mcp->flags = 0; 477 mcp->flags = 0;
471 rval = qla2x00_mailbox_command(ha, mcp); 478 rval = qla2x00_mailbox_command(vha, mcp);
472 479
473 if (rval != QLA_SUCCESS) { 480 if (rval != QLA_SUCCESS) {
474 /*EMPTY*/ 481 /*EMPTY*/
475 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 482 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
476 ha->host_no, rval)); 483 vha->host_no, rval));
477 } else { 484 } else {
478 fwopts[0] = mcp->mb[0]; 485 fwopts[0] = mcp->mb[0];
479 fwopts[1] = mcp->mb[1]; 486 fwopts[1] = mcp->mb[1];
480 fwopts[2] = mcp->mb[2]; 487 fwopts[2] = mcp->mb[2];
481 fwopts[3] = mcp->mb[3]; 488 fwopts[3] = mcp->mb[3];
482 489
483 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 490 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
484 } 491 }
485 492
486 return rval; 493 return rval;
@@ -502,13 +509,13 @@ qla2x00_get_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
502 * Kernel context. 509 * Kernel context.
503 */ 510 */
504int 511int
505qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts) 512qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
506{ 513{
507 int rval; 514 int rval;
508 mbx_cmd_t mc; 515 mbx_cmd_t mc;
509 mbx_cmd_t *mcp = &mc; 516 mbx_cmd_t *mcp = &mc;
510 517
511 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
512 519
513 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 520 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
514 mcp->mb[1] = fwopts[1]; 521 mcp->mb[1] = fwopts[1];
@@ -516,7 +523,7 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
516 mcp->mb[3] = fwopts[3]; 523 mcp->mb[3] = fwopts[3];
517 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 524 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
518 mcp->in_mb = MBX_0; 525 mcp->in_mb = MBX_0;
519 if (IS_FWI2_CAPABLE(ha)) { 526 if (IS_FWI2_CAPABLE(vha->hw)) {
520 mcp->in_mb |= MBX_1; 527 mcp->in_mb |= MBX_1;
521 } else { 528 } else {
522 mcp->mb[10] = fwopts[10]; 529 mcp->mb[10] = fwopts[10];
@@ -526,17 +533,17 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
526 } 533 }
527 mcp->tov = MBX_TOV_SECONDS; 534 mcp->tov = MBX_TOV_SECONDS;
528 mcp->flags = 0; 535 mcp->flags = 0;
529 rval = qla2x00_mailbox_command(ha, mcp); 536 rval = qla2x00_mailbox_command(vha, mcp);
530 537
531 fwopts[0] = mcp->mb[0]; 538 fwopts[0] = mcp->mb[0];
532 539
533 if (rval != QLA_SUCCESS) { 540 if (rval != QLA_SUCCESS) {
534 /*EMPTY*/ 541 /*EMPTY*/
535 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__, 542 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x/%x).\n", __func__,
536 ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 543 vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
537 } else { 544 } else {
538 /*EMPTY*/ 545 /*EMPTY*/
539 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 546 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
540 } 547 }
541 548
542 return rval; 549 return rval;
@@ -558,13 +565,14 @@ qla2x00_set_fw_options(scsi_qla_host_t *ha, uint16_t *fwopts)
558 * Kernel context. 565 * Kernel context.
559 */ 566 */
560int 567int
561qla2x00_mbx_reg_test(scsi_qla_host_t *ha) 568qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
562{ 569{
563 int rval; 570 int rval;
571 struct qla_hw_data *ha = vha->hw;
564 mbx_cmd_t mc; 572 mbx_cmd_t mc;
565 mbx_cmd_t *mcp = &mc; 573 mbx_cmd_t *mcp = &mc;
566 574
567 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", ha->host_no)); 575 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): entered.\n", vha->host_no));
568 576
569 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 577 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
570 mcp->mb[1] = 0xAAAA; 578 mcp->mb[1] = 0xAAAA;
@@ -578,7 +586,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
578 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 586 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
579 mcp->tov = MBX_TOV_SECONDS; 587 mcp->tov = MBX_TOV_SECONDS;
580 mcp->flags = 0; 588 mcp->flags = 0;
581 rval = qla2x00_mailbox_command(ha, mcp); 589 rval = qla2x00_mailbox_command(vha, mcp);
582 590
583 if (rval == QLA_SUCCESS) { 591 if (rval == QLA_SUCCESS) {
584 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 || 592 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
@@ -591,7 +599,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
591 struct device_reg_24xx __iomem *reg = 599 struct device_reg_24xx __iomem *reg =
592 &ha->iobase->isp24; 600 &ha->iobase->isp24;
593 601
594 qla2xxx_hw_event_log(ha, HW_EVENT_ISP_ERR, 0, 602 qla2xxx_hw_event_log(vha, HW_EVENT_ISP_ERR, 0,
595 LSW(RD_REG_DWORD(&reg->hccr)), 603 LSW(RD_REG_DWORD(&reg->hccr)),
596 LSW(RD_REG_DWORD(&reg->istatus))); 604 LSW(RD_REG_DWORD(&reg->istatus)));
597 } 605 }
@@ -600,11 +608,11 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
600 if (rval != QLA_SUCCESS) { 608 if (rval != QLA_SUCCESS) {
601 /*EMPTY*/ 609 /*EMPTY*/
602 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n", 610 DEBUG2_3_11(printk("qla2x00_mbx_reg_test(%ld): failed=%x.\n",
603 ha->host_no, rval)); 611 vha->host_no, rval));
604 } else { 612 } else {
605 /*EMPTY*/ 613 /*EMPTY*/
606 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n", 614 DEBUG11(printk("qla2x00_mbx_reg_test(%ld): done.\n",
607 ha->host_no)); 615 vha->host_no));
608 } 616 }
609 617
610 return rval; 618 return rval;
@@ -626,18 +634,18 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *ha)
626 * Kernel context. 634 * Kernel context.
627 */ 635 */
628int 636int
629qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr) 637qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
630{ 638{
631 int rval; 639 int rval;
632 mbx_cmd_t mc; 640 mbx_cmd_t mc;
633 mbx_cmd_t *mcp = &mc; 641 mbx_cmd_t *mcp = &mc;
634 642
635 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 643 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
636 644
637 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 645 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
638 mcp->out_mb = MBX_0; 646 mcp->out_mb = MBX_0;
639 mcp->in_mb = MBX_0; 647 mcp->in_mb = MBX_0;
640 if (IS_FWI2_CAPABLE(ha)) { 648 if (IS_FWI2_CAPABLE(vha->hw)) {
641 mcp->mb[1] = MSW(risc_addr); 649 mcp->mb[1] = MSW(risc_addr);
642 mcp->mb[2] = LSW(risc_addr); 650 mcp->mb[2] = LSW(risc_addr);
643 mcp->out_mb |= MBX_2|MBX_1; 651 mcp->out_mb |= MBX_2|MBX_1;
@@ -650,14 +658,14 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
650 658
651 mcp->tov = MBX_TOV_SECONDS; 659 mcp->tov = MBX_TOV_SECONDS;
652 mcp->flags = 0; 660 mcp->flags = 0;
653 rval = qla2x00_mailbox_command(ha, mcp); 661 rval = qla2x00_mailbox_command(vha, mcp);
654 662
655 if (rval != QLA_SUCCESS) { 663 if (rval != QLA_SUCCESS) {
656 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__, 664 DEBUG2_3_11(printk("%s(%ld): failed=%x chk sum=%x.\n", __func__,
657 ha->host_no, rval, IS_FWI2_CAPABLE(ha) ? 665 vha->host_no, rval, IS_FWI2_CAPABLE(vha->hw) ?
658 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1])); 666 (mcp->mb[2] << 16) | mcp->mb[1]: mcp->mb[1]));
659 } else { 667 } else {
660 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 668 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
661 } 669 }
662 670
663 return rval; 671 return rval;
@@ -682,7 +690,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
682 * Kernel context. 690 * Kernel context.
683 */ 691 */
684static int 692static int
685qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer, 693qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
686 dma_addr_t phys_addr, size_t size, uint32_t tov) 694 dma_addr_t phys_addr, size_t size, uint32_t tov)
687{ 695{
688 int rval; 696 int rval;
@@ -699,30 +707,28 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
699 mcp->in_mb = MBX_2|MBX_0; 707 mcp->in_mb = MBX_2|MBX_0;
700 mcp->tov = tov; 708 mcp->tov = tov;
701 mcp->flags = 0; 709 mcp->flags = 0;
702 rval = qla2x00_mailbox_command(ha, mcp); 710 rval = qla2x00_mailbox_command(vha, mcp);
703 711
704 if (rval != QLA_SUCCESS) { 712 if (rval != QLA_SUCCESS) {
705 /*EMPTY*/ 713 /*EMPTY*/
706 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n", 714 DEBUG(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
707 ha->host_no, rval)); 715 vha->host_no, rval));
708 DEBUG2(printk("qla2x00_issue_iocb(%ld): failed rval 0x%x\n",
709 ha->host_no, rval));
710 } else { 716 } else {
711 sts_entry_t *sts_entry = (sts_entry_t *) buffer; 717 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
712 718
713 /* Mask reserved bits. */ 719 /* Mask reserved bits. */
714 sts_entry->entry_status &= 720 sts_entry->entry_status &=
715 IS_FWI2_CAPABLE(ha) ? RF_MASK_24XX :RF_MASK; 721 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
716 } 722 }
717 723
718 return rval; 724 return rval;
719} 725}
720 726
721int 727int
722qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr, 728qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
723 size_t size) 729 size_t size)
724{ 730{
725 return qla2x00_issue_iocb_timeout(ha, buffer, phys_addr, size, 731 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
726 MBX_TOV_SECONDS); 732 MBX_TOV_SECONDS);
727} 733}
728 734
@@ -741,22 +747,23 @@ qla2x00_issue_iocb(scsi_qla_host_t *ha, void *buffer, dma_addr_t phys_addr,
741 * Kernel context. 747 * Kernel context.
742 */ 748 */
743int 749int
744qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp) 750qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
745{ 751{
746 unsigned long flags = 0; 752 unsigned long flags = 0;
747 fc_port_t *fcport; 753 fc_port_t *fcport;
748 int rval; 754 int rval;
749 uint32_t handle; 755 uint32_t handle = 0;
750 mbx_cmd_t mc; 756 mbx_cmd_t mc;
751 mbx_cmd_t *mcp = &mc; 757 mbx_cmd_t *mcp = &mc;
758 struct qla_hw_data *ha = vha->hw;
752 759
753 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", ha->host_no)); 760 DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
754 761
755 fcport = sp->fcport; 762 fcport = sp->fcport;
756 763
757 spin_lock_irqsave(&ha->hardware_lock, flags); 764 spin_lock_irqsave(&ha->hardware_lock, flags);
758 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 765 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
759 if (ha->outstanding_cmds[handle] == sp) 766 if (req->outstanding_cmds[handle] == sp)
760 break; 767 break;
761 } 768 }
762 spin_unlock_irqrestore(&ha->hardware_lock, flags); 769 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -778,14 +785,14 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
778 mcp->in_mb = MBX_0; 785 mcp->in_mb = MBX_0;
779 mcp->tov = MBX_TOV_SECONDS; 786 mcp->tov = MBX_TOV_SECONDS;
780 mcp->flags = 0; 787 mcp->flags = 0;
781 rval = qla2x00_mailbox_command(ha, mcp); 788 rval = qla2x00_mailbox_command(vha, mcp);
782 789
783 if (rval != QLA_SUCCESS) { 790 if (rval != QLA_SUCCESS) {
784 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n", 791 DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
785 ha->host_no, rval)); 792 vha->host_no, rval));
786 } else { 793 } else {
787 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n", 794 DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
788 ha->host_no)); 795 vha->host_no));
789 } 796 }
790 797
791 return rval; 798 return rval;
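
In the qla2x00_abort_command() hunk above, the outstanding-command table is scanned on an explicit request queue (req->outstanding_cmds) rather than on the host, still under the adapter's hardware_lock. A trimmed sketch of that handle lookup, with reduced types and an assumed table size:

#include <linux/types.h>
#include <linux/spinlock.h>

#define SKETCH_MAX_OUTSTANDING_COMMANDS 1024

struct sketch_srb { int unused; };

struct sketch_req_que {
	struct sketch_srb *outstanding_cmds[SKETCH_MAX_OUTSTANDING_COMMANDS];
};

static uint32_t sketch_find_handle(spinlock_t *hardware_lock,
				   struct sketch_req_que *req,
				   struct sketch_srb *sp)
{
	unsigned long flags;
	uint32_t handle;

	spin_lock_irqsave(hardware_lock, flags);
	for (handle = 1; handle < SKETCH_MAX_OUTSTANDING_COMMANDS; handle++)
		if (req->outstanding_cmds[handle] == sp)
			break;
	spin_unlock_irqrestore(hardware_lock, flags);

	/* handle == SKETCH_MAX_OUTSTANDING_COMMANDS means sp was not found. */
	return handle;
}
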
@@ -797,40 +804,45 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
797 int rval, rval2; 804 int rval, rval2;
798 mbx_cmd_t mc; 805 mbx_cmd_t mc;
799 mbx_cmd_t *mcp = &mc; 806 mbx_cmd_t *mcp = &mc;
800 scsi_qla_host_t *ha; 807 scsi_qla_host_t *vha;
808 struct req_que *req;
809 struct rsp_que *rsp;
801 810
802 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 811 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
803 812
804 l = l; 813 l = l;
805 ha = fcport->ha; 814 vha = fcport->vha;
815 req = vha->hw->req_q_map[0];
816 rsp = vha->hw->rsp_q_map[0];
806 mcp->mb[0] = MBC_ABORT_TARGET; 817 mcp->mb[0] = MBC_ABORT_TARGET;
807 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; 818 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
808 if (HAS_EXTENDED_IDS(ha)) { 819 if (HAS_EXTENDED_IDS(vha->hw)) {
809 mcp->mb[1] = fcport->loop_id; 820 mcp->mb[1] = fcport->loop_id;
810 mcp->mb[10] = 0; 821 mcp->mb[10] = 0;
811 mcp->out_mb |= MBX_10; 822 mcp->out_mb |= MBX_10;
812 } else { 823 } else {
813 mcp->mb[1] = fcport->loop_id << 8; 824 mcp->mb[1] = fcport->loop_id << 8;
814 } 825 }
815 mcp->mb[2] = ha->loop_reset_delay; 826 mcp->mb[2] = vha->hw->loop_reset_delay;
816 mcp->mb[9] = ha->vp_idx; 827 mcp->mb[9] = vha->vp_idx;
817 828
818 mcp->in_mb = MBX_0; 829 mcp->in_mb = MBX_0;
819 mcp->tov = MBX_TOV_SECONDS; 830 mcp->tov = MBX_TOV_SECONDS;
820 mcp->flags = 0; 831 mcp->flags = 0;
821 rval = qla2x00_mailbox_command(ha, mcp); 832 rval = qla2x00_mailbox_command(vha, mcp);
822 if (rval != QLA_SUCCESS) { 833 if (rval != QLA_SUCCESS) {
823 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 834 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
824 ha->host_no, rval)); 835 vha->host_no, rval));
825 } 836 }
826 837
827 /* Issue marker IOCB. */ 838 /* Issue marker IOCB. */
828 rval2 = qla2x00_marker(ha, fcport->loop_id, 0, MK_SYNC_ID); 839 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
840 MK_SYNC_ID);
829 if (rval2 != QLA_SUCCESS) { 841 if (rval2 != QLA_SUCCESS) {
830 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 842 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
831 "(%x).\n", __func__, ha->host_no, rval2)); 843 "(%x).\n", __func__, vha->host_no, rval2));
832 } else { 844 } else {
833 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 845 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
834 } 846 }
835 847
836 return rval; 848 return rval;
@@ -842,37 +854,42 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
842 int rval, rval2; 854 int rval, rval2;
843 mbx_cmd_t mc; 855 mbx_cmd_t mc;
844 mbx_cmd_t *mcp = &mc; 856 mbx_cmd_t *mcp = &mc;
845 scsi_qla_host_t *ha; 857 scsi_qla_host_t *vha;
858 struct req_que *req;
859 struct rsp_que *rsp;
846 860
847 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 861 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
848 862
849 ha = fcport->ha; 863 vha = fcport->vha;
864 req = vha->hw->req_q_map[0];
865 rsp = vha->hw->rsp_q_map[0];
850 mcp->mb[0] = MBC_LUN_RESET; 866 mcp->mb[0] = MBC_LUN_RESET;
851 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; 867 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
852 if (HAS_EXTENDED_IDS(ha)) 868 if (HAS_EXTENDED_IDS(vha->hw))
853 mcp->mb[1] = fcport->loop_id; 869 mcp->mb[1] = fcport->loop_id;
854 else 870 else
855 mcp->mb[1] = fcport->loop_id << 8; 871 mcp->mb[1] = fcport->loop_id << 8;
856 mcp->mb[2] = l; 872 mcp->mb[2] = l;
857 mcp->mb[3] = 0; 873 mcp->mb[3] = 0;
858 mcp->mb[9] = ha->vp_idx; 874 mcp->mb[9] = vha->vp_idx;
859 875
860 mcp->in_mb = MBX_0; 876 mcp->in_mb = MBX_0;
861 mcp->tov = MBX_TOV_SECONDS; 877 mcp->tov = MBX_TOV_SECONDS;
862 mcp->flags = 0; 878 mcp->flags = 0;
863 rval = qla2x00_mailbox_command(ha, mcp); 879 rval = qla2x00_mailbox_command(vha, mcp);
864 if (rval != QLA_SUCCESS) { 880 if (rval != QLA_SUCCESS) {
865 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 881 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
866 ha->host_no, rval)); 882 vha->host_no, rval));
867 } 883 }
868 884
869 /* Issue marker IOCB. */ 885 /* Issue marker IOCB. */
870 rval2 = qla2x00_marker(ha, fcport->loop_id, l, MK_SYNC_ID_LUN); 886 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
887 MK_SYNC_ID_LUN);
871 if (rval2 != QLA_SUCCESS) { 888 if (rval2 != QLA_SUCCESS) {
872 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 889 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
873 "(%x).\n", __func__, ha->host_no, rval2)); 890 "(%x).\n", __func__, vha->host_no, rval2));
874 } else { 891 } else {
875 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 892 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
876 } 893 }
877 894
878 return rval; 895 return rval;
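
Both qla2x00_abort_target() and qla2x00_lun_reset() above now fetch explicit request/response queues from the hardware maps and pass them to qla2x00_marker(). A sketch of that call shape, using a stub marker routine and reduced types; req_q_map[0]/rsp_q_map[0] are the base queues, as in the hunks:

#include <linux/types.h>

struct sketch_req_que { int unused; };
struct sketch_rsp_que { int unused; };

struct sketch_hw {
	struct sketch_req_que **req_q_map;
	struct sketch_rsp_que **rsp_q_map;
};

struct sketch_vha { struct sketch_hw *hw; };

/* Stub standing in for qla2x00_marker(); its body is not part of this hunk. */
static int sketch_marker(struct sketch_vha *vha, struct sketch_req_que *req,
			 struct sketch_rsp_que *rsp, uint16_t loop_id,
			 uint16_t lun, uint8_t type)
{
	return 0;
}

static int sketch_sync_id_marker(struct sketch_vha *vha, uint16_t loop_id)
{
	/* Base queues; a multiqueue-aware caller could pass a different pair. */
	struct sketch_req_que *req = vha->hw->req_q_map[0];
	struct sketch_rsp_que *rsp = vha->hw->rsp_q_map[0];

	return sketch_marker(vha, req, rsp, loop_id, 0, 0 /* MK_SYNC_ID */);
}
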
@@ -899,7 +916,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
899 * Kernel context. 916 * Kernel context.
900 */ 917 */
901int 918int
902qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa, 919qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
903 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap) 920 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
904{ 921{
905 int rval; 922 int rval;
@@ -907,15 +924,15 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
907 mbx_cmd_t *mcp = &mc; 924 mbx_cmd_t *mcp = &mc;
908 925
909 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n", 926 DEBUG11(printk("qla2x00_get_adapter_id(%ld): entered.\n",
910 ha->host_no)); 927 vha->host_no));
911 928
912 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 929 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
913 mcp->mb[9] = ha->vp_idx; 930 mcp->mb[9] = vha->vp_idx;
914 mcp->out_mb = MBX_9|MBX_0; 931 mcp->out_mb = MBX_9|MBX_0;
915 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 932 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
916 mcp->tov = MBX_TOV_SECONDS; 933 mcp->tov = MBX_TOV_SECONDS;
917 mcp->flags = 0; 934 mcp->flags = 0;
918 rval = qla2x00_mailbox_command(ha, mcp); 935 rval = qla2x00_mailbox_command(vha, mcp);
919 if (mcp->mb[0] == MBS_COMMAND_ERROR) 936 if (mcp->mb[0] == MBS_COMMAND_ERROR)
920 rval = QLA_COMMAND_ERROR; 937 rval = QLA_COMMAND_ERROR;
921 else if (mcp->mb[0] == MBS_INVALID_COMMAND) 938 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
@@ -932,11 +949,11 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
932 if (rval != QLA_SUCCESS) { 949 if (rval != QLA_SUCCESS) {
933 /*EMPTY*/ 950 /*EMPTY*/
934 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", 951 DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
935 ha->host_no, rval)); 952 vha->host_no, rval));
936 } else { 953 } else {
937 /*EMPTY*/ 954 /*EMPTY*/
938 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", 955 DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
939 ha->host_no)); 956 vha->host_no));
940 } 957 }
941 958
942 return rval; 959 return rval;
@@ -958,7 +975,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
958 * Kernel context. 975 * Kernel context.
959 */ 976 */
960int 977int
961qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov, 978qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
962 uint16_t *r_a_tov) 979 uint16_t *r_a_tov)
963{ 980{
964 int rval; 981 int rval;
@@ -967,19 +984,19 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
967 mbx_cmd_t *mcp = &mc; 984 mbx_cmd_t *mcp = &mc;
968 985
969 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n", 986 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): entered.\n",
970 ha->host_no)); 987 vha->host_no));
971 988
972 mcp->mb[0] = MBC_GET_RETRY_COUNT; 989 mcp->mb[0] = MBC_GET_RETRY_COUNT;
973 mcp->out_mb = MBX_0; 990 mcp->out_mb = MBX_0;
974 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 991 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
975 mcp->tov = MBX_TOV_SECONDS; 992 mcp->tov = MBX_TOV_SECONDS;
976 mcp->flags = 0; 993 mcp->flags = 0;
977 rval = qla2x00_mailbox_command(ha, mcp); 994 rval = qla2x00_mailbox_command(vha, mcp);
978 995
979 if (rval != QLA_SUCCESS) { 996 if (rval != QLA_SUCCESS) {
980 /*EMPTY*/ 997 /*EMPTY*/
981 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n", 998 DEBUG2_3_11(printk("qla2x00_get_retry_cnt(%ld): failed = %x.\n",
982 ha->host_no, mcp->mb[0])); 999 vha->host_no, mcp->mb[0]));
983 } else { 1000 } else {
984 /* Convert returned data and check our values. */ 1001 /* Convert returned data and check our values. */
985 *r_a_tov = mcp->mb[3] / 2; 1002 *r_a_tov = mcp->mb[3] / 2;
@@ -991,7 +1008,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
991 } 1008 }
992 1009
993 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d " 1010 DEBUG11(printk("qla2x00_get_retry_cnt(%ld): done. mb3=%d "
994 "ratov=%d.\n", ha->host_no, mcp->mb[3], ratov)); 1011 "ratov=%d.\n", vha->host_no, mcp->mb[3], ratov));
995 } 1012 }
996 1013
997 return rval; 1014 return rval;
@@ -1015,14 +1032,15 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *ha, uint8_t *retry_cnt, uint8_t *tov,
1015 * Kernel context. 1032 * Kernel context.
1016 */ 1033 */
1017int 1034int
1018qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size) 1035qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1019{ 1036{
1020 int rval; 1037 int rval;
1021 mbx_cmd_t mc; 1038 mbx_cmd_t mc;
1022 mbx_cmd_t *mcp = &mc; 1039 mbx_cmd_t *mcp = &mc;
1040 struct qla_hw_data *ha = vha->hw;
1023 1041
1024 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1042 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1025 ha->host_no)); 1043 vha->host_no));
1026 1044
1027 if (ha->flags.npiv_supported) 1045 if (ha->flags.npiv_supported)
1028 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE; 1046 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
@@ -1040,17 +1058,17 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1040 mcp->buf_size = size; 1058 mcp->buf_size = size;
1041 mcp->flags = MBX_DMA_OUT; 1059 mcp->flags = MBX_DMA_OUT;
1042 mcp->tov = MBX_TOV_SECONDS; 1060 mcp->tov = MBX_TOV_SECONDS;
1043 rval = qla2x00_mailbox_command(ha, mcp); 1061 rval = qla2x00_mailbox_command(vha, mcp);
1044 1062
1045 if (rval != QLA_SUCCESS) { 1063 if (rval != QLA_SUCCESS) {
1046 /*EMPTY*/ 1064 /*EMPTY*/
1047 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x " 1065 DEBUG2_3_11(printk("qla2x00_init_firmware(%ld): failed=%x "
1048 "mb0=%x.\n", 1066 "mb0=%x.\n",
1049 ha->host_no, rval, mcp->mb[0])); 1067 vha->host_no, rval, mcp->mb[0]));
1050 } else { 1068 } else {
1051 /*EMPTY*/ 1069 /*EMPTY*/
1052 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n", 1070 DEBUG11(printk("qla2x00_init_firmware(%ld): done.\n",
1053 ha->host_no)); 1071 vha->host_no));
1054 } 1072 }
1055 1073
1056 return rval; 1074 return rval;
@@ -1073,7 +1091,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1073 * Kernel context. 1091 * Kernel context.
1074 */ 1092 */
1075int 1093int
1076qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt) 1094qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1077{ 1095{
1078 int rval; 1096 int rval;
1079 mbx_cmd_t mc; 1097 mbx_cmd_t mc;
@@ -1081,14 +1099,15 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1081 port_database_t *pd; 1099 port_database_t *pd;
1082 struct port_database_24xx *pd24; 1100 struct port_database_24xx *pd24;
1083 dma_addr_t pd_dma; 1101 dma_addr_t pd_dma;
1102 struct qla_hw_data *ha = vha->hw;
1084 1103
1085 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1104 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1086 1105
1087 pd24 = NULL; 1106 pd24 = NULL;
1088 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1107 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1089 if (pd == NULL) { 1108 if (pd == NULL) {
1090 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database " 1109 DEBUG2_3(printk("%s(%ld): failed to allocate Port Database "
1091 "structure.\n", __func__, ha->host_no)); 1110 "structure.\n", __func__, vha->host_no));
1092 return QLA_MEMORY_ALLOC_FAILED; 1111 return QLA_MEMORY_ALLOC_FAILED;
1093 } 1112 }
1094 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); 1113 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -1100,7 +1119,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1100 mcp->mb[3] = LSW(pd_dma); 1119 mcp->mb[3] = LSW(pd_dma);
1101 mcp->mb[6] = MSW(MSD(pd_dma)); 1120 mcp->mb[6] = MSW(MSD(pd_dma));
1102 mcp->mb[7] = LSW(MSD(pd_dma)); 1121 mcp->mb[7] = LSW(MSD(pd_dma));
1103 mcp->mb[9] = ha->vp_idx; 1122 mcp->mb[9] = vha->vp_idx;
1104 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1123 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1105 mcp->in_mb = MBX_0; 1124 mcp->in_mb = MBX_0;
1106 if (IS_FWI2_CAPABLE(ha)) { 1125 if (IS_FWI2_CAPABLE(ha)) {
@@ -1120,7 +1139,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1120 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE; 1139 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1121 mcp->flags = MBX_DMA_IN; 1140 mcp->flags = MBX_DMA_IN;
1122 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1141 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1123 rval = qla2x00_mailbox_command(ha, mcp); 1142 rval = qla2x00_mailbox_command(vha, mcp);
1124 if (rval != QLA_SUCCESS) 1143 if (rval != QLA_SUCCESS)
1125 goto gpd_error_out; 1144 goto gpd_error_out;
1126 1145
@@ -1132,7 +1151,7 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1132 pd24->last_login_state != PDS_PRLI_COMPLETE) { 1151 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1133 DEBUG2(printk("%s(%ld): Unable to verify " 1152 DEBUG2(printk("%s(%ld): Unable to verify "
1134 "login-state (%x/%x) for loop_id %x\n", 1153 "login-state (%x/%x) for loop_id %x\n",
1135 __func__, ha->host_no, 1154 __func__, vha->host_no,
1136 pd24->current_login_state, 1155 pd24->current_login_state,
1137 pd24->last_login_state, fcport->loop_id)); 1156 pd24->last_login_state, fcport->loop_id));
1138 rval = QLA_FUNCTION_FAILED; 1157 rval = QLA_FUNCTION_FAILED;
@@ -1192,9 +1211,9 @@ gpd_error_out:
1192 1211
1193 if (rval != QLA_SUCCESS) { 1212 if (rval != QLA_SUCCESS) {
1194 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 1213 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
1195 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1214 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1196 } else { 1215 } else {
1197 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1216 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1198 } 1217 }
1199 1218
1200 return rval; 1219 return rval;
@@ -1217,21 +1236,21 @@ gpd_error_out:
1217 * Kernel context. 1236 * Kernel context.
1218 */ 1237 */
1219int 1238int
1220qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states) 1239qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1221{ 1240{
1222 int rval; 1241 int rval;
1223 mbx_cmd_t mc; 1242 mbx_cmd_t mc;
1224 mbx_cmd_t *mcp = &mc; 1243 mbx_cmd_t *mcp = &mc;
1225 1244
1226 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n", 1245 DEBUG11(printk("qla2x00_get_firmware_state(%ld): entered.\n",
1227 ha->host_no)); 1246 vha->host_no));
1228 1247
1229 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1248 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1230 mcp->out_mb = MBX_0; 1249 mcp->out_mb = MBX_0;
1231 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1250 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1232 mcp->tov = MBX_TOV_SECONDS; 1251 mcp->tov = MBX_TOV_SECONDS;
1233 mcp->flags = 0; 1252 mcp->flags = 0;
1234 rval = qla2x00_mailbox_command(ha, mcp); 1253 rval = qla2x00_mailbox_command(vha, mcp);
1235 1254
1236 /* Return firmware states. */ 1255 /* Return firmware states. */
1237 states[0] = mcp->mb[1]; 1256 states[0] = mcp->mb[1];
@@ -1241,11 +1260,11 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
1241 if (rval != QLA_SUCCESS) { 1260 if (rval != QLA_SUCCESS) {
1242 /*EMPTY*/ 1261 /*EMPTY*/
1243 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): " 1262 DEBUG2_3_11(printk("qla2x00_get_firmware_state(%ld): "
1244 "failed=%x.\n", ha->host_no, rval)); 1263 "failed=%x.\n", vha->host_no, rval));
1245 } else { 1264 } else {
1246 /*EMPTY*/ 1265 /*EMPTY*/
1247 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n", 1266 DEBUG11(printk("qla2x00_get_firmware_state(%ld): done.\n",
1248 ha->host_no)); 1267 vha->host_no));
1249 } 1268 }
1250 1269
1251 return rval; 1270 return rval;
@@ -1270,7 +1289,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *ha, uint16_t *states)
1270 * Kernel context. 1289 * Kernel context.
1271 */ 1290 */
1272int 1291int
1273qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name, 1292qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1274 uint8_t opt) 1293 uint8_t opt)
1275{ 1294{
1276 int rval; 1295 int rval;
@@ -1278,12 +1297,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1278 mbx_cmd_t *mcp = &mc; 1297 mbx_cmd_t *mcp = &mc;
1279 1298
1280 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n", 1299 DEBUG11(printk("qla2x00_get_port_name(%ld): entered.\n",
1281 ha->host_no)); 1300 vha->host_no));
1282 1301
1283 mcp->mb[0] = MBC_GET_PORT_NAME; 1302 mcp->mb[0] = MBC_GET_PORT_NAME;
1284 mcp->mb[9] = ha->vp_idx; 1303 mcp->mb[9] = vha->vp_idx;
1285 mcp->out_mb = MBX_9|MBX_1|MBX_0; 1304 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1286 if (HAS_EXTENDED_IDS(ha)) { 1305 if (HAS_EXTENDED_IDS(vha->hw)) {
1287 mcp->mb[1] = loop_id; 1306 mcp->mb[1] = loop_id;
1288 mcp->mb[10] = opt; 1307 mcp->mb[10] = opt;
1289 mcp->out_mb |= MBX_10; 1308 mcp->out_mb |= MBX_10;
@@ -1294,12 +1313,12 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1294 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1313 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1295 mcp->tov = MBX_TOV_SECONDS; 1314 mcp->tov = MBX_TOV_SECONDS;
1296 mcp->flags = 0; 1315 mcp->flags = 0;
1297 rval = qla2x00_mailbox_command(ha, mcp); 1316 rval = qla2x00_mailbox_command(vha, mcp);
1298 1317
1299 if (rval != QLA_SUCCESS) { 1318 if (rval != QLA_SUCCESS) {
1300 /*EMPTY*/ 1319 /*EMPTY*/
1301 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n", 1320 DEBUG2_3_11(printk("qla2x00_get_port_name(%ld): failed=%x.\n",
1302 ha->host_no, rval)); 1321 vha->host_no, rval));
1303 } else { 1322 } else {
1304 if (name != NULL) { 1323 if (name != NULL) {
1305 /* This function returns name in big endian. */ 1324 /* This function returns name in big endian. */
@@ -1314,7 +1333,7 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1314 } 1333 }
1315 1334
1316 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n", 1335 DEBUG11(printk("qla2x00_get_port_name(%ld): done.\n",
1317 ha->host_no)); 1336 vha->host_no));
1318 } 1337 }
1319 1338
1320 return rval; 1339 return rval;
@@ -1336,45 +1355,45 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1336 * Kernel context. 1355 * Kernel context.
1337 */ 1356 */
1338int 1357int
1339qla2x00_lip_reset(scsi_qla_host_t *ha) 1358qla2x00_lip_reset(scsi_qla_host_t *vha)
1340{ 1359{
1341 int rval; 1360 int rval;
1342 mbx_cmd_t mc; 1361 mbx_cmd_t mc;
1343 mbx_cmd_t *mcp = &mc; 1362 mbx_cmd_t *mcp = &mc;
1344 1363
1345 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1364 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1346 1365
1347 if (IS_FWI2_CAPABLE(ha)) { 1366 if (IS_FWI2_CAPABLE(vha->hw)) {
1348 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1367 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1349 mcp->mb[1] = BIT_6; 1368 mcp->mb[1] = BIT_6;
1350 mcp->mb[2] = 0; 1369 mcp->mb[2] = 0;
1351 mcp->mb[3] = ha->loop_reset_delay; 1370 mcp->mb[3] = vha->hw->loop_reset_delay;
1352 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1371 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1353 } else { 1372 } else {
1354 mcp->mb[0] = MBC_LIP_RESET; 1373 mcp->mb[0] = MBC_LIP_RESET;
1355 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1374 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1356 if (HAS_EXTENDED_IDS(ha)) { 1375 if (HAS_EXTENDED_IDS(vha->hw)) {
1357 mcp->mb[1] = 0x00ff; 1376 mcp->mb[1] = 0x00ff;
1358 mcp->mb[10] = 0; 1377 mcp->mb[10] = 0;
1359 mcp->out_mb |= MBX_10; 1378 mcp->out_mb |= MBX_10;
1360 } else { 1379 } else {
1361 mcp->mb[1] = 0xff00; 1380 mcp->mb[1] = 0xff00;
1362 } 1381 }
1363 mcp->mb[2] = ha->loop_reset_delay; 1382 mcp->mb[2] = vha->hw->loop_reset_delay;
1364 mcp->mb[3] = 0; 1383 mcp->mb[3] = 0;
1365 } 1384 }
1366 mcp->in_mb = MBX_0; 1385 mcp->in_mb = MBX_0;
1367 mcp->tov = MBX_TOV_SECONDS; 1386 mcp->tov = MBX_TOV_SECONDS;
1368 mcp->flags = 0; 1387 mcp->flags = 0;
1369 rval = qla2x00_mailbox_command(ha, mcp); 1388 rval = qla2x00_mailbox_command(vha, mcp);
1370 1389
1371 if (rval != QLA_SUCCESS) { 1390 if (rval != QLA_SUCCESS) {
1372 /*EMPTY*/ 1391 /*EMPTY*/
1373 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", 1392 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n",
1374 __func__, ha->host_no, rval)); 1393 __func__, vha->host_no, rval));
1375 } else { 1394 } else {
1376 /*EMPTY*/ 1395 /*EMPTY*/
1377 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1396 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1378 } 1397 }
1379 1398
1380 return rval; 1399 return rval;
@@ -1399,7 +1418,7 @@ qla2x00_lip_reset(scsi_qla_host_t *ha)
1399 * Kernel context. 1418 * Kernel context.
1400 */ 1419 */
1401int 1420int
1402qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address, 1421qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1403 uint16_t cmd_size, size_t buf_size) 1422 uint16_t cmd_size, size_t buf_size)
1404{ 1423{
1405 int rval; 1424 int rval;
@@ -1407,10 +1426,11 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1407 mbx_cmd_t *mcp = &mc; 1426 mbx_cmd_t *mcp = &mc;
1408 1427
1409 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n", 1428 DEBUG11(printk("qla2x00_send_sns(%ld): entered.\n",
1410 ha->host_no)); 1429 vha->host_no));
1411 1430
1412 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total " 1431 DEBUG11(printk("qla2x00_send_sns: retry cnt=%d ratov=%d total "
1413 "tov=%d.\n", ha->retry_count, ha->login_timeout, mcp->tov)); 1432 "tov=%d.\n", vha->hw->retry_count, vha->hw->login_timeout,
1433 mcp->tov));
1414 1434
1415 mcp->mb[0] = MBC_SEND_SNS_COMMAND; 1435 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1416 mcp->mb[1] = cmd_size; 1436 mcp->mb[1] = cmd_size;
@@ -1422,25 +1442,25 @@ qla2x00_send_sns(scsi_qla_host_t *ha, dma_addr_t sns_phys_address,
1422 mcp->in_mb = MBX_0|MBX_1; 1442 mcp->in_mb = MBX_0|MBX_1;
1423 mcp->buf_size = buf_size; 1443 mcp->buf_size = buf_size;
1424 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN; 1444 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
1425 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1445 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1426 rval = qla2x00_mailbox_command(ha, mcp); 1446 rval = qla2x00_mailbox_command(vha, mcp);
1427 1447
1428 if (rval != QLA_SUCCESS) { 1448 if (rval != QLA_SUCCESS) {
1429 /*EMPTY*/ 1449 /*EMPTY*/
1430 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1450 DEBUG(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1431 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1451 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1432 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x " 1452 DEBUG2_3_11(printk("qla2x00_send_sns(%ld): failed=%x mb[0]=%x "
1433 "mb[1]=%x.\n", ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 1453 "mb[1]=%x.\n", vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
1434 } else { 1454 } else {
1435 /*EMPTY*/ 1455 /*EMPTY*/
1436 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", ha->host_no)); 1456 DEBUG11(printk("qla2x00_send_sns(%ld): done.\n", vha->host_no));
1437 } 1457 }
1438 1458
1439 return rval; 1459 return rval;
1440} 1460}
1441 1461
1442int 1462int
1443qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1463qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1444 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 1464 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1445{ 1465{
1446 int rval; 1466 int rval;
@@ -1448,13 +1468,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1448 struct logio_entry_24xx *lg; 1468 struct logio_entry_24xx *lg;
1449 dma_addr_t lg_dma; 1469 dma_addr_t lg_dma;
1450 uint32_t iop[2]; 1470 uint32_t iop[2];
1471 struct qla_hw_data *ha = vha->hw;
1451 1472
1452 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1473 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1453 1474
1454 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1475 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1455 if (lg == NULL) { 1476 if (lg == NULL) {
1456 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", 1477 DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
1457 __func__, ha->host_no)); 1478 __func__, vha->host_no));
1458 return QLA_MEMORY_ALLOC_FAILED; 1479 return QLA_MEMORY_ALLOC_FAILED;
1459 } 1480 }
1460 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1481 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1470,14 +1491,14 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1470 lg->port_id[0] = al_pa; 1491 lg->port_id[0] = al_pa;
1471 lg->port_id[1] = area; 1492 lg->port_id[1] = area;
1472 lg->port_id[2] = domain; 1493 lg->port_id[2] = domain;
1473 lg->vp_index = ha->vp_idx; 1494 lg->vp_index = vha->vp_idx;
1474 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1495 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1475 if (rval != QLA_SUCCESS) { 1496 if (rval != QLA_SUCCESS) {
1476 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1497 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
1477 "(%x).\n", __func__, ha->host_no, rval)); 1498 "(%x).\n", __func__, vha->host_no, rval));
1478 } else if (lg->entry_status != 0) { 1499 } else if (lg->entry_status != 0) {
1479 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1500 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1480 "-- error status (%x).\n", __func__, ha->host_no, 1501 "-- error status (%x).\n", __func__, vha->host_no,
1481 lg->entry_status)); 1502 lg->entry_status));
1482 rval = QLA_FUNCTION_FAILED; 1503 rval = QLA_FUNCTION_FAILED;
1483 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1504 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
@@ -1486,7 +1507,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1486 1507
1487 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1508 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1488 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1509 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1489 ha->host_no, le16_to_cpu(lg->comp_status), iop[0], 1510 vha->host_no, le16_to_cpu(lg->comp_status), iop[0],
1490 iop[1])); 1511 iop[1]));
1491 1512
1492 switch (iop[0]) { 1513 switch (iop[0]) {
@@ -1515,7 +1536,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1515 break; 1536 break;
1516 } 1537 }
1517 } else { 1538 } else {
1518 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1539 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1519 1540
1520 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1541 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1521 1542
@@ -1562,14 +1583,15 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1562 * Kernel context. 1583 * Kernel context.
1563 */ 1584 */
1564int 1585int
1565qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1586qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1566 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt) 1587 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
1567{ 1588{
1568 int rval; 1589 int rval;
1569 mbx_cmd_t mc; 1590 mbx_cmd_t mc;
1570 mbx_cmd_t *mcp = &mc; 1591 mbx_cmd_t *mcp = &mc;
1592 struct qla_hw_data *ha = vha->hw;
1571 1593
1572 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", ha->host_no)); 1594 DEBUG11(printk("qla2x00_login_fabric(%ld): entered.\n", vha->host_no));
1573 1595
1574 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1596 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1575 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1597 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1586,7 +1608,7 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1586 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0; 1608 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1587 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1609 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1588 mcp->flags = 0; 1610 mcp->flags = 0;
1589 rval = qla2x00_mailbox_command(ha, mcp); 1611 rval = qla2x00_mailbox_command(vha, mcp);
1590 1612
1591 /* Return mailbox statuses. */ 1613 /* Return mailbox statuses. */
1592 if (mb != NULL) { 1614 if (mb != NULL) {
@@ -1613,12 +1635,12 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1613 1635
1614 /*EMPTY*/ 1636 /*EMPTY*/
1615 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x " 1637 DEBUG2_3_11(printk("qla2x00_login_fabric(%ld): failed=%x "
1616 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", ha->host_no, rval, 1638 "mb[0]=%x mb[1]=%x mb[2]=%x.\n", vha->host_no, rval,
1617 mcp->mb[0], mcp->mb[1], mcp->mb[2])); 1639 mcp->mb[0], mcp->mb[1], mcp->mb[2]));
1618 } else { 1640 } else {
1619 /*EMPTY*/ 1641 /*EMPTY*/
1620 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n", 1642 DEBUG11(printk("qla2x00_login_fabric(%ld): done.\n",
1621 ha->host_no)); 1643 vha->host_no));
1622 } 1644 }
1623 1645
1624 return rval; 1646 return rval;
@@ -1641,19 +1663,20 @@ qla2x00_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1641 * 1663 *
1642 */ 1664 */
1643int 1665int
1644qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport, 1666qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1645 uint16_t *mb_ret, uint8_t opt) 1667 uint16_t *mb_ret, uint8_t opt)
1646{ 1668{
1647 int rval; 1669 int rval;
1648 mbx_cmd_t mc; 1670 mbx_cmd_t mc;
1649 mbx_cmd_t *mcp = &mc; 1671 mbx_cmd_t *mcp = &mc;
1672 struct qla_hw_data *ha = vha->hw;
1650 1673
1651 if (IS_FWI2_CAPABLE(ha)) 1674 if (IS_FWI2_CAPABLE(ha))
1652 return qla24xx_login_fabric(ha, fcport->loop_id, 1675 return qla24xx_login_fabric(vha, fcport->loop_id,
1653 fcport->d_id.b.domain, fcport->d_id.b.area, 1676 fcport->d_id.b.domain, fcport->d_id.b.area,
1654 fcport->d_id.b.al_pa, mb_ret, opt); 1677 fcport->d_id.b.al_pa, mb_ret, opt);
1655 1678
1656 DEBUG3(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1679 DEBUG3(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1657 1680
1658 mcp->mb[0] = MBC_LOGIN_LOOP_PORT; 1681 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1659 if (HAS_EXTENDED_IDS(ha)) 1682 if (HAS_EXTENDED_IDS(ha))
@@ -1665,7 +1688,7 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1665 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0; 1688 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
1666 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 1689 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1667 mcp->flags = 0; 1690 mcp->flags = 0;
1668 rval = qla2x00_mailbox_command(ha, mcp); 1691 rval = qla2x00_mailbox_command(vha, mcp);
1669 1692
1670 /* Return mailbox statuses. */ 1693 /* Return mailbox statuses. */
1671 if (mb_ret != NULL) { 1694 if (mb_ret != NULL) {
@@ -1686,33 +1709,34 @@ qla2x00_login_local_device(scsi_qla_host_t *ha, fc_port_t *fcport,
1686 rval = QLA_SUCCESS; 1709 rval = QLA_SUCCESS;
1687 1710
1688 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1711 DEBUG(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1689 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1712 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1690 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1713 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1691 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x " 1714 DEBUG2_3(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x "
1692 "mb[6]=%x mb[7]=%x.\n", __func__, ha->host_no, rval, 1715 "mb[6]=%x mb[7]=%x.\n", __func__, vha->host_no, rval,
1693 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7])); 1716 mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]));
1694 } else { 1717 } else {
1695 /*EMPTY*/ 1718 /*EMPTY*/
1696 DEBUG3(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1719 DEBUG3(printk("%s(%ld): done.\n", __func__, vha->host_no));
1697 } 1720 }
1698 1721
1699 return (rval); 1722 return (rval);
1700} 1723}
1701 1724
1702int 1725int
1703qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1726qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1704 uint8_t area, uint8_t al_pa) 1727 uint8_t area, uint8_t al_pa)
1705{ 1728{
1706 int rval; 1729 int rval;
1707 struct logio_entry_24xx *lg; 1730 struct logio_entry_24xx *lg;
1708 dma_addr_t lg_dma; 1731 dma_addr_t lg_dma;
1732 struct qla_hw_data *ha = vha->hw;
1709 1733
1710 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1734 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1711 1735
1712 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 1736 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1713 if (lg == NULL) { 1737 if (lg == NULL) {
1714 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n", 1738 DEBUG2_3(printk("%s(%ld): failed to allocate Logout IOCB.\n",
1715 __func__, ha->host_no)); 1739 __func__, vha->host_no));
1716 return QLA_MEMORY_ALLOC_FAILED; 1740 return QLA_MEMORY_ALLOC_FAILED;
1717 } 1741 }
1718 memset(lg, 0, sizeof(struct logio_entry_24xx)); 1742 memset(lg, 0, sizeof(struct logio_entry_24xx));
@@ -1725,25 +1749,26 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1725 lg->port_id[0] = al_pa; 1749 lg->port_id[0] = al_pa;
1726 lg->port_id[1] = area; 1750 lg->port_id[1] = area;
1727 lg->port_id[2] = domain; 1751 lg->port_id[2] = domain;
1728 lg->vp_index = ha->vp_idx; 1752 lg->vp_index = vha->vp_idx;
1729 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1753
1754 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1730 if (rval != QLA_SUCCESS) { 1755 if (rval != QLA_SUCCESS) {
1731 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1756 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
1732 "(%x).\n", __func__, ha->host_no, rval)); 1757 "(%x).\n", __func__, vha->host_no, rval));
1733 } else if (lg->entry_status != 0) { 1758 } else if (lg->entry_status != 0) {
1734 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1759 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
1735 "-- error status (%x).\n", __func__, ha->host_no, 1760 "-- error status (%x).\n", __func__, vha->host_no,
1736 lg->entry_status)); 1761 lg->entry_status));
1737 rval = QLA_FUNCTION_FAILED; 1762 rval = QLA_FUNCTION_FAILED;
1738 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1763 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1739 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 1764 DEBUG2_3_11(printk("%s(%ld %d): failed to complete IOCB "
1740 "-- completion status (%x) ioparam=%x/%x.\n", __func__, 1765 "-- completion status (%x) ioparam=%x/%x.\n", __func__,
1741 ha->host_no, le16_to_cpu(lg->comp_status), 1766 vha->host_no, vha->vp_idx, le16_to_cpu(lg->comp_status),
1742 le32_to_cpu(lg->io_parameter[0]), 1767 le32_to_cpu(lg->io_parameter[0]),
1743 le32_to_cpu(lg->io_parameter[1]))); 1768 le32_to_cpu(lg->io_parameter[1])));
1744 } else { 1769 } else {
1745 /*EMPTY*/ 1770 /*EMPTY*/
1746 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 1771 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
1747 } 1772 }
1748 1773
1749 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1774 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1768,7 +1793,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1768 * Kernel context. 1793 * Kernel context.
1769 */ 1794 */
1770int 1795int
1771qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain, 1796qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1772 uint8_t area, uint8_t al_pa) 1797 uint8_t area, uint8_t al_pa)
1773{ 1798{
1774 int rval; 1799 int rval;
@@ -1776,11 +1801,11 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1776 mbx_cmd_t *mcp = &mc; 1801 mbx_cmd_t *mcp = &mc;
1777 1802
1778 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n", 1803 DEBUG11(printk("qla2x00_fabric_logout(%ld): entered.\n",
1779 ha->host_no)); 1804 vha->host_no));
1780 1805
1781 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 1806 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1782 mcp->out_mb = MBX_1|MBX_0; 1807 mcp->out_mb = MBX_1|MBX_0;
1783 if (HAS_EXTENDED_IDS(ha)) { 1808 if (HAS_EXTENDED_IDS(vha->hw)) {
1784 mcp->mb[1] = loop_id; 1809 mcp->mb[1] = loop_id;
1785 mcp->mb[10] = 0; 1810 mcp->mb[10] = 0;
1786 mcp->out_mb |= MBX_10; 1811 mcp->out_mb |= MBX_10;
@@ -1791,16 +1816,16 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1791 mcp->in_mb = MBX_1|MBX_0; 1816 mcp->in_mb = MBX_1|MBX_0;
1792 mcp->tov = MBX_TOV_SECONDS; 1817 mcp->tov = MBX_TOV_SECONDS;
1793 mcp->flags = 0; 1818 mcp->flags = 0;
1794 rval = qla2x00_mailbox_command(ha, mcp); 1819 rval = qla2x00_mailbox_command(vha, mcp);
1795 1820
1796 if (rval != QLA_SUCCESS) { 1821 if (rval != QLA_SUCCESS) {
1797 /*EMPTY*/ 1822 /*EMPTY*/
1798 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x " 1823 DEBUG2_3_11(printk("qla2x00_fabric_logout(%ld): failed=%x "
1799 "mbx1=%x.\n", ha->host_no, rval, mcp->mb[1])); 1824 "mbx1=%x.\n", vha->host_no, rval, mcp->mb[1]));
1800 } else { 1825 } else {
1801 /*EMPTY*/ 1826 /*EMPTY*/
1802 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n", 1827 DEBUG11(printk("qla2x00_fabric_logout(%ld): done.\n",
1803 ha->host_no)); 1828 vha->host_no));
1804 } 1829 }
1805 1830
1806 return rval; 1831 return rval;
@@ -1822,33 +1847,33 @@ qla2x00_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1822 * Kernel context. 1847 * Kernel context.
1823 */ 1848 */
1824int 1849int
1825qla2x00_full_login_lip(scsi_qla_host_t *ha) 1850qla2x00_full_login_lip(scsi_qla_host_t *vha)
1826{ 1851{
1827 int rval; 1852 int rval;
1828 mbx_cmd_t mc; 1853 mbx_cmd_t mc;
1829 mbx_cmd_t *mcp = &mc; 1854 mbx_cmd_t *mcp = &mc;
1830 1855
1831 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", 1856 DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
1832 ha->host_no)); 1857 vha->host_no));
1833 1858
1834 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1859 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1835 mcp->mb[1] = IS_FWI2_CAPABLE(ha) ? BIT_3: 0; 1860 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
1836 mcp->mb[2] = 0; 1861 mcp->mb[2] = 0;
1837 mcp->mb[3] = 0; 1862 mcp->mb[3] = 0;
1838 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1863 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1839 mcp->in_mb = MBX_0; 1864 mcp->in_mb = MBX_0;
1840 mcp->tov = MBX_TOV_SECONDS; 1865 mcp->tov = MBX_TOV_SECONDS;
1841 mcp->flags = 0; 1866 mcp->flags = 0;
1842 rval = qla2x00_mailbox_command(ha, mcp); 1867 rval = qla2x00_mailbox_command(vha, mcp);
1843 1868
1844 if (rval != QLA_SUCCESS) { 1869 if (rval != QLA_SUCCESS) {
1845 /*EMPTY*/ 1870 /*EMPTY*/
1846 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n", 1871 DEBUG2_3_11(printk("qla2x00_full_login_lip(%ld): failed=%x.\n",
1847 ha->host_no, rval)); 1872 vha->host_no, rval));
1848 } else { 1873 } else {
1849 /*EMPTY*/ 1874 /*EMPTY*/
1850 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n", 1875 DEBUG11(printk("qla2x00_full_login_lip(%ld): done.\n",
1851 ha->host_no)); 1876 vha->host_no));
1852 } 1877 }
1853 1878
1854 return rval; 1879 return rval;
@@ -1867,7 +1892,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *ha)
1867 * Kernel context. 1892 * Kernel context.
1868 */ 1893 */
1869int 1894int
1870qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma, 1895qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
1871 uint16_t *entries) 1896 uint16_t *entries)
1872{ 1897{
1873 int rval; 1898 int rval;
@@ -1875,20 +1900,20 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1875 mbx_cmd_t *mcp = &mc; 1900 mbx_cmd_t *mcp = &mc;
1876 1901
1877 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n", 1902 DEBUG11(printk("qla2x00_get_id_list(%ld): entered.\n",
1878 ha->host_no)); 1903 vha->host_no));
1879 1904
1880 if (id_list == NULL) 1905 if (id_list == NULL)
1881 return QLA_FUNCTION_FAILED; 1906 return QLA_FUNCTION_FAILED;
1882 1907
1883 mcp->mb[0] = MBC_GET_ID_LIST; 1908 mcp->mb[0] = MBC_GET_ID_LIST;
1884 mcp->out_mb = MBX_0; 1909 mcp->out_mb = MBX_0;
1885 if (IS_FWI2_CAPABLE(ha)) { 1910 if (IS_FWI2_CAPABLE(vha->hw)) {
1886 mcp->mb[2] = MSW(id_list_dma); 1911 mcp->mb[2] = MSW(id_list_dma);
1887 mcp->mb[3] = LSW(id_list_dma); 1912 mcp->mb[3] = LSW(id_list_dma);
1888 mcp->mb[6] = MSW(MSD(id_list_dma)); 1913 mcp->mb[6] = MSW(MSD(id_list_dma));
1889 mcp->mb[7] = LSW(MSD(id_list_dma)); 1914 mcp->mb[7] = LSW(MSD(id_list_dma));
1890 mcp->mb[8] = 0; 1915 mcp->mb[8] = 0;
1891 mcp->mb[9] = ha->vp_idx; 1916 mcp->mb[9] = vha->vp_idx;
1892 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 1917 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
1893 } else { 1918 } else {
1894 mcp->mb[1] = MSW(id_list_dma); 1919 mcp->mb[1] = MSW(id_list_dma);
@@ -1900,16 +1925,16 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1900 mcp->in_mb = MBX_1|MBX_0; 1925 mcp->in_mb = MBX_1|MBX_0;
1901 mcp->tov = MBX_TOV_SECONDS; 1926 mcp->tov = MBX_TOV_SECONDS;
1902 mcp->flags = 0; 1927 mcp->flags = 0;
1903 rval = qla2x00_mailbox_command(ha, mcp); 1928 rval = qla2x00_mailbox_command(vha, mcp);
1904 1929
1905 if (rval != QLA_SUCCESS) { 1930 if (rval != QLA_SUCCESS) {
1906 /*EMPTY*/ 1931 /*EMPTY*/
1907 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n", 1932 DEBUG2_3_11(printk("qla2x00_get_id_list(%ld): failed=%x.\n",
1908 ha->host_no, rval)); 1933 vha->host_no, rval));
1909 } else { 1934 } else {
1910 *entries = mcp->mb[1]; 1935 *entries = mcp->mb[1];
1911 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n", 1936 DEBUG11(printk("qla2x00_get_id_list(%ld): done.\n",
1912 ha->host_no)); 1937 vha->host_no));
1913 } 1938 }
1914 1939
1915 return rval; 1940 return rval;
@@ -1929,7 +1954,7 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1929 * Kernel context. 1954 * Kernel context.
1930 */ 1955 */
1931int 1956int
1932qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt, 1957qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
1933 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt, 1958 uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
1934 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports) 1959 uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports)
1935{ 1960{
@@ -1937,22 +1962,22 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1937 mbx_cmd_t mc; 1962 mbx_cmd_t mc;
1938 mbx_cmd_t *mcp = &mc; 1963 mbx_cmd_t *mcp = &mc;
1939 1964
1940 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 1965 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
1941 1966
1942 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 1967 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
1943 mcp->out_mb = MBX_0; 1968 mcp->out_mb = MBX_0;
1944 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1969 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1945 mcp->tov = MBX_TOV_SECONDS; 1970 mcp->tov = MBX_TOV_SECONDS;
1946 mcp->flags = 0; 1971 mcp->flags = 0;
1947 rval = qla2x00_mailbox_command(ha, mcp); 1972 rval = qla2x00_mailbox_command(vha, mcp);
1948 1973
1949 if (rval != QLA_SUCCESS) { 1974 if (rval != QLA_SUCCESS) {
1950 /*EMPTY*/ 1975 /*EMPTY*/
1951 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__, 1976 DEBUG2_3_11(printk("%s(%ld): failed = %x.\n", __func__,
1952 ha->host_no, mcp->mb[0])); 1977 vha->host_no, mcp->mb[0]));
1953 } else { 1978 } else {
1954 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x " 1979 DEBUG11(printk("%s(%ld): done. mb1=%x mb2=%x mb3=%x mb6=%x "
1955 "mb7=%x mb10=%x mb11=%x.\n", __func__, ha->host_no, 1980 "mb7=%x mb10=%x mb11=%x.\n", __func__, vha->host_no,
1956 mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7], 1981 mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[6], mcp->mb[7],
1957 mcp->mb[10], mcp->mb[11])); 1982 mcp->mb[10], mcp->mb[11]));
1958 1983
@@ -1964,7 +1989,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1964 *cur_iocb_cnt = mcp->mb[7]; 1989 *cur_iocb_cnt = mcp->mb[7];
1965 if (orig_iocb_cnt) 1990 if (orig_iocb_cnt)
1966 *orig_iocb_cnt = mcp->mb[10]; 1991 *orig_iocb_cnt = mcp->mb[10];
1967 if (ha->flags.npiv_supported && max_npiv_vports) 1992 if (vha->hw->flags.npiv_supported && max_npiv_vports)
1968 *max_npiv_vports = mcp->mb[11]; 1993 *max_npiv_vports = mcp->mb[11];
1969 } 1994 }
1970 1995
@@ -1987,18 +2012,19 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *ha, uint16_t *cur_xchg_cnt,
1987 * Kernel context. 2012 * Kernel context.
1988 */ 2013 */
1989int 2014int
1990qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map) 2015qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
1991{ 2016{
1992 int rval; 2017 int rval;
1993 mbx_cmd_t mc; 2018 mbx_cmd_t mc;
1994 mbx_cmd_t *mcp = &mc; 2019 mbx_cmd_t *mcp = &mc;
1995 char *pmap; 2020 char *pmap;
1996 dma_addr_t pmap_dma; 2021 dma_addr_t pmap_dma;
2022 struct qla_hw_data *ha = vha->hw;
1997 2023
1998 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2024 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
1999 if (pmap == NULL) { 2025 if (pmap == NULL) {
2000 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****", 2026 DEBUG2_3_11(printk("%s(%ld): **** Mem Alloc Failed ****",
2001 __func__, ha->host_no)); 2027 __func__, vha->host_no));
2002 return QLA_MEMORY_ALLOC_FAILED; 2028 return QLA_MEMORY_ALLOC_FAILED;
2003 } 2029 }
2004 memset(pmap, 0, FCAL_MAP_SIZE); 2030 memset(pmap, 0, FCAL_MAP_SIZE);
@@ -2013,11 +2039,11 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2013 mcp->buf_size = FCAL_MAP_SIZE; 2039 mcp->buf_size = FCAL_MAP_SIZE;
2014 mcp->flags = MBX_DMA_IN; 2040 mcp->flags = MBX_DMA_IN;
2015 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2); 2041 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2016 rval = qla2x00_mailbox_command(ha, mcp); 2042 rval = qla2x00_mailbox_command(vha, mcp);
2017 2043
2018 if (rval == QLA_SUCCESS) { 2044 if (rval == QLA_SUCCESS) {
2019 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map " 2045 DEBUG11(printk("%s(%ld): (mb0=%x/mb1=%x) FC/AL Position Map "
2020 "size (%x)\n", __func__, ha->host_no, mcp->mb[0], 2046 "size (%x)\n", __func__, vha->host_no, mcp->mb[0],
2021 mcp->mb[1], (unsigned)pmap[0])); 2047 mcp->mb[1], (unsigned)pmap[0]));
2022 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1)); 2048 DEBUG11(qla2x00_dump_buffer(pmap, pmap[0] + 1));
2023 2049
@@ -2028,9 +2054,9 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2028 2054
2029 if (rval != QLA_SUCCESS) { 2055 if (rval != QLA_SUCCESS) {
2030 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2056 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2031 ha->host_no, rval)); 2057 vha->host_no, rval));
2032 } else { 2058 } else {
2033 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2059 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2034 } 2060 }
2035 2061
2036 return rval; 2062 return rval;
@@ -2051,15 +2077,16 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map)
2051 * BIT_1 = mailbox error. 2077 * BIT_1 = mailbox error.
2052 */ 2078 */
2053int 2079int
2054qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id, 2080qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2055 struct link_statistics *stats, dma_addr_t stats_dma) 2081 struct link_statistics *stats, dma_addr_t stats_dma)
2056{ 2082{
2057 int rval; 2083 int rval;
2058 mbx_cmd_t mc; 2084 mbx_cmd_t mc;
2059 mbx_cmd_t *mcp = &mc; 2085 mbx_cmd_t *mcp = &mc;
2060 uint32_t *siter, *diter, dwords; 2086 uint32_t *siter, *diter, dwords;
2087 struct qla_hw_data *ha = vha->hw;
2061 2088
2062 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2089 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2063 2090
2064 mcp->mb[0] = MBC_GET_LINK_STATUS; 2091 mcp->mb[0] = MBC_GET_LINK_STATUS;
2065 mcp->mb[2] = MSW(stats_dma); 2092 mcp->mb[2] = MSW(stats_dma);
@@ -2084,12 +2111,12 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2084 } 2111 }
2085 mcp->tov = MBX_TOV_SECONDS; 2112 mcp->tov = MBX_TOV_SECONDS;
2086 mcp->flags = IOCTL_CMD; 2113 mcp->flags = IOCTL_CMD;
2087 rval = qla2x00_mailbox_command(ha, mcp); 2114 rval = qla2x00_mailbox_command(vha, mcp);
2088 2115
2089 if (rval == QLA_SUCCESS) { 2116 if (rval == QLA_SUCCESS) {
2090 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2117 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2091 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2118 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2092 __func__, ha->host_no, mcp->mb[0])); 2119 __func__, vha->host_no, mcp->mb[0]));
2093 rval = QLA_FUNCTION_FAILED; 2120 rval = QLA_FUNCTION_FAILED;
2094 } else { 2121 } else {
2095 /* Copy over data -- firmware data is LE. */ 2122 /* Copy over data -- firmware data is LE. */
@@ -2101,14 +2128,14 @@ qla2x00_get_link_status(scsi_qla_host_t *ha, uint16_t loop_id,
2101 } else { 2128 } else {
2102 /* Failed. */ 2129 /* Failed. */
2103 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2130 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2104 ha->host_no, rval)); 2131 vha->host_no, rval));
2105 } 2132 }
2106 2133
2107 return rval; 2134 return rval;
2108} 2135}
2109 2136
2110int 2137int
2111qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats, 2138qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2112 dma_addr_t stats_dma) 2139 dma_addr_t stats_dma)
2113{ 2140{
2114 int rval; 2141 int rval;
@@ -2116,7 +2143,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2116 mbx_cmd_t *mcp = &mc; 2143 mbx_cmd_t *mcp = &mc;
2117 uint32_t *siter, *diter, dwords; 2144 uint32_t *siter, *diter, dwords;
2118 2145
2119 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2146 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2120 2147
2121 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2148 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2122 mcp->mb[2] = MSW(stats_dma); 2149 mcp->mb[2] = MSW(stats_dma);
@@ -2124,18 +2151,18 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2124 mcp->mb[6] = MSW(MSD(stats_dma)); 2151 mcp->mb[6] = MSW(MSD(stats_dma));
2125 mcp->mb[7] = LSW(MSD(stats_dma)); 2152 mcp->mb[7] = LSW(MSD(stats_dma));
2126 mcp->mb[8] = sizeof(struct link_statistics) / 4; 2153 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2127 mcp->mb[9] = ha->vp_idx; 2154 mcp->mb[9] = vha->vp_idx;
2128 mcp->mb[10] = 0; 2155 mcp->mb[10] = 0;
2129 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 2156 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2130 mcp->in_mb = MBX_2|MBX_1|MBX_0; 2157 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2131 mcp->tov = MBX_TOV_SECONDS; 2158 mcp->tov = MBX_TOV_SECONDS;
2132 mcp->flags = IOCTL_CMD; 2159 mcp->flags = IOCTL_CMD;
2133 rval = qla2x00_mailbox_command(ha, mcp); 2160 rval = qla2x00_mailbox_command(vha, mcp);
2134 2161
2135 if (rval == QLA_SUCCESS) { 2162 if (rval == QLA_SUCCESS) {
2136 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2163 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2137 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n", 2164 DEBUG2_3_11(printk("%s(%ld): cmd failed. mbx0=%x.\n",
2138 __func__, ha->host_no, mcp->mb[0])); 2165 __func__, vha->host_no, mcp->mb[0]));
2139 rval = QLA_FUNCTION_FAILED; 2166 rval = QLA_FUNCTION_FAILED;
2140 } else { 2167 } else {
2141 /* Copy over data -- firmware data is LE. */ 2168 /* Copy over data -- firmware data is LE. */
@@ -2147,14 +2174,14 @@ qla24xx_get_isp_stats(scsi_qla_host_t *ha, struct link_statistics *stats,
2147 } else { 2174 } else {
2148 /* Failed. */ 2175 /* Failed. */
2149 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2176 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2150 ha->host_no, rval)); 2177 vha->host_no, rval));
2151 } 2178 }
2152 2179
2153 return rval; 2180 return rval;
2154} 2181}
2155 2182
2156int 2183int
2157qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp) 2184qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
2158{ 2185{
2159 int rval; 2186 int rval;
2160 fc_port_t *fcport; 2187 fc_port_t *fcport;
@@ -2163,18 +2190,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2163 struct abort_entry_24xx *abt; 2190 struct abort_entry_24xx *abt;
2164 dma_addr_t abt_dma; 2191 dma_addr_t abt_dma;
2165 uint32_t handle; 2192 uint32_t handle;
2166 scsi_qla_host_t *pha = to_qla_parent(ha); 2193 struct qla_hw_data *ha = vha->hw;
2167 2194
2168 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2195 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2169 2196
2170 fcport = sp->fcport; 2197 fcport = sp->fcport;
2171 2198
2172 spin_lock_irqsave(&pha->hardware_lock, flags); 2199 spin_lock_irqsave(&ha->hardware_lock, flags);
2173 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2200 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2174 if (pha->outstanding_cmds[handle] == sp) 2201 if (req->outstanding_cmds[handle] == sp)
2175 break; 2202 break;
2176 } 2203 }
2177 spin_unlock_irqrestore(&pha->hardware_lock, flags); 2204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2178 if (handle == MAX_OUTSTANDING_COMMANDS) { 2205 if (handle == MAX_OUTSTANDING_COMMANDS) {
2179 /* Command not found. */ 2206 /* Command not found. */
2180 return QLA_FUNCTION_FAILED; 2207 return QLA_FUNCTION_FAILED;
@@ -2183,7 +2210,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2183 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); 2210 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2184 if (abt == NULL) { 2211 if (abt == NULL) {
2185 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n", 2212 DEBUG2_3(printk("%s(%ld): failed to allocate Abort IOCB.\n",
2186 __func__, ha->host_no)); 2213 __func__, vha->host_no));
2187 return QLA_MEMORY_ALLOC_FAILED; 2214 return QLA_MEMORY_ALLOC_FAILED;
2188 } 2215 }
2189 memset(abt, 0, sizeof(struct abort_entry_24xx)); 2216 memset(abt, 0, sizeof(struct abort_entry_24xx));
@@ -2196,22 +2223,25 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2196 abt->port_id[1] = fcport->d_id.b.area; 2223 abt->port_id[1] = fcport->d_id.b.area;
2197 abt->port_id[2] = fcport->d_id.b.domain; 2224 abt->port_id[2] = fcport->d_id.b.domain;
2198 abt->vp_index = fcport->vp_idx; 2225 abt->vp_index = fcport->vp_idx;
2199 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0); 2226
2227 abt->req_que_no = cpu_to_le16(req->id);
2228
2229 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2200 if (rval != QLA_SUCCESS) { 2230 if (rval != QLA_SUCCESS) {
2201 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2231 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
2202 __func__, ha->host_no, rval)); 2232 __func__, vha->host_no, rval));
2203 } else if (abt->entry_status != 0) { 2233 } else if (abt->entry_status != 0) {
2204 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2234 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2205 "-- error status (%x).\n", __func__, ha->host_no, 2235 "-- error status (%x).\n", __func__, vha->host_no,
2206 abt->entry_status)); 2236 abt->entry_status));
2207 rval = QLA_FUNCTION_FAILED; 2237 rval = QLA_FUNCTION_FAILED;
2208 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2238 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2209 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2239 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2210 "-- completion status (%x).\n", __func__, ha->host_no, 2240 "-- completion status (%x).\n", __func__, vha->host_no,
2211 le16_to_cpu(abt->nport_handle))); 2241 le16_to_cpu(abt->nport_handle)));
2212 rval = QLA_FUNCTION_FAILED; 2242 rval = QLA_FUNCTION_FAILED;
2213 } else { 2243 } else {
2214 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2244 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2215 } 2245 }
2216 2246
2217 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2247 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
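
With multiple request queues, the abort path can no longer scan a single adapter-wide table: qla24xx_abort_command() now takes the req_que the command was issued on, looks the srb up in that queue's outstanding_cmds[] (under ha->hardware_lock obtained from vha->hw), and records the queue id in the IOCB's new req_que_no field so the firmware aborts it on the correct queue. A simplified, compilable model of that lookup is below; the structure layouts and the MAX_OUTSTANDING_COMMANDS value are stand-ins, not the driver's.

/*
 * Simplified model of the per-request-queue command lookup performed by
 * qla24xx_abort_command() above.  Real definitions live in qla_def.h;
 * these are stand-ins sized for the demonstration.
 */
#include <stdint.h>

#define MAX_OUTSTANDING_COMMANDS 1024

struct srb { int unused; };                   /* opaque command handle */

struct req_que {
        uint16_t id;                          /* queue number reported to firmware */
        struct srb *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
};

struct abort_iocb {                           /* stand-in for abort_entry_24xx */
        uint32_t handle;                      /* index of the command to abort */
        uint16_t req_que_no;                  /* request queue that owns it */
};

/*
 * Fill *abt for command sp if it is still outstanding on req.
 * In the driver this scan runs under ha->hardware_lock.
 */
static int build_abort(struct req_que *req, struct srb *sp,
    struct abort_iocb *abt)
{
        uint32_t handle;

        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++)
                if (req->outstanding_cmds[handle] == sp)
                        break;
        if (handle == MAX_OUTSTANDING_COMMANDS)
                return -1;                    /* command already completed */

        abt->handle = handle;
        abt->req_que_no = req->id;            /* route the abort to this queue */
        return 0;
}

int main(void)
{
        static struct req_que req = { .id = 1 };
        struct srb cmd;
        struct abort_iocb abt = { 0 };

        req.outstanding_cmds[7] = &cmd;
        return (build_abort(&req, &cmd, &abt) == 0 && abt.handle == 7) ? 0 : 1;
}
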
@@ -2233,16 +2263,21 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2233 int rval, rval2; 2263 int rval, rval2;
2234 struct tsk_mgmt_cmd *tsk; 2264 struct tsk_mgmt_cmd *tsk;
2235 dma_addr_t tsk_dma; 2265 dma_addr_t tsk_dma;
2236 scsi_qla_host_t *ha, *pha; 2266 scsi_qla_host_t *vha;
2267 struct qla_hw_data *ha;
2268 struct req_que *req;
2269 struct rsp_que *rsp;
2237 2270
2238 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 2271 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
2239 2272
2240 ha = fcport->ha; 2273 vha = fcport->vha;
2241 pha = to_qla_parent(ha); 2274 ha = vha->hw;
2242 tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2275 req = ha->req_q_map[0];
2276 rsp = ha->rsp_q_map[0];
2277 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2243 if (tsk == NULL) { 2278 if (tsk == NULL) {
2244 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2279 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
2245 "IOCB.\n", __func__, ha->host_no)); 2280 "IOCB.\n", __func__, vha->host_no));
2246 return QLA_MEMORY_ALLOC_FAILED; 2281 return QLA_MEMORY_ALLOC_FAILED;
2247 } 2282 }
2248 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd)); 2283 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
@@ -2262,34 +2297,34 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2262 sizeof(tsk->p.tsk.lun)); 2297 sizeof(tsk->p.tsk.lun));
2263 } 2298 }
2264 2299
2265 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); 2300 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2266 if (rval != QLA_SUCCESS) { 2301 if (rval != QLA_SUCCESS) {
2267 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB " 2302 DEBUG2_3_11(printk("%s(%ld): failed to issue %s Reset IOCB "
2268 "(%x).\n", __func__, ha->host_no, name, rval)); 2303 "(%x).\n", __func__, vha->host_no, name, rval));
2269 } else if (tsk->p.sts.entry_status != 0) { 2304 } else if (tsk->p.sts.entry_status != 0) {
2270 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2305 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2271 "-- error status (%x).\n", __func__, ha->host_no, 2306 "-- error status (%x).\n", __func__, vha->host_no,
2272 tsk->p.sts.entry_status)); 2307 tsk->p.sts.entry_status));
2273 rval = QLA_FUNCTION_FAILED; 2308 rval = QLA_FUNCTION_FAILED;
2274 } else if (tsk->p.sts.comp_status != 2309 } else if (tsk->p.sts.comp_status !=
2275 __constant_cpu_to_le16(CS_COMPLETE)) { 2310 __constant_cpu_to_le16(CS_COMPLETE)) {
2276 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2311 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2277 "-- completion status (%x).\n", __func__, 2312 "-- completion status (%x).\n", __func__,
2278 ha->host_no, le16_to_cpu(tsk->p.sts.comp_status))); 2313 vha->host_no, le16_to_cpu(tsk->p.sts.comp_status)));
2279 rval = QLA_FUNCTION_FAILED; 2314 rval = QLA_FUNCTION_FAILED;
2280 } 2315 }
2281 2316
2282 /* Issue marker IOCB. */ 2317 /* Issue marker IOCB. */
2283 rval2 = qla2x00_marker(ha, fcport->loop_id, l, 2318 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2284 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); 2319 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2285 if (rval2 != QLA_SUCCESS) { 2320 if (rval2 != QLA_SUCCESS) {
2286 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB " 2321 DEBUG2_3_11(printk("%s(%ld): failed to issue Marker IOCB "
2287 "(%x).\n", __func__, ha->host_no, rval2)); 2322 "(%x).\n", __func__, vha->host_no, rval2));
2288 } else { 2323 } else {
2289 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2324 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2290 } 2325 }
2291 2326
2292 dma_pool_free(pha->s_dma_pool, tsk, tsk_dma); 2327 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
2293 2328
2294 return rval; 2329 return rval;
2295} 2330}
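
The task-management helper now derives its context entirely from the port: vha from fcport->vha, the shared hardware data from vha->hw, and the queues for the trailing marker IOCB from slot 0 of the hw-wide queue maps, i.e. the base request/response pair. The fragment below models only that selection step; the structure contents are stand-ins limited to the fields this hunk uses.

/*
 * Model of the queue selection in __qla24xx_issue_tmf() above: the
 * default (base) queue pair is entry 0 of the per-adapter maps.
 * Structures are reduced to the fields referenced in the hunk.
 */
struct req_que;
struct rsp_que;

struct qla_hw_data {
        struct req_que **req_q_map;     /* request queues, indexed by id */
        struct rsp_que **rsp_q_map;     /* response queues, indexed by id */
};

struct scsi_qla_host { struct qla_hw_data *hw; };
struct fc_port { struct scsi_qla_host *vha; };

void pick_tmf_queues(struct fc_port *fcport, struct req_que **req,
    struct rsp_que **rsp)
{
        struct scsi_qla_host *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;

        *req = ha->req_q_map[0];        /* base request queue */
        *rsp = ha->rsp_q_map[0];        /* base response queue */
}
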
@@ -2307,29 +2342,30 @@ qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
2307} 2342}
2308 2343
2309int 2344int
2310qla2x00_system_error(scsi_qla_host_t *ha) 2345qla2x00_system_error(scsi_qla_host_t *vha)
2311{ 2346{
2312 int rval; 2347 int rval;
2313 mbx_cmd_t mc; 2348 mbx_cmd_t mc;
2314 mbx_cmd_t *mcp = &mc; 2349 mbx_cmd_t *mcp = &mc;
2350 struct qla_hw_data *ha = vha->hw;
2315 2351
2316 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2352 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2317 return QLA_FUNCTION_FAILED; 2353 return QLA_FUNCTION_FAILED;
2318 2354
2319 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2355 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2320 2356
2321 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2357 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2322 mcp->out_mb = MBX_0; 2358 mcp->out_mb = MBX_0;
2323 mcp->in_mb = MBX_0; 2359 mcp->in_mb = MBX_0;
2324 mcp->tov = 5; 2360 mcp->tov = 5;
2325 mcp->flags = 0; 2361 mcp->flags = 0;
2326 rval = qla2x00_mailbox_command(ha, mcp); 2362 rval = qla2x00_mailbox_command(vha, mcp);
2327 2363
2328 if (rval != QLA_SUCCESS) { 2364 if (rval != QLA_SUCCESS) {
2329 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2365 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2330 ha->host_no, rval)); 2366 vha->host_no, rval));
2331 } else { 2367 } else {
2332 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2368 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2333 } 2369 }
2334 2370
2335 return rval; 2371 return rval;
@@ -2342,14 +2378,14 @@ qla2x00_system_error(scsi_qla_host_t *ha)
2342 * Returns 2378 * Returns
2343 */ 2379 */
2344int 2380int
2345qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g, 2381qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2346 uint16_t sw_em_2g, uint16_t sw_em_4g) 2382 uint16_t sw_em_2g, uint16_t sw_em_4g)
2347{ 2383{
2348 int rval; 2384 int rval;
2349 mbx_cmd_t mc; 2385 mbx_cmd_t mc;
2350 mbx_cmd_t *mcp = &mc; 2386 mbx_cmd_t *mcp = &mc;
2351 2387
2352 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2388 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2353 2389
2354 mcp->mb[0] = MBC_SERDES_PARAMS; 2390 mcp->mb[0] = MBC_SERDES_PARAMS;
2355 mcp->mb[1] = BIT_0; 2391 mcp->mb[1] = BIT_0;
@@ -2360,61 +2396,61 @@ qla2x00_set_serdes_params(scsi_qla_host_t *ha, uint16_t sw_em_1g,
2360 mcp->in_mb = MBX_0; 2396 mcp->in_mb = MBX_0;
2361 mcp->tov = MBX_TOV_SECONDS; 2397 mcp->tov = MBX_TOV_SECONDS;
2362 mcp->flags = 0; 2398 mcp->flags = 0;
2363 rval = qla2x00_mailbox_command(ha, mcp); 2399 rval = qla2x00_mailbox_command(vha, mcp);
2364 2400
2365 if (rval != QLA_SUCCESS) { 2401 if (rval != QLA_SUCCESS) {
2366 /*EMPTY*/ 2402 /*EMPTY*/
2367 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2403 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2368 ha->host_no, rval, mcp->mb[0])); 2404 vha->host_no, rval, mcp->mb[0]));
2369 } else { 2405 } else {
2370 /*EMPTY*/ 2406 /*EMPTY*/
2371 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2407 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2372 } 2408 }
2373 2409
2374 return rval; 2410 return rval;
2375} 2411}
2376 2412
2377int 2413int
2378qla2x00_stop_firmware(scsi_qla_host_t *ha) 2414qla2x00_stop_firmware(scsi_qla_host_t *vha)
2379{ 2415{
2380 int rval; 2416 int rval;
2381 mbx_cmd_t mc; 2417 mbx_cmd_t mc;
2382 mbx_cmd_t *mcp = &mc; 2418 mbx_cmd_t *mcp = &mc;
2383 2419
2384 if (!IS_FWI2_CAPABLE(ha)) 2420 if (!IS_FWI2_CAPABLE(vha->hw))
2385 return QLA_FUNCTION_FAILED; 2421 return QLA_FUNCTION_FAILED;
2386 2422
2387 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2423 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2388 2424
2389 mcp->mb[0] = MBC_STOP_FIRMWARE; 2425 mcp->mb[0] = MBC_STOP_FIRMWARE;
2390 mcp->out_mb = MBX_0; 2426 mcp->out_mb = MBX_0;
2391 mcp->in_mb = MBX_0; 2427 mcp->in_mb = MBX_0;
2392 mcp->tov = 5; 2428 mcp->tov = 5;
2393 mcp->flags = 0; 2429 mcp->flags = 0;
2394 rval = qla2x00_mailbox_command(ha, mcp); 2430 rval = qla2x00_mailbox_command(vha, mcp);
2395 2431
2396 if (rval != QLA_SUCCESS) { 2432 if (rval != QLA_SUCCESS) {
2397 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2433 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2398 ha->host_no, rval)); 2434 vha->host_no, rval));
2399 } else { 2435 } else {
2400 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2436 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2401 } 2437 }
2402 2438
2403 return rval; 2439 return rval;
2404} 2440}
2405 2441
2406int 2442int
2407qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma, 2443qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2408 uint16_t buffers) 2444 uint16_t buffers)
2409{ 2445{
2410 int rval; 2446 int rval;
2411 mbx_cmd_t mc; 2447 mbx_cmd_t mc;
2412 mbx_cmd_t *mcp = &mc; 2448 mbx_cmd_t *mcp = &mc;
2413 2449
2414 if (!IS_FWI2_CAPABLE(ha)) 2450 if (!IS_FWI2_CAPABLE(vha->hw))
2415 return QLA_FUNCTION_FAILED; 2451 return QLA_FUNCTION_FAILED;
2416 2452
2417 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2453 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2418 2454
2419 mcp->mb[0] = MBC_TRACE_CONTROL; 2455 mcp->mb[0] = MBC_TRACE_CONTROL;
2420 mcp->mb[1] = TC_EFT_ENABLE; 2456 mcp->mb[1] = TC_EFT_ENABLE;
@@ -2428,28 +2464,28 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *ha, dma_addr_t eft_dma,
2428 mcp->in_mb = MBX_1|MBX_0; 2464 mcp->in_mb = MBX_1|MBX_0;
2429 mcp->tov = MBX_TOV_SECONDS; 2465 mcp->tov = MBX_TOV_SECONDS;
2430 mcp->flags = 0; 2466 mcp->flags = 0;
2431 rval = qla2x00_mailbox_command(ha, mcp); 2467 rval = qla2x00_mailbox_command(vha, mcp);
2432 if (rval != QLA_SUCCESS) { 2468 if (rval != QLA_SUCCESS) {
2433 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2469 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2434 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2470 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2435 } else { 2471 } else {
2436 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2472 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2437 } 2473 }
2438 2474
2439 return rval; 2475 return rval;
2440} 2476}
2441 2477
2442int 2478int
2443qla2x00_disable_eft_trace(scsi_qla_host_t *ha) 2479qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2444{ 2480{
2445 int rval; 2481 int rval;
2446 mbx_cmd_t mc; 2482 mbx_cmd_t mc;
2447 mbx_cmd_t *mcp = &mc; 2483 mbx_cmd_t *mcp = &mc;
2448 2484
2449 if (!IS_FWI2_CAPABLE(ha)) 2485 if (!IS_FWI2_CAPABLE(vha->hw))
2450 return QLA_FUNCTION_FAILED; 2486 return QLA_FUNCTION_FAILED;
2451 2487
2452 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2488 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2453 2489
2454 mcp->mb[0] = MBC_TRACE_CONTROL; 2490 mcp->mb[0] = MBC_TRACE_CONTROL;
2455 mcp->mb[1] = TC_EFT_DISABLE; 2491 mcp->mb[1] = TC_EFT_DISABLE;
@@ -2457,29 +2493,29 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *ha)
2457 mcp->in_mb = MBX_1|MBX_0; 2493 mcp->in_mb = MBX_1|MBX_0;
2458 mcp->tov = MBX_TOV_SECONDS; 2494 mcp->tov = MBX_TOV_SECONDS;
2459 mcp->flags = 0; 2495 mcp->flags = 0;
2460 rval = qla2x00_mailbox_command(ha, mcp); 2496 rval = qla2x00_mailbox_command(vha, mcp);
2461 if (rval != QLA_SUCCESS) { 2497 if (rval != QLA_SUCCESS) {
2462 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2498 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2463 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2499 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2464 } else { 2500 } else {
2465 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2501 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2466 } 2502 }
2467 2503
2468 return rval; 2504 return rval;
2469} 2505}
2470 2506
2471int 2507int
2472qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma, 2508qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2473 uint16_t buffers, uint16_t *mb, uint32_t *dwords) 2509 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2474{ 2510{
2475 int rval; 2511 int rval;
2476 mbx_cmd_t mc; 2512 mbx_cmd_t mc;
2477 mbx_cmd_t *mcp = &mc; 2513 mbx_cmd_t *mcp = &mc;
2478 2514
2479 if (!IS_QLA25XX(ha)) 2515 if (!IS_QLA25XX(vha->hw))
2480 return QLA_FUNCTION_FAILED; 2516 return QLA_FUNCTION_FAILED;
2481 2517
2482 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2518 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2483 2519
2484 mcp->mb[0] = MBC_TRACE_CONTROL; 2520 mcp->mb[0] = MBC_TRACE_CONTROL;
2485 mcp->mb[1] = TC_FCE_ENABLE; 2521 mcp->mb[1] = TC_FCE_ENABLE;
@@ -2497,12 +2533,12 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
2497 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 2533 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2498 mcp->tov = MBX_TOV_SECONDS; 2534 mcp->tov = MBX_TOV_SECONDS;
2499 mcp->flags = 0; 2535 mcp->flags = 0;
2500 rval = qla2x00_mailbox_command(ha, mcp); 2536 rval = qla2x00_mailbox_command(vha, mcp);
2501 if (rval != QLA_SUCCESS) { 2537 if (rval != QLA_SUCCESS) {
2502 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2538 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2503 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2539 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2504 } else { 2540 } else {
2505 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2541 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2506 2542
2507 if (mb) 2543 if (mb)
2508 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2544 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2514,16 +2550,16 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *ha, dma_addr_t fce_dma,
2514} 2550}
2515 2551
2516int 2552int
2517qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd) 2553qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2518{ 2554{
2519 int rval; 2555 int rval;
2520 mbx_cmd_t mc; 2556 mbx_cmd_t mc;
2521 mbx_cmd_t *mcp = &mc; 2557 mbx_cmd_t *mcp = &mc;
2522 2558
2523 if (!IS_FWI2_CAPABLE(ha)) 2559 if (!IS_FWI2_CAPABLE(vha->hw))
2524 return QLA_FUNCTION_FAILED; 2560 return QLA_FUNCTION_FAILED;
2525 2561
2526 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2562 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2527 2563
2528 mcp->mb[0] = MBC_TRACE_CONTROL; 2564 mcp->mb[0] = MBC_TRACE_CONTROL;
2529 mcp->mb[1] = TC_FCE_DISABLE; 2565 mcp->mb[1] = TC_FCE_DISABLE;
@@ -2533,12 +2569,12 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
2533 MBX_1|MBX_0; 2569 MBX_1|MBX_0;
2534 mcp->tov = MBX_TOV_SECONDS; 2570 mcp->tov = MBX_TOV_SECONDS;
2535 mcp->flags = 0; 2571 mcp->flags = 0;
2536 rval = qla2x00_mailbox_command(ha, mcp); 2572 rval = qla2x00_mailbox_command(vha, mcp);
2537 if (rval != QLA_SUCCESS) { 2573 if (rval != QLA_SUCCESS) {
2538 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n", 2574 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x mb[1]=%x.\n",
2539 __func__, ha->host_no, rval, mcp->mb[0], mcp->mb[1])); 2575 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1]));
2540 } else { 2576 } else {
2541 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2577 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2542 2578
2543 if (wr) 2579 if (wr)
2544 *wr = (uint64_t) mcp->mb[5] << 48 | 2580 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2556,17 +2592,17 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *ha, uint64_t *wr, uint64_t *rd)
2556} 2592}
2557 2593
2558int 2594int
2559qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr, 2595qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint16_t addr,
2560 uint16_t off, uint16_t count) 2596 uint16_t off, uint16_t count)
2561{ 2597{
2562 int rval; 2598 int rval;
2563 mbx_cmd_t mc; 2599 mbx_cmd_t mc;
2564 mbx_cmd_t *mcp = &mc; 2600 mbx_cmd_t *mcp = &mc;
2565 2601
2566 if (!IS_FWI2_CAPABLE(ha)) 2602 if (!IS_FWI2_CAPABLE(vha->hw))
2567 return QLA_FUNCTION_FAILED; 2603 return QLA_FUNCTION_FAILED;
2568 2604
2569 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2605 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2570 2606
2571 mcp->mb[0] = MBC_READ_SFP; 2607 mcp->mb[0] = MBC_READ_SFP;
2572 mcp->mb[1] = addr; 2608 mcp->mb[1] = addr;
@@ -2581,30 +2617,30 @@ qla2x00_read_sfp(scsi_qla_host_t *ha, dma_addr_t sfp_dma, uint16_t addr,
2581 mcp->in_mb = MBX_0; 2617 mcp->in_mb = MBX_0;
2582 mcp->tov = MBX_TOV_SECONDS; 2618 mcp->tov = MBX_TOV_SECONDS;
2583 mcp->flags = 0; 2619 mcp->flags = 0;
2584 rval = qla2x00_mailbox_command(ha, mcp); 2620 rval = qla2x00_mailbox_command(vha, mcp);
2585 2621
2586 if (rval != QLA_SUCCESS) { 2622 if (rval != QLA_SUCCESS) {
2587 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, 2623 DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
2588 ha->host_no, rval, mcp->mb[0])); 2624 vha->host_no, rval, mcp->mb[0]));
2589 } else { 2625 } else {
2590 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2626 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2591 } 2627 }
2592 2628
2593 return rval; 2629 return rval;
2594} 2630}
2595 2631
2596int 2632int
2597qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id, 2633qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2598 uint16_t port_speed, uint16_t *mb) 2634 uint16_t port_speed, uint16_t *mb)
2599{ 2635{
2600 int rval; 2636 int rval;
2601 mbx_cmd_t mc; 2637 mbx_cmd_t mc;
2602 mbx_cmd_t *mcp = &mc; 2638 mbx_cmd_t *mcp = &mc;
2603 2639
2604 if (!IS_IIDMA_CAPABLE(ha)) 2640 if (!IS_IIDMA_CAPABLE(vha->hw))
2605 return QLA_FUNCTION_FAILED; 2641 return QLA_FUNCTION_FAILED;
2606 2642
2607 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2643 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2608 2644
2609 mcp->mb[0] = MBC_PORT_PARAMS; 2645 mcp->mb[0] = MBC_PORT_PARAMS;
2610 mcp->mb[1] = loop_id; 2646 mcp->mb[1] = loop_id;
@@ -2615,7 +2651,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2615 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; 2651 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
2616 mcp->tov = MBX_TOV_SECONDS; 2652 mcp->tov = MBX_TOV_SECONDS;
2617 mcp->flags = 0; 2653 mcp->flags = 0;
2618 rval = qla2x00_mailbox_command(ha, mcp); 2654 rval = qla2x00_mailbox_command(vha, mcp);
2619 2655
2620 /* Return mailbox statuses. */ 2656 /* Return mailbox statuses. */
2621 if (mb != NULL) { 2657 if (mb != NULL) {
@@ -2628,28 +2664,29 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2628 2664
2629 if (rval != QLA_SUCCESS) { 2665 if (rval != QLA_SUCCESS) {
2630 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, 2666 DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
2631 ha->host_no, rval)); 2667 vha->host_no, rval));
2632 } else { 2668 } else {
2633 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2669 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2634 } 2670 }
2635 2671
2636 return rval; 2672 return rval;
2637} 2673}
2638 2674
2639void 2675void
2640qla24xx_report_id_acquisition(scsi_qla_host_t *ha, 2676qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2641 struct vp_rpt_id_entry_24xx *rptid_entry) 2677 struct vp_rpt_id_entry_24xx *rptid_entry)
2642{ 2678{
2643 uint8_t vp_idx; 2679 uint8_t vp_idx;
2644 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx); 2680 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2645 scsi_qla_host_t *vha; 2681 struct qla_hw_data *ha = vha->hw;
2682 scsi_qla_host_t *vp;
2646 2683
2647 if (rptid_entry->entry_status != 0) 2684 if (rptid_entry->entry_status != 0)
2648 return; 2685 return;
2649 2686
2650 if (rptid_entry->format == 0) { 2687 if (rptid_entry->format == 0) {
2651 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d," 2688 DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
2652 " number of VPs acquired %d\n", __func__, ha->host_no, 2689 " number of VPs acquired %d\n", __func__, vha->host_no,
2653 MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count))); 2690 MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
2654 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__, 2691 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
2655 rptid_entry->port_id[2], rptid_entry->port_id[1], 2692 rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2658,7 +2695,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2658 vp_idx = LSB(stat); 2695 vp_idx = LSB(stat);
2659 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled " 2696 DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
2660 "- status %d - " 2697 "- status %d - "
2661 "with port id %02x%02x%02x\n",__func__,ha->host_no, 2698 "with port id %02x%02x%02x\n", __func__, vha->host_no,
2662 vp_idx, MSB(stat), 2699 vp_idx, MSB(stat),
2663 rptid_entry->port_id[2], rptid_entry->port_id[1], 2700 rptid_entry->port_id[2], rptid_entry->port_id[1],
2664 rptid_entry->port_id[0])); 2701 rptid_entry->port_id[0]));
@@ -2668,25 +2705,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2668 if (MSB(stat) == 1) 2705 if (MSB(stat) == 1)
2669 return; 2706 return;
2670 2707
2671 list_for_each_entry(vha, &ha->vp_list, vp_list) 2708 list_for_each_entry(vp, &ha->vp_list, list)
2672 if (vp_idx == vha->vp_idx) 2709 if (vp_idx == vp->vp_idx)
 2673 			break; 2710 			break;
 2674
 2675 	if (!vha) 2711 	if (!vp)
 2676 		return; 2712 		return;
2677 2713
2678 vha->d_id.b.domain = rptid_entry->port_id[2]; 2714 vp->d_id.b.domain = rptid_entry->port_id[2];
2679 vha->d_id.b.area = rptid_entry->port_id[1]; 2715 vp->d_id.b.area = rptid_entry->port_id[1];
2680 vha->d_id.b.al_pa = rptid_entry->port_id[0]; 2716 vp->d_id.b.al_pa = rptid_entry->port_id[0];
2681 2717
2682 /* 2718 /*
2683 * Cannot configure here as we are still sitting on the 2719 * Cannot configure here as we are still sitting on the
2684 * response queue. Handle it in dpc context. 2720 * response queue. Handle it in dpc context.
2685 */ 2721 */
2686 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 2722 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
2687 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 2723 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
2688 2724
2689 qla2xxx_wake_dpc(ha); 2725 qla2xxx_wake_dpc(vha);
2690 } 2726 }
2691} 2727}
2692 2728
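
The virtual-port handling above shows the core of the refactor: a scsi_qla_host (vha) no longer carries the hardware state itself but reaches the shared struct qla_hw_data through vha->hw, and every virtual port is simply another scsi_qla_host chained onto ha->vp_list through its list member, identified by vp_idx. The stand-alone sketch below models only those relationships; the list walk is open-coded here instead of using list_for_each_entry, and the field set is limited to what this diff shows.

/*
 * Minimal model of the vha/hw split visible in this patch.  Only fields
 * that appear in the diff are modeled; the real structures in qla_def.h
 * contain far more state.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

struct list_head { struct list_head *next, *prev; };

struct qla_hw_data {
        struct list_head vp_list;       /* all vports on this adapter */
        uint16_t max_npiv_vports;
        unsigned long num_vhosts;
};

struct scsi_qla_host {
        unsigned long host_no;
        uint16_t vp_idx;                /* 0 for the physical port */
        struct list_head list;          /* entry in hw->vp_list */
        struct qla_hw_data *hw;         /* shared hardware state */
};

/* Walk hw->vp_list the way qla24xx_report_id_acquisition() does and
 * return the vport owning vp_idx, or NULL if it is not registered. */
static struct scsi_qla_host *find_vp(struct qla_hw_data *ha, uint16_t vp_idx)
{
        struct list_head *pos;

        for (pos = ha->vp_list.next; pos != &ha->vp_list; pos = pos->next) {
                struct scsi_qla_host *vp = (struct scsi_qla_host *)
                    ((char *)pos - offsetof(struct scsi_qla_host, list));

                if (vp->vp_idx == vp_idx)
                        return vp;
        }
        return NULL;
}

int main(void)
{
        struct qla_hw_data ha = { .max_npiv_vports = 16, .num_vhosts = 1 };
        struct scsi_qla_host vp2 = { .host_no = 2, .vp_idx = 2, .hw = &ha };

        /* hand-built one-element vp_list for the demonstration */
        ha.vp_list.next = &vp2.list;
        ha.vp_list.prev = &vp2.list;
        vp2.list.next = &ha.vp_list;
        vp2.list.prev = &ha.vp_list;

        printf("vp_idx 2 %s\n", find_vp(&ha, 2) ? "found" : "not found");
        return 0;
}
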
@@ -2709,15 +2745,15 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2709 int rval; 2745 int rval;
2710 struct vp_config_entry_24xx *vpmod; 2746 struct vp_config_entry_24xx *vpmod;
2711 dma_addr_t vpmod_dma; 2747 dma_addr_t vpmod_dma;
2712 scsi_qla_host_t *pha; 2748 struct qla_hw_data *ha = vha->hw;
2749 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2713 2750
2714 /* This can be called by the parent */ 2751 /* This can be called by the parent */
2715 pha = to_qla_parent(vha);
2716 2752
2717 vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 2753 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2718 if (!vpmod) { 2754 if (!vpmod) {
2719 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP " 2755 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
2720 "IOCB.\n", __func__, pha->host_no)); 2756 "IOCB.\n", __func__, vha->host_no));
2721 return QLA_MEMORY_ALLOC_FAILED; 2757 return QLA_MEMORY_ALLOC_FAILED;
2722 } 2758 }
2723 2759
@@ -2732,26 +2768,27 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2732 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 2768 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
2733 vpmod->entry_count = 1; 2769 vpmod->entry_count = 1;
2734 2770
2735 rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0); 2771 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2736 if (rval != QLA_SUCCESS) { 2772 if (rval != QLA_SUCCESS) {
2737 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB" 2773 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
2738 "(%x).\n", __func__, pha->host_no, rval)); 2774 "(%x).\n", __func__, base_vha->host_no, rval));
2739 } else if (vpmod->comp_status != 0) { 2775 } else if (vpmod->comp_status != 0) {
2740 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2776 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2741 "-- error status (%x).\n", __func__, pha->host_no, 2777 "-- error status (%x).\n", __func__, base_vha->host_no,
2742 vpmod->comp_status)); 2778 vpmod->comp_status));
2743 rval = QLA_FUNCTION_FAILED; 2779 rval = QLA_FUNCTION_FAILED;
2744 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2780 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2745 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2781 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2746 "-- completion status (%x).\n", __func__, pha->host_no, 2782 "-- completion status (%x).\n", __func__, base_vha->host_no,
2747 le16_to_cpu(vpmod->comp_status))); 2783 le16_to_cpu(vpmod->comp_status)));
2748 rval = QLA_FUNCTION_FAILED; 2784 rval = QLA_FUNCTION_FAILED;
2749 } else { 2785 } else {
2750 /* EMPTY */ 2786 /* EMPTY */
2751 DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no)); 2787 DEBUG11(printk("%s(%ld): done.\n", __func__,
2788 base_vha->host_no));
2752 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 2789 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
2753 } 2790 }
2754 dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma); 2791 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
2755 2792
2756 return rval; 2793 return rval;
2757} 2794}
@@ -2778,11 +2815,12 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2778 int map, pos; 2815 int map, pos;
2779 struct vp_ctrl_entry_24xx *vce; 2816 struct vp_ctrl_entry_24xx *vce;
2780 dma_addr_t vce_dma; 2817 dma_addr_t vce_dma;
2781 scsi_qla_host_t *ha = vha->parent; 2818 struct qla_hw_data *ha = vha->hw;
2782 int vp_index = vha->vp_idx; 2819 int vp_index = vha->vp_idx;
2820 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2783 2821
2784 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__, 2822 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
2785 ha->host_no, vp_index)); 2823 vha->host_no, vp_index));
2786 2824
2787 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 2825 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
2788 return QLA_PARAMETER_ERROR; 2826 return QLA_PARAMETER_ERROR;
@@ -2791,7 +2829,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2791 if (!vce) { 2829 if (!vce) {
2792 DEBUG2_3(printk("%s(%ld): " 2830 DEBUG2_3(printk("%s(%ld): "
2793 "failed to allocate VP Control IOCB.\n", __func__, 2831 "failed to allocate VP Control IOCB.\n", __func__,
2794 ha->host_no)); 2832 base_vha->host_no));
2795 return QLA_MEMORY_ALLOC_FAILED; 2833 return QLA_MEMORY_ALLOC_FAILED;
2796 } 2834 }
2797 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx)); 2835 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
@@ -2810,30 +2848,30 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2810 vce->vp_idx_map[map] |= 1 << pos; 2848 vce->vp_idx_map[map] |= 1 << pos;
2811 mutex_unlock(&ha->vport_lock); 2849 mutex_unlock(&ha->vport_lock);
2812 2850
2813 rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0); 2851 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
2814 if (rval != QLA_SUCCESS) { 2852 if (rval != QLA_SUCCESS) {
2815 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB" 2853 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
2816 "(%x).\n", __func__, ha->host_no, rval)); 2854 "(%x).\n", __func__, base_vha->host_no, rval));
2817 printk("%s(%ld): failed to issue VP control IOCB" 2855 printk("%s(%ld): failed to issue VP control IOCB"
2818 "(%x).\n", __func__, ha->host_no, rval); 2856 "(%x).\n", __func__, base_vha->host_no, rval);
2819 } else if (vce->entry_status != 0) { 2857 } else if (vce->entry_status != 0) {
2820 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2858 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2821 "-- error status (%x).\n", __func__, ha->host_no, 2859 "-- error status (%x).\n", __func__, base_vha->host_no,
2822 vce->entry_status)); 2860 vce->entry_status));
2823 printk("%s(%ld): failed to complete IOCB " 2861 printk("%s(%ld): failed to complete IOCB "
2824 "-- error status (%x).\n", __func__, ha->host_no, 2862 "-- error status (%x).\n", __func__, base_vha->host_no,
2825 vce->entry_status); 2863 vce->entry_status);
2826 rval = QLA_FUNCTION_FAILED; 2864 rval = QLA_FUNCTION_FAILED;
2827 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2865 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2828 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " 2866 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2829 "-- completion status (%x).\n", __func__, ha->host_no, 2867 "-- completion status (%x).\n", __func__, base_vha->host_no,
2830 le16_to_cpu(vce->comp_status))); 2868 le16_to_cpu(vce->comp_status)));
2831 printk("%s(%ld): failed to complete IOCB " 2869 printk("%s(%ld): failed to complete IOCB "
2832 "-- completion status (%x).\n", __func__, ha->host_no, 2870 "-- completion status (%x).\n", __func__, base_vha->host_no,
2833 le16_to_cpu(vce->comp_status)); 2871 le16_to_cpu(vce->comp_status));
2834 rval = QLA_FUNCTION_FAILED; 2872 rval = QLA_FUNCTION_FAILED;
2835 } else { 2873 } else {
2836 DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2874 DEBUG2(printk("%s(%ld): done.\n", __func__, base_vha->host_no));
2837 } 2875 }
2838 2876
2839 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 2877 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -2863,7 +2901,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2863 */ 2901 */
2864 2902
2865int 2903int
2866qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format, 2904qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
2867 uint16_t vp_idx) 2905 uint16_t vp_idx)
2868{ 2906{
2869 int rval; 2907 int rval;
@@ -2884,7 +2922,7 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
2884 mcp->in_mb = MBX_0|MBX_1; 2922 mcp->in_mb = MBX_0|MBX_1;
2885 mcp->tov = MBX_TOV_SECONDS; 2923 mcp->tov = MBX_TOV_SECONDS;
2886 mcp->flags = 0; 2924 mcp->flags = 0;
2887 rval = qla2x00_mailbox_command(ha, mcp); 2925 rval = qla2x00_mailbox_command(vha, mcp);
2888 2926
2889 if (rval == QLA_SUCCESS) { 2927 if (rval == QLA_SUCCESS) {
2890 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { 2928 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -2897,16 +2935,16 @@ qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
2897} 2935}
2898 2936
2899int 2937int
2900qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr, 2938qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
2901 uint32_t size) 2939 uint32_t size)
2902{ 2940{
2903 int rval; 2941 int rval;
2904 mbx_cmd_t mc; 2942 mbx_cmd_t mc;
2905 mbx_cmd_t *mcp = &mc; 2943 mbx_cmd_t *mcp = &mc;
2906 2944
2907 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2945 DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2908 2946
2909 if (MSW(addr) || IS_FWI2_CAPABLE(ha)) { 2947 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
2910 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 2948 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
2911 mcp->mb[8] = MSW(addr); 2949 mcp->mb[8] = MSW(addr);
2912 mcp->out_mb = MBX_8|MBX_0; 2950 mcp->out_mb = MBX_8|MBX_0;
@@ -2920,7 +2958,7 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
2920 mcp->mb[6] = MSW(MSD(req_dma)); 2958 mcp->mb[6] = MSW(MSD(req_dma));
2921 mcp->mb[7] = LSW(MSD(req_dma)); 2959 mcp->mb[7] = LSW(MSD(req_dma));
2922 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1; 2960 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
2923 if (IS_FWI2_CAPABLE(ha)) { 2961 if (IS_FWI2_CAPABLE(vha->hw)) {
2924 mcp->mb[4] = MSW(size); 2962 mcp->mb[4] = MSW(size);
2925 mcp->mb[5] = LSW(size); 2963 mcp->mb[5] = LSW(size);
2926 mcp->out_mb |= MBX_5|MBX_4; 2964 mcp->out_mb |= MBX_5|MBX_4;
@@ -2932,13 +2970,13 @@ qla2x00_dump_ram(scsi_qla_host_t *ha, dma_addr_t req_dma, uint32_t addr,
2932 mcp->in_mb = MBX_0; 2970 mcp->in_mb = MBX_0;
2933 mcp->tov = MBX_TOV_SECONDS; 2971 mcp->tov = MBX_TOV_SECONDS;
2934 mcp->flags = 0; 2972 mcp->flags = 0;
2935 rval = qla2x00_mailbox_command(ha, mcp); 2973 rval = qla2x00_mailbox_command(vha, mcp);
2936 2974
2937 if (rval != QLA_SUCCESS) { 2975 if (rval != QLA_SUCCESS) {
2938 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, 2976 DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
2939 ha->host_no, rval, mcp->mb[0])); 2977 vha->host_no, rval, mcp->mb[0]));
2940 } else { 2978 } else {
2941 DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); 2979 DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
2942 } 2980 }
2943 2981
2944 return rval; 2982 return rval;
@@ -2954,20 +2992,21 @@ struct cs84xx_mgmt_cmd {
2954}; 2992};
2955 2993
2956int 2994int
2957qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status) 2995qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
2958{ 2996{
2959 int rval, retry; 2997 int rval, retry;
2960 struct cs84xx_mgmt_cmd *mn; 2998 struct cs84xx_mgmt_cmd *mn;
2961 dma_addr_t mn_dma; 2999 dma_addr_t mn_dma;
2962 uint16_t options; 3000 uint16_t options;
2963 unsigned long flags; 3001 unsigned long flags;
3002 struct qla_hw_data *ha = vha->hw;
2964 3003
2965 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 3004 DEBUG16(printk("%s(%ld): entered.\n", __func__, vha->host_no));
2966 3005
2967 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3006 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
2968 if (mn == NULL) { 3007 if (mn == NULL) {
2969 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX " 3008 DEBUG2_3(printk("%s(%ld): failed to allocate Verify ISP84XX "
2970 "IOCB.\n", __func__, ha->host_no)); 3009 "IOCB.\n", __func__, vha->host_no));
2971 return QLA_MEMORY_ALLOC_FAILED; 3010 return QLA_MEMORY_ALLOC_FAILED;
2972 } 3011 }
2973 3012
@@ -2986,19 +3025,19 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
2986 mn->p.req.options = cpu_to_le16(options); 3025 mn->p.req.options = cpu_to_le16(options);
2987 3026
2988 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__, 3027 DEBUG16(printk("%s(%ld): Dump of Verify Request.\n", __func__,
2989 ha->host_no)); 3028 vha->host_no));
2990 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3029 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
2991 sizeof(*mn))); 3030 sizeof(*mn)));
2992 3031
2993 rval = qla2x00_issue_iocb_timeout(ha, mn, mn_dma, 0, 120); 3032 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
2994 if (rval != QLA_SUCCESS) { 3033 if (rval != QLA_SUCCESS) {
2995 DEBUG2_16(printk("%s(%ld): failed to issue Verify " 3034 DEBUG2_16(printk("%s(%ld): failed to issue Verify "
2996 "IOCB (%x).\n", __func__, ha->host_no, rval)); 3035 "IOCB (%x).\n", __func__, vha->host_no, rval));
2997 goto verify_done; 3036 goto verify_done;
2998 } 3037 }
2999 3038
3000 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__, 3039 DEBUG16(printk("%s(%ld): Dump of Verify Response.\n", __func__,
3001 ha->host_no)); 3040 vha->host_no));
3002 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn, 3041 DEBUG16(qla2x00_dump_buffer((uint8_t *)mn,
3003 sizeof(*mn))); 3042 sizeof(*mn)));
3004 3043
@@ -3006,21 +3045,21 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
3006 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3045 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3007 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3046 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3008 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__, 3047 DEBUG2_16(printk("%s(%ld): cs=%x fc=%x\n", __func__,
3009 ha->host_no, status[0], status[1])); 3048 vha->host_no, status[0], status[1]));
3010 3049
3011 if (status[0] != CS_COMPLETE) { 3050 if (status[0] != CS_COMPLETE) {
3012 rval = QLA_FUNCTION_FAILED; 3051 rval = QLA_FUNCTION_FAILED;
3013 if (!(options & VCO_DONT_UPDATE_FW)) { 3052 if (!(options & VCO_DONT_UPDATE_FW)) {
3014 DEBUG2_16(printk("%s(%ld): Firmware update " 3053 DEBUG2_16(printk("%s(%ld): Firmware update "
3015 "failed. Retrying without update " 3054 "failed. Retrying without update "
3016 "firmware.\n", __func__, ha->host_no)); 3055 "firmware.\n", __func__, vha->host_no));
3017 options |= VCO_DONT_UPDATE_FW; 3056 options |= VCO_DONT_UPDATE_FW;
3018 options &= ~VCO_FORCE_UPDATE; 3057 options &= ~VCO_FORCE_UPDATE;
3019 retry = 1; 3058 retry = 1;
3020 } 3059 }
3021 } else { 3060 } else {
3022 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n", 3061 DEBUG2_16(printk("%s(%ld): firmware updated to %x.\n",
3023 __func__, ha->host_no, 3062 __func__, vha->host_no,
3024 le32_to_cpu(mn->p.rsp.fw_ver))); 3063 le32_to_cpu(mn->p.rsp.fw_ver)));
3025 3064
3026 /* NOTE: we only update OP firmware. */ 3065 /* NOTE: we only update OP firmware. */
@@ -3037,10 +3076,115 @@ verify_done:
3037 3076
3038 if (rval != QLA_SUCCESS) { 3077 if (rval != QLA_SUCCESS) {
3039 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__, 3078 DEBUG2_16(printk("%s(%ld): failed=%x.\n", __func__,
3040 ha->host_no, rval)); 3079 vha->host_no, rval));
3041 } else { 3080 } else {
3042 DEBUG16(printk("%s(%ld): done.\n", __func__, ha->host_no)); 3081 DEBUG16(printk("%s(%ld): done.\n", __func__, vha->host_no));
3082 }
3083
3084 return rval;
3085}
3086
3087int
3088qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req,
3089 uint8_t options)
3090{
3091 int rval;
3092 unsigned long flags;
3093 mbx_cmd_t mc;
3094 mbx_cmd_t *mcp = &mc;
3095 struct device_reg_25xxmq __iomem *reg;
3096 struct qla_hw_data *ha = vha->hw;
3097
3098 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3099 mcp->mb[1] = options;
3100 mcp->mb[2] = MSW(LSD(req->dma));
3101 mcp->mb[3] = LSW(LSD(req->dma));
3102 mcp->mb[6] = MSW(MSD(req->dma));
3103 mcp->mb[7] = LSW(MSD(req->dma));
3104 mcp->mb[5] = req->length;
3105 if (req->rsp)
3106 mcp->mb[10] = req->rsp->id;
3107 mcp->mb[12] = req->qos;
3108 mcp->mb[11] = req->vp_idx;
3109 mcp->mb[13] = req->rid;
3110
3111 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3112 QLA_QUE_PAGE * req->id);
3113
3114 mcp->mb[4] = req->id;
3115 /* que in ptr index */
3116 mcp->mb[8] = 0;
3117 /* que out ptr index */
3118 mcp->mb[9] = 0;
3119 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3120 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3121 mcp->in_mb = MBX_0;
3122 mcp->flags = MBX_DMA_OUT;
3123 mcp->tov = 60;
3124
3125 spin_lock_irqsave(&ha->hardware_lock, flags);
3126 if (!(options & BIT_0)) {
3127 WRT_REG_DWORD(&reg->req_q_in, 0);
3128 WRT_REG_DWORD(&reg->req_q_out, 0);
3129 }
3130 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3131
3132 rval = qla2x00_mailbox_command(vha, mcp);
3133 if (rval != QLA_SUCCESS)
3134 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x mb0=%x.\n",
3135 __func__, vha->host_no, rval, mcp->mb[0]));
3136 return rval;
3137}
3138
3139int
3140qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp,
3141 uint8_t options)
3142{
3143 int rval;
3144 unsigned long flags;
3145 mbx_cmd_t mc;
3146 mbx_cmd_t *mcp = &mc;
3147 struct device_reg_25xxmq __iomem *reg;
3148 struct qla_hw_data *ha = vha->hw;
3149
3150 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3151 mcp->mb[1] = options;
3152 mcp->mb[2] = MSW(LSD(rsp->dma));
3153 mcp->mb[3] = LSW(LSD(rsp->dma));
3154 mcp->mb[6] = MSW(MSD(rsp->dma));
3155 mcp->mb[7] = LSW(MSD(rsp->dma));
3156 mcp->mb[5] = rsp->length;
3157 mcp->mb[11] = rsp->vp_idx;
3158 mcp->mb[14] = rsp->msix->vector;
3159 mcp->mb[13] = rsp->rid;
3160
3161 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3162 QLA_QUE_PAGE * rsp->id);
3163
3164 mcp->mb[4] = rsp->id;
3165 /* que in ptr index */
3166 mcp->mb[8] = 0;
3167 /* que out ptr index */
3168 mcp->mb[9] = 0;
3169 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
3170 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3171 mcp->in_mb = MBX_0;
3172 mcp->flags = MBX_DMA_OUT;
3173 mcp->tov = 60;
3174
3175 spin_lock_irqsave(&ha->hardware_lock, flags);
3176 if (!(options & BIT_0)) {
3177 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3178 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3043 } 3179 }
3044 3180
3181 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3182
3183 rval = qla2x00_mailbox_command(vha, mcp);
3184 if (rval != QLA_SUCCESS)
3185 DEBUG2_3_11(printk(KERN_WARNING "%s(%ld): failed=%x "
3186 "mb0=%x.\n", __func__,
3187 vha->host_no, rval, mcp->mb[0]));
3045 return rval; 3188 return rval;
3046} 3189}
3190
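As an aside to the hunk above: qla25xx_init_req_que()/qla25xx_init_rsp_que() program the new queue's 64-bit DMA base address into four 16-bit mailbox registers (mb[2]/mb[3] carry the low dword, mb[6]/mb[7] the high dword). A minimal stand-alone sketch of that packing follows; it is not from the patch, and the LSW/MSW/LSD/MSD macros are local stand-ins mirroring the driver's helpers so the example builds in user space.

/*
 * Illustration only -- not part of the patch.  Shows how a 64-bit queue
 * DMA address is split into the 16-bit mailbox words used by
 * qla25xx_init_req_que()/qla25xx_init_rsp_que().  Macros are local
 * stand-ins for the driver's helpers.
 */
#include <stdint.h>
#include <stdio.h>

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x)	((uint32_t)(x))
#define MSD(x)	((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
	uint64_t dma = 0x0000001234abc000ULL;	/* example bus address */
	uint16_t mb[8] = { 0 };

	mb[2] = MSW(LSD(dma));	/* bits 31..16 */
	mb[3] = LSW(LSD(dma));	/* bits 15..0  */
	mb[6] = MSW(MSD(dma));	/* bits 63..48 */
	mb[7] = LSW(MSD(dma));	/* bits 47..32 */

	printf("mb2=%04x mb3=%04x mb6=%04x mb7=%04x\n",
	    mb[2], mb[3], mb[6], mb[7]);
	return 0;
}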
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 93560cd72784..386ffeae5b5a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h"
8 9
9#include <linux/moduleparam.h> 10#include <linux/moduleparam.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -18,7 +19,7 @@
18void 19void
19qla2x00_vp_stop_timer(scsi_qla_host_t *vha) 20qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
20{ 21{
21 if (vha->parent && vha->timer_active) { 22 if (vha->vp_idx && vha->timer_active) {
22 del_timer_sync(&vha->timer); 23 del_timer_sync(&vha->timer);
23 vha->timer_active = 0; 24 vha->timer_active = 0;
24 } 25 }
@@ -28,7 +29,7 @@ static uint32_t
28qla24xx_allocate_vp_id(scsi_qla_host_t *vha) 29qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
29{ 30{
30 uint32_t vp_id; 31 uint32_t vp_id;
31 scsi_qla_host_t *ha = vha->parent; 32 struct qla_hw_data *ha = vha->hw;
32 33
33 /* Find an empty slot and assign an vp_id */ 34 /* Find an empty slot and assign an vp_id */
34 mutex_lock(&ha->vport_lock); 35 mutex_lock(&ha->vport_lock);
@@ -44,7 +45,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44 ha->num_vhosts++; 45 ha->num_vhosts++;
45 ha->cur_vport_count++; 46 ha->cur_vport_count++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
47 list_add_tail(&vha->vp_list, &ha->vp_list); 48 list_add_tail(&vha->list, &ha->vp_list);
48 mutex_unlock(&ha->vport_lock); 49 mutex_unlock(&ha->vport_lock);
49 return vp_id; 50 return vp_id;
50} 51}
@@ -53,24 +54,24 @@ void
53qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) 54qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
54{ 55{
55 uint16_t vp_id; 56 uint16_t vp_id;
56 scsi_qla_host_t *ha = vha->parent; 57 struct qla_hw_data *ha = vha->hw;
57 58
58 mutex_lock(&ha->vport_lock); 59 mutex_lock(&ha->vport_lock);
59 vp_id = vha->vp_idx; 60 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 61 ha->num_vhosts--;
61 ha->cur_vport_count--; 62 ha->cur_vport_count--;
62 clear_bit(vp_id, ha->vp_idx_map); 63 clear_bit(vp_id, ha->vp_idx_map);
63 list_del(&vha->vp_list); 64 list_del(&vha->list);
64 mutex_unlock(&ha->vport_lock); 65 mutex_unlock(&ha->vport_lock);
65} 66}
66 67
67static scsi_qla_host_t * 68static scsi_qla_host_t *
68qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name) 69qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
69{ 70{
70 scsi_qla_host_t *vha; 71 scsi_qla_host_t *vha;
71 72
72 /* Locate matching device in database. */ 73 /* Locate matching device in database. */
73 list_for_each_entry(vha, &ha->vp_list, vp_list) { 74 list_for_each_entry(vha, &ha->vp_list, list) {
74 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) 75 if (!memcmp(port_name, vha->port_name, WWN_SIZE))
75 return vha; 76 return vha;
76 } 77 }
@@ -94,16 +95,13 @@ static void
94qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 95qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
95{ 96{
96 fc_port_t *fcport; 97 fc_port_t *fcport;
97 scsi_qla_host_t *pha = to_qla_parent(vha);
98
99 list_for_each_entry(fcport, &pha->fcports, list) {
100 if (fcport->vp_idx != vha->vp_idx)
101 continue;
102 98
99 list_for_each_entry(fcport, &vha->vp_fcports, list) {
103 DEBUG15(printk("scsi(%ld): Marking port dead, " 100 DEBUG15(printk("scsi(%ld): Marking port dead, "
104 "loop_id=0x%04x :%x\n", 101 "loop_id=0x%04x :%x\n",
105 vha->host_no, fcport->loop_id, fcport->vp_idx)); 102 vha->host_no, fcport->loop_id, fcport->vp_idx));
106 103
104 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
107 qla2x00_mark_device_lost(vha, fcport, 0, 0); 105 qla2x00_mark_device_lost(vha, fcport, 0, 0);
108 atomic_set(&fcport->state, FCS_UNCONFIGURED); 106 atomic_set(&fcport->state, FCS_UNCONFIGURED);
109 } 107 }
@@ -118,7 +116,6 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
118 atomic_set(&vha->loop_state, LOOP_DOWN); 116 atomic_set(&vha->loop_state, LOOP_DOWN);
119 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 117 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
120 118
121 /* Delete all vp's fcports from parent's list */
122 qla2x00_mark_vp_devices_dead(vha); 119 qla2x00_mark_vp_devices_dead(vha);
123 atomic_set(&vha->vp_state, VP_FAILED); 120 atomic_set(&vha->vp_state, VP_FAILED);
124 vha->flags.management_server_logged_in = 0; 121 vha->flags.management_server_logged_in = 0;
@@ -135,11 +132,12 @@ int
135qla24xx_enable_vp(scsi_qla_host_t *vha) 132qla24xx_enable_vp(scsi_qla_host_t *vha)
136{ 133{
137 int ret; 134 int ret;
138 scsi_qla_host_t *ha = vha->parent; 135 struct qla_hw_data *ha = vha->hw;
136 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
139 137
140 /* Check if physical ha port is Up */ 138 /* Check if physical ha port is Up */
141 if (atomic_read(&ha->loop_state) == LOOP_DOWN || 139 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
142 atomic_read(&ha->loop_state) == LOOP_DEAD ) { 140 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
143 vha->vp_err_state = VP_ERR_PORTDWN; 141 vha->vp_err_state = VP_ERR_PORTDWN;
144 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); 142 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
145 goto enable_failed; 143 goto enable_failed;
@@ -177,8 +175,8 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
177 vha->host_no, __func__)); 175 vha->host_no, __func__));
178 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 176 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
179 if (ret != QLA_SUCCESS) { 177 if (ret != QLA_SUCCESS) {
180 DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving" 178 DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
181 " of RSCN requests: 0x%x\n", ret)); 179 "receiving of RSCN requests: 0x%x\n", ret));
182 return; 180 return;
183 } else { 181 } else {
184 /* Corresponds to SCR enabled */ 182 /* Corresponds to SCR enabled */
@@ -194,25 +192,14 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
194} 192}
195 193
196void 194void
197qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb) 195qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
198{ 196{
199 int i, vp_idx_matched;
200 scsi_qla_host_t *vha; 197 scsi_qla_host_t *vha;
198 struct qla_hw_data *ha = rsp->hw;
199 int i = 0;
201 200
202 if (ha->parent) 201 list_for_each_entry(vha, &ha->vp_list, list) {
203 return; 202 if (vha->vp_idx) {
204
205 for_each_mapped_vp_idx(ha, i) {
206 vp_idx_matched = 0;
207
208 list_for_each_entry(vha, &ha->vp_list, vp_list) {
209 if (i == vha->vp_idx) {
210 vp_idx_matched = 1;
211 break;
212 }
213 }
214
215 if (vp_idx_matched) {
216 switch (mb[0]) { 203 switch (mb[0]) {
217 case MBA_LIP_OCCURRED: 204 case MBA_LIP_OCCURRED:
218 case MBA_LOOP_UP: 205 case MBA_LOOP_UP:
@@ -223,16 +210,17 @@ qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
223 case MBA_PORT_UPDATE: 210 case MBA_PORT_UPDATE:
224 case MBA_RSCN_UPDATE: 211 case MBA_RSCN_UPDATE:
225 DEBUG15(printk("scsi(%ld)%s: Async_event for" 212 DEBUG15(printk("scsi(%ld)%s: Async_event for"
226 " VP[%d], mb = 0x%x, vha=%p\n", 213 " VP[%d], mb = 0x%x, vha=%p\n",
227 vha->host_no, __func__,i, *mb, vha)); 214 vha->host_no, __func__, i, *mb, vha));
228 qla2x00_async_event(vha, mb); 215 qla2x00_async_event(vha, rsp, mb);
229 break; 216 break;
230 } 217 }
231 } 218 }
219 i++;
232 } 220 }
233} 221}
234 222
235void 223int
236qla2x00_vp_abort_isp(scsi_qla_host_t *vha) 224qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
237{ 225{
238 /* 226 /*
@@ -247,38 +235,56 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
247 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 235 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
248 } 236 }
249 237
238 /* To exclusively reset vport, we need to log it out first.*/
239 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
240 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
241
250 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n", 242 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
251 vha->host_no, vha->vp_idx)); 243 vha->host_no, vha->vp_idx));
252 qla24xx_enable_vp(vha); 244 return qla24xx_enable_vp(vha);
253} 245}
254 246
255static int 247static int
256qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 248qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
257{ 249{
258 scsi_qla_host_t *ha = vha->parent; 250 struct qla_hw_data *ha = vha->hw;
251 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
259 252
260 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 253 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
261 /* VP acquired. complete port configuration */ 254 /* VP acquired. complete port configuration */
262 if (atomic_read(&ha->loop_state) == LOOP_READY) { 255 if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
263 qla24xx_configure_vp(vha); 256 qla24xx_configure_vp(vha);
264 } else { 257 } else {
265 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags); 258 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
266 set_bit(VP_DPC_NEEDED, &ha->dpc_flags); 259 set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
267 } 260 }
268 261
269 return 0; 262 return 0;
270 } 263 }
271 264
272 if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) 265 if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
273 qla2x00_vp_abort_isp(vha); 266 qla2x00_update_fcports(vha);
267 clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
268 }
269
270 if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
271 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
272 atomic_read(&vha->loop_state) != LOOP_DOWN) {
273
274 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
275 vha->host_no));
276 qla2x00_relogin(vha);
277
278 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
279 vha->host_no));
280 }
274 281
275 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 282 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
276 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { 283 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
277 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 284 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
278 } 285 }
279 286
280 if (atomic_read(&vha->vp_state) == VP_ACTIVE && 287 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
281 test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
282 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 288 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
283 qla2x00_loop_resync(vha); 289 qla2x00_loop_resync(vha);
284 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 290 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -289,38 +295,30 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
289} 295}
290 296
291void 297void
292qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha) 298qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
293{ 299{
294 int ret; 300 int ret;
295 int i, vp_idx_matched; 301 struct qla_hw_data *ha = vha->hw;
296 scsi_qla_host_t *vha; 302 scsi_qla_host_t *vp;
297 303
298 if (ha->parent) 304 if (vha->vp_idx)
299 return; 305 return;
300 if (list_empty(&ha->vp_list)) 306 if (list_empty(&ha->vp_list))
301 return; 307 return;
302 308
303 clear_bit(VP_DPC_NEEDED, &ha->dpc_flags); 309 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
304
305 for_each_mapped_vp_idx(ha, i) {
306 vp_idx_matched = 0;
307
308 list_for_each_entry(vha, &ha->vp_list, vp_list) {
309 if (i == vha->vp_idx) {
310 vp_idx_matched = 1;
311 break;
312 }
313 }
314 310
315 if (vp_idx_matched) 311 list_for_each_entry(vp, &ha->vp_list, list) {
316 ret = qla2x00_do_dpc_vp(vha); 312 if (vp->vp_idx)
313 ret = qla2x00_do_dpc_vp(vp);
317 } 314 }
318} 315}
319 316
320int 317int
321qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) 318qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
322{ 319{
323 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 320 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
321 struct qla_hw_data *ha = base_vha->hw;
324 scsi_qla_host_t *vha; 322 scsi_qla_host_t *vha;
325 uint8_t port_name[WWN_SIZE]; 323 uint8_t port_name[WWN_SIZE];
326 324
@@ -337,7 +335,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
337 335
338 /* Check up unique WWPN */ 336 /* Check up unique WWPN */
339 u64_to_wwn(fc_vport->port_name, port_name); 337 u64_to_wwn(fc_vport->port_name, port_name);
340 if (!memcmp(port_name, ha->port_name, WWN_SIZE)) 338 if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
341 return VPCERR_BAD_WWN; 339 return VPCERR_BAD_WWN;
342 vha = qla24xx_find_vhost_by_name(ha, port_name); 340 vha = qla24xx_find_vhost_by_name(ha, port_name);
343 if (vha) 341 if (vha)
@@ -346,7 +344,7 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
346 /* Check up max-npiv-supports */ 344 /* Check up max-npiv-supports */
347 if (ha->num_vhosts > ha->max_npiv_vports) { 345 if (ha->num_vhosts > ha->max_npiv_vports) {
348 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than " 346 DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
349 "max_npv_vports %ud.\n", ha->host_no, 347 "max_npv_vports %ud.\n", base_vha->host_no,
350 ha->num_vhosts, ha->max_npiv_vports)); 348 ha->num_vhosts, ha->max_npiv_vports));
351 return VPCERR_UNSUPPORTED; 349 return VPCERR_UNSUPPORTED;
352 } 350 }
@@ -356,59 +354,34 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
356scsi_qla_host_t * 354scsi_qla_host_t *
357qla24xx_create_vhost(struct fc_vport *fc_vport) 355qla24xx_create_vhost(struct fc_vport *fc_vport)
358{ 356{
359 scsi_qla_host_t *ha = shost_priv(fc_vport->shost); 357 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
358 struct qla_hw_data *ha = base_vha->hw;
360 scsi_qla_host_t *vha; 359 scsi_qla_host_t *vha;
360 struct scsi_host_template *sht = &qla24xx_driver_template;
361 struct Scsi_Host *host; 361 struct Scsi_Host *host;
362 362
363 host = scsi_host_alloc(&qla24xx_driver_template, 363 vha = qla2x00_create_host(sht, ha);
364 sizeof(scsi_qla_host_t)); 364 if (!vha) {
365 if (!host) { 365 DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
366 printk(KERN_WARNING
367 "qla2xxx: scsi_host_alloc() failed for vport\n");
368 return(NULL); 366 return(NULL);
369 } 367 }
370 368
371 vha = shost_priv(host); 369 host = vha->host;
372
373 /* clone the parent hba */
374 memcpy(vha, ha, sizeof (scsi_qla_host_t));
375
376 fc_vport->dd_data = vha; 370 fc_vport->dd_data = vha;
377
378 vha->node_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
379 if (!vha->node_name)
380 goto create_vhost_failed_1;
381
382 vha->port_name = kmalloc(WWN_SIZE * sizeof(char), GFP_KERNEL);
383 if (!vha->port_name)
384 goto create_vhost_failed_2;
385
386 /* New host info */ 371 /* New host info */
387 u64_to_wwn(fc_vport->node_name, vha->node_name); 372 u64_to_wwn(fc_vport->node_name, vha->node_name);
388 u64_to_wwn(fc_vport->port_name, vha->port_name); 373 u64_to_wwn(fc_vport->port_name, vha->port_name);
389 374
390 vha->host = host;
391 vha->host_no = host->host_no;
392 vha->parent = ha;
393 vha->fc_vport = fc_vport; 375 vha->fc_vport = fc_vport;
394 vha->device_flags = 0; 376 vha->device_flags = 0;
395 vha->vp_idx = qla24xx_allocate_vp_id(vha); 377 vha->vp_idx = qla24xx_allocate_vp_id(vha);
396 if (vha->vp_idx > ha->max_npiv_vports) { 378 if (vha->vp_idx > ha->max_npiv_vports) {
397 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 379 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
398 vha->host_no)); 380 vha->host_no));
399 goto create_vhost_failed_3; 381 goto create_vhost_failed;
400 } 382 }
401 vha->mgmt_svr_loop_id = 10 + vha->vp_idx; 383 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
402 384
403 init_completion(&vha->mbx_cmd_comp);
404 complete(&vha->mbx_cmd_comp);
405 init_completion(&vha->mbx_intr_comp);
406
407 INIT_LIST_HEAD(&vha->list);
408 INIT_LIST_HEAD(&vha->fcports);
409 INIT_LIST_HEAD(&vha->vp_fcports);
410 INIT_LIST_HEAD(&vha->work_list);
411
412 vha->dpc_flags = 0L; 385 vha->dpc_flags = 0L;
413 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 386 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
414 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 387 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -423,7 +396,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
423 396
424 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); 397 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
425 398
 426 	host->can_queue = vha->request_q_length + 128; 399 	memset(vha->req_ques, 0, sizeof(vha->req_ques));
400 vha->req_ques[0] = ha->req_q_map[0]->id;
401 host->can_queue = ha->req_q_map[0]->length + 128;
427 host->this_id = 255; 402 host->this_id = 255;
428 host->cmd_per_lun = 3; 403 host->cmd_per_lun = 3;
429 host->max_cmd_len = MAX_CMDSZ; 404 host->max_cmd_len = MAX_CMDSZ;
@@ -440,12 +415,341 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
440 415
441 return vha; 416 return vha;
442 417
443create_vhost_failed_3: 418create_vhost_failed:
444 kfree(vha->port_name); 419 return NULL;
420}
445 421
446create_vhost_failed_2: 422static void
447 kfree(vha->node_name); 423qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
424{
425 struct qla_hw_data *ha = vha->hw;
426 uint16_t que_id = req->id;
427
428 dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
429 sizeof(request_t), req->ring, req->dma);
430 req->ring = NULL;
431 req->dma = 0;
432 if (que_id) {
433 ha->req_q_map[que_id] = NULL;
434 mutex_lock(&ha->vport_lock);
435 clear_bit(que_id, ha->req_qid_map);
436 mutex_unlock(&ha->vport_lock);
437 }
438 kfree(req);
439 req = NULL;
440}
448 441
449create_vhost_failed_1: 442static void
450 return NULL; 443qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
444{
445 struct qla_hw_data *ha = vha->hw;
446 uint16_t que_id = rsp->id;
447
448 if (rsp->msix && rsp->msix->have_irq) {
449 free_irq(rsp->msix->vector, rsp);
450 rsp->msix->have_irq = 0;
451 rsp->msix->rsp = NULL;
452 }
453 dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
454 sizeof(response_t), rsp->ring, rsp->dma);
455 rsp->ring = NULL;
456 rsp->dma = 0;
457 if (que_id) {
458 ha->rsp_q_map[que_id] = NULL;
459 mutex_lock(&ha->vport_lock);
460 clear_bit(que_id, ha->rsp_qid_map);
461 mutex_unlock(&ha->vport_lock);
462 }
463 kfree(rsp);
464 rsp = NULL;
465}
466
467int
468qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
469{
470 int ret = -1;
471
472 if (req) {
473 req->options |= BIT_0;
474 ret = qla25xx_init_req_que(vha, req, req->options);
475 }
476 if (ret == QLA_SUCCESS)
477 qla25xx_free_req_que(vha, req);
478
479 return ret;
480}
481
482int
483qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
484{
485 int ret = -1;
486
487 if (rsp) {
488 rsp->options |= BIT_0;
489 ret = qla25xx_init_rsp_que(vha, rsp, rsp->options);
490 }
491 if (ret == QLA_SUCCESS)
492 qla25xx_free_rsp_que(vha, rsp);
493
494 return ret;
495}
496
497int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
498{
499 int ret = 0;
500 struct qla_hw_data *ha = vha->hw;
501 struct req_que *req = ha->req_q_map[que];
502
503 req->options |= BIT_3;
504 req->qos = qos;
505 ret = qla25xx_init_req_que(vha, req, req->options);
506 if (ret != QLA_SUCCESS)
507 DEBUG2_17(printk(KERN_WARNING "%s failed\n", __func__));
508 /* restore options bit */
509 req->options &= ~BIT_3;
510 return ret;
511}
512
513
514/* Delete all queues for a given vhost */
515int
516qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
517{
518 int cnt, ret = 0;
519 struct req_que *req = NULL;
520 struct rsp_que *rsp = NULL;
521 struct qla_hw_data *ha = vha->hw;
522
523 if (que_no) {
524 /* Delete request queue */
525 req = ha->req_q_map[que_no];
526 if (req) {
527 rsp = req->rsp;
528 ret = qla25xx_delete_req_que(vha, req);
529 if (ret != QLA_SUCCESS) {
530 qla_printk(KERN_WARNING, ha,
531 "Couldn't delete req que %d\n", req->id);
532 return ret;
533 }
534 /* Delete associated response queue */
535 if (rsp) {
536 ret = qla25xx_delete_rsp_que(vha, rsp);
537 if (ret != QLA_SUCCESS) {
538 qla_printk(KERN_WARNING, ha,
539 "Couldn't delete rsp que %d\n",
540 rsp->id);
541 return ret;
542 }
543 }
544 }
545 } else { /* delete all queues of this host */
546 for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
547 /* Delete request queues */
548 req = ha->req_q_map[vha->req_ques[cnt]];
549 if (req && req->id) {
550 rsp = req->rsp;
551 ret = qla25xx_delete_req_que(vha, req);
552 if (ret != QLA_SUCCESS) {
553 qla_printk(KERN_WARNING, ha,
554 "Couldn't delete req que %d\n",
555 vha->req_ques[cnt]);
556 return ret;
557 }
558 vha->req_ques[cnt] = ha->req_q_map[0]->id;
559 /* Delete associated response queue */
560 if (rsp && rsp->id) {
561 ret = qla25xx_delete_rsp_que(vha, rsp);
562 if (ret != QLA_SUCCESS) {
563 qla_printk(KERN_WARNING, ha,
564 "Couldn't delete rsp que %d\n",
565 rsp->id);
566 return ret;
567 }
568 }
569 }
570 }
571 }
572 qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
573 vha->vp_idx);
574 return ret;
575}
576
577int
578qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
579 uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
580{
581 int ret = 0;
582 struct req_que *req = NULL;
583 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
584 uint16_t que_id = 0;
585
586 req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
587 if (req == NULL) {
 588 		qla_printk(KERN_WARNING, ha, "could not allocate memory "
589 "for request que\n");
590 goto que_failed;
591 }
592
593 req->length = REQUEST_ENTRY_CNT_24XX;
594 req->ring = dma_alloc_coherent(&ha->pdev->dev,
595 (req->length + 1) * sizeof(request_t),
596 &req->dma, GFP_KERNEL);
597 if (req->ring == NULL) {
598 qla_printk(KERN_WARNING, ha,
599 "Memory Allocation failed - request_ring\n");
600 goto que_failed;
601 }
602
603 mutex_lock(&ha->vport_lock);
604 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
605 if (que_id >= ha->max_queues) {
606 mutex_unlock(&ha->vport_lock);
607 qla_printk(KERN_INFO, ha, "No resources to create "
608 "additional request queue\n");
609 goto que_failed;
610 }
611 set_bit(que_id, ha->req_qid_map);
612 ha->req_q_map[que_id] = req;
613 req->rid = rid;
614 req->vp_idx = vp_idx;
615 req->qos = qos;
616
617 if (ha->rsp_q_map[rsp_que])
618 req->rsp = ha->rsp_q_map[rsp_que];
619 /* Use alternate PCI bus number */
620 if (MSB(req->rid))
621 options |= BIT_4;
622 /* Use alternate PCI devfn */
623 if (LSB(req->rid))
624 options |= BIT_5;
625 req->options = options;
626 req->ring_ptr = req->ring;
627 req->ring_index = 0;
628 req->cnt = req->length;
629 req->id = que_id;
630 mutex_unlock(&ha->vport_lock);
631
632 ret = qla25xx_init_req_que(base_vha, req, options);
633 if (ret != QLA_SUCCESS) {
634 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
635 mutex_lock(&ha->vport_lock);
636 clear_bit(que_id, ha->req_qid_map);
637 mutex_unlock(&ha->vport_lock);
638 goto que_failed;
639 }
640
641 return req->id;
642
643que_failed:
644 qla25xx_free_req_que(base_vha, req);
645 return 0;
646}
647
648/* create response queue */
649int
650qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
651 uint8_t vp_idx, uint16_t rid)
652{
653 int ret = 0;
654 struct rsp_que *rsp = NULL;
655 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 656 	uint16_t que_id = 0;
657
658 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
659 if (rsp == NULL) {
660 qla_printk(KERN_WARNING, ha, "could not allocate memory for"
661 " response que\n");
662 goto que_failed;
663 }
664
665 rsp->length = RESPONSE_ENTRY_CNT_2300;
666 rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
667 (rsp->length + 1) * sizeof(response_t),
668 &rsp->dma, GFP_KERNEL);
669 if (rsp->ring == NULL) {
670 qla_printk(KERN_WARNING, ha,
671 "Memory Allocation failed - response_ring\n");
672 goto que_failed;
673 }
674
675 mutex_lock(&ha->vport_lock);
676 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
677 if (que_id >= ha->max_queues) {
678 mutex_unlock(&ha->vport_lock);
679 qla_printk(KERN_INFO, ha, "No resources to create "
680 "additional response queue\n");
681 goto que_failed;
682 }
683 set_bit(que_id, ha->rsp_qid_map);
684
685 if (ha->flags.msix_enabled)
686 rsp->msix = &ha->msix_entries[que_id + 1];
687 else
688 qla_printk(KERN_WARNING, ha, "msix not enabled\n");
689
690 ha->rsp_q_map[que_id] = rsp;
691 rsp->rid = rid;
692 rsp->vp_idx = vp_idx;
693 rsp->hw = ha;
694 /* Use alternate PCI bus number */
695 if (MSB(rsp->rid))
696 options |= BIT_4;
697 /* Use alternate PCI devfn */
698 if (LSB(rsp->rid))
699 options |= BIT_5;
700 rsp->options = options;
701 rsp->ring_ptr = rsp->ring;
702 rsp->ring_index = 0;
703 rsp->id = que_id;
704 mutex_unlock(&ha->vport_lock);
705
706 ret = qla25xx_request_irq(rsp);
707 if (ret)
708 goto que_failed;
709
710 ret = qla25xx_init_rsp_que(base_vha, rsp, options);
711 if (ret != QLA_SUCCESS) {
712 qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
713 mutex_lock(&ha->vport_lock);
714 clear_bit(que_id, ha->rsp_qid_map);
715 mutex_unlock(&ha->vport_lock);
716 goto que_failed;
717 }
718
719 qla2x00_init_response_q_entries(rsp);
720
721 return rsp->id;
722
723que_failed:
724 qla25xx_free_rsp_que(base_vha, rsp);
725 return 0;
726}
727
728int
729qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
730{
731 uint16_t options = 0;
732 uint8_t ret = 0;
733 struct qla_hw_data *ha = vha->hw;
734
735 options |= BIT_1;
736 ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
737 if (!ret) {
738 qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
739 return ret;
740 } else
741 qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
742
743 options = 0;
744 if (qos & BIT_7)
745 options |= BIT_8;
746 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
747 qos & ~BIT_7);
748 if (ret) {
749 vha->req_ques[0] = ret;
750 qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
751 } else
752 qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
753
754 return ret;
451} 755}
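The queue create/delete helpers added above all follow one bookkeeping pattern: take ha->vport_lock, claim the first free id in a qid bitmap, publish the queue in the id-to-pointer map, and clear both again on teardown. Below is a rough user-space sketch of that pattern only; a pthread mutex and a plain word stand in for the kernel's mutex and bitmap helpers, and the names are illustrative, not the driver's.

/*
 * Illustration only -- not part of the patch.  Queue-id bookkeeping in
 * the style of qla25xx_create_req_que()/qla25xx_free_req_que().
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_QUEUES 32

static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long qid_map = 1UL;	/* bit 0 reserved for the default queue */
static void *q_map[MAX_QUEUES];		/* id -> queue object */

static int alloc_que_id(void *que)
{
	int id;

	pthread_mutex_lock(&vport_lock);
	for (id = 0; id < MAX_QUEUES && (qid_map & (1UL << id)); id++)
		;
	if (id == MAX_QUEUES) {		/* no resources for another queue */
		pthread_mutex_unlock(&vport_lock);
		return -1;
	}
	qid_map |= 1UL << id;
	q_map[id] = que;
	pthread_mutex_unlock(&vport_lock);
	return id;
}

static void free_que_id(int id)
{
	pthread_mutex_lock(&vport_lock);
	q_map[id] = NULL;
	qid_map &= ~(1UL << id);
	pthread_mutex_unlock(&vport_lock);
}

int main(void)
{
	int dummy;
	int id = alloc_que_id(&dummy);

	printf("allocated queue id %d\n", id);	/* 1: id 0 is the default queue */
	if (id >= 0)
		free_que_id(id);
	return 0;
}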
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 35567203ef61..8ea927788b3f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -92,7 +92,12 @@ MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings " 92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA."); 93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94 94
95 95int ql2xmaxqueues = 1;
96module_param(ql2xmaxqueues, int, S_IRUGO|S_IRUSR);
97MODULE_PARM_DESC(ql2xmaxqueues,
98 "Enables MQ settings "
99 "Default is 1 for single queue. Set it to number \
100 of queues in MQ mode.");
96/* 101/*
97 * SCSI host template entry points 102 * SCSI host template entry points
98 */ 103 */
@@ -183,42 +188,108 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
183 */ 188 */
184 189
185__inline__ void 190__inline__ void
186qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 191qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval)
187{ 192{
188 init_timer(&ha->timer); 193 init_timer(&vha->timer);
189 ha->timer.expires = jiffies + interval * HZ; 194 vha->timer.expires = jiffies + interval * HZ;
190 ha->timer.data = (unsigned long)ha; 195 vha->timer.data = (unsigned long)vha;
191 ha->timer.function = (void (*)(unsigned long))func; 196 vha->timer.function = (void (*)(unsigned long))func;
192 add_timer(&ha->timer); 197 add_timer(&vha->timer);
193 ha->timer_active = 1; 198 vha->timer_active = 1;
194} 199}
195 200
196static inline void 201static inline void
197qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval) 202qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
198{ 203{
199 mod_timer(&ha->timer, jiffies + interval * HZ); 204 mod_timer(&vha->timer, jiffies + interval * HZ);
200} 205}
201 206
202static __inline__ void 207static __inline__ void
203qla2x00_stop_timer(scsi_qla_host_t *ha) 208qla2x00_stop_timer(scsi_qla_host_t *vha)
204{ 209{
205 del_timer_sync(&ha->timer); 210 del_timer_sync(&vha->timer);
206 ha->timer_active = 0; 211 vha->timer_active = 0;
207} 212}
208 213
209static int qla2x00_do_dpc(void *data); 214static int qla2x00_do_dpc(void *data);
210 215
211static void qla2x00_rst_aen(scsi_qla_host_t *); 216static void qla2x00_rst_aen(scsi_qla_host_t *);
212 217
213static int qla2x00_mem_alloc(scsi_qla_host_t *); 218static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
214static void qla2x00_mem_free(scsi_qla_host_t *ha); 219 struct req_que **, struct rsp_que **);
215static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 220static void qla2x00_mem_free(struct qla_hw_data *);
221static void qla2x00_sp_free_dma(srb_t *);
216 222
217/* -------------------------------------------------------------------------- */ 223/* -------------------------------------------------------------------------- */
224static int qla2x00_alloc_queues(struct qla_hw_data *ha)
225{
226 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
227 GFP_KERNEL);
228 if (!ha->req_q_map) {
229 qla_printk(KERN_WARNING, ha,
230 "Unable to allocate memory for request queue ptrs\n");
231 goto fail_req_map;
232 }
233
234 ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
235 GFP_KERNEL);
236 if (!ha->rsp_q_map) {
237 qla_printk(KERN_WARNING, ha,
238 "Unable to allocate memory for response queue ptrs\n");
239 goto fail_rsp_map;
240 }
241 set_bit(0, ha->rsp_qid_map);
242 set_bit(0, ha->req_qid_map);
243 return 1;
244
245fail_rsp_map:
246 kfree(ha->req_q_map);
247 ha->req_q_map = NULL;
248fail_req_map:
249 return -ENOMEM;
250}
251
252static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
253 struct rsp_que *rsp)
254{
255 if (rsp && rsp->ring)
256 dma_free_coherent(&ha->pdev->dev,
257 (rsp->length + 1) * sizeof(response_t),
258 rsp->ring, rsp->dma);
259
260 kfree(rsp);
261 rsp = NULL;
262 if (req && req->ring)
263 dma_free_coherent(&ha->pdev->dev,
264 (req->length + 1) * sizeof(request_t),
265 req->ring, req->dma);
266
267 kfree(req);
268 req = NULL;
269}
270
271static void qla2x00_free_queues(struct qla_hw_data *ha)
272{
273 struct req_que *req;
274 struct rsp_que *rsp;
275 int cnt;
276
277 for (cnt = 0; cnt < ha->max_queues; cnt++) {
278 rsp = ha->rsp_q_map[cnt];
279 req = ha->req_q_map[cnt];
280 qla2x00_free_que(ha, req, rsp);
281 }
282 kfree(ha->rsp_q_map);
283 ha->rsp_q_map = NULL;
284
285 kfree(ha->req_q_map);
286 ha->req_q_map = NULL;
287}
218 288
219static char * 289static char *
220qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str) 290qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
221{ 291{
292 struct qla_hw_data *ha = vha->hw;
222 static char *pci_bus_modes[] = { 293 static char *pci_bus_modes[] = {
223 "33", "66", "100", "133", 294 "33", "66", "100", "133",
224 }; 295 };
@@ -240,9 +311,10 @@ qla2x00_pci_info_str(struct scsi_qla_host *ha, char *str)
240} 311}
241 312
242static char * 313static char *
243qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str) 314qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
244{ 315{
245 static char *pci_bus_modes[] = { "33", "66", "100", "133", }; 316 static char *pci_bus_modes[] = { "33", "66", "100", "133", };
317 struct qla_hw_data *ha = vha->hw;
246 uint32_t pci_bus; 318 uint32_t pci_bus;
247 int pcie_reg; 319 int pcie_reg;
248 320
@@ -290,9 +362,10 @@ qla24xx_pci_info_str(struct scsi_qla_host *ha, char *str)
290} 362}
291 363
292static char * 364static char *
293qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str) 365qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
294{ 366{
295 char un_str[10]; 367 char un_str[10];
368 struct qla_hw_data *ha = vha->hw;
296 369
297 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 370 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
298 ha->fw_minor_version, 371 ha->fw_minor_version,
@@ -328,8 +401,9 @@ qla2x00_fw_version_str(struct scsi_qla_host *ha, char *str)
328} 401}
329 402
330static char * 403static char *
331qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str) 404qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
332{ 405{
406 struct qla_hw_data *ha = vha->hw;
333 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 407 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
334 ha->fw_minor_version, 408 ha->fw_minor_version,
335 ha->fw_subminor_version); 409 ha->fw_subminor_version);
@@ -354,18 +428,20 @@ qla24xx_fw_version_str(struct scsi_qla_host *ha, char *str)
354} 428}
355 429
356static inline srb_t * 430static inline srb_t *
357qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport, 431qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
358 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 432 struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
359{ 433{
360 srb_t *sp; 434 srb_t *sp;
435 struct qla_hw_data *ha = vha->hw;
361 436
362 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 437 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC);
363 if (!sp) 438 if (!sp)
364 return sp; 439 return sp;
365 440
366 sp->ha = ha; 441 sp->vha = vha;
367 sp->fcport = fcport; 442 sp->fcport = fcport;
368 sp->cmd = cmd; 443 sp->cmd = cmd;
444 sp->que = ha->req_q_map[0];
369 sp->flags = 0; 445 sp->flags = 0;
370 CMD_SP(cmd) = (void *)sp; 446 CMD_SP(cmd) = (void *)sp;
371 cmd->scsi_done = done; 447 cmd->scsi_done = done;
@@ -376,9 +452,10 @@ qla2x00_get_new_sp(scsi_qla_host_t *ha, fc_port_t *fcport,
376static int 452static int
377qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 453qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
378{ 454{
379 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 455 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
380 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 456 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
381 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 457 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
458 struct qla_hw_data *ha = vha->hw;
382 srb_t *sp; 459 srb_t *sp;
383 int rval; 460 int rval;
384 461
@@ -399,33 +476,33 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
399 476
400 if (atomic_read(&fcport->state) != FCS_ONLINE) { 477 if (atomic_read(&fcport->state) != FCS_ONLINE) {
401 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 478 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
402 atomic_read(&ha->loop_state) == LOOP_DEAD) { 479 atomic_read(&vha->loop_state) == LOOP_DEAD) {
403 cmd->result = DID_NO_CONNECT << 16; 480 cmd->result = DID_NO_CONNECT << 16;
404 goto qc_fail_command; 481 goto qc_fail_command;
405 } 482 }
406 goto qc_target_busy; 483 goto qc_target_busy;
407 } 484 }
408 485
409 spin_unlock_irq(ha->host->host_lock); 486 spin_unlock_irq(vha->host->host_lock);
410 487
411 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 488 sp = qla2x00_get_new_sp(vha, fcport, cmd, done);
412 if (!sp) 489 if (!sp)
413 goto qc_host_busy_lock; 490 goto qc_host_busy_lock;
414 491
415 rval = qla2x00_start_scsi(sp); 492 rval = ha->isp_ops->start_scsi(sp);
416 if (rval != QLA_SUCCESS) 493 if (rval != QLA_SUCCESS)
417 goto qc_host_busy_free_sp; 494 goto qc_host_busy_free_sp;
418 495
419 spin_lock_irq(ha->host->host_lock); 496 spin_lock_irq(vha->host->host_lock);
420 497
421 return 0; 498 return 0;
422 499
423qc_host_busy_free_sp: 500qc_host_busy_free_sp:
424 qla2x00_sp_free_dma(ha, sp); 501 qla2x00_sp_free_dma(sp);
425 mempool_free(sp, ha->srb_mempool); 502 mempool_free(sp, ha->srb_mempool);
426 503
427qc_host_busy_lock: 504qc_host_busy_lock:
428 spin_lock_irq(ha->host->host_lock); 505 spin_lock_irq(vha->host->host_lock);
429 return SCSI_MLQUEUE_HOST_BUSY; 506 return SCSI_MLQUEUE_HOST_BUSY;
430 507
431qc_target_busy: 508qc_target_busy:
@@ -441,14 +518,15 @@ qc_fail_command:
441static int 518static int
442qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 519qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
443{ 520{
444 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 521 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
445 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 522 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
446 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 523 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
524 struct qla_hw_data *ha = vha->hw;
525 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
447 srb_t *sp; 526 srb_t *sp;
448 int rval; 527 int rval;
449 scsi_qla_host_t *pha = to_qla_parent(ha);
450 528
451 if (unlikely(pci_channel_offline(pha->pdev))) { 529 if (unlikely(pci_channel_offline(ha->pdev))) {
452 cmd->result = DID_REQUEUE << 16; 530 cmd->result = DID_REQUEUE << 16;
453 goto qc24_fail_command; 531 goto qc24_fail_command;
454 } 532 }
@@ -465,33 +543,33 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
465 543
466 if (atomic_read(&fcport->state) != FCS_ONLINE) { 544 if (atomic_read(&fcport->state) != FCS_ONLINE) {
467 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 545 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
468 atomic_read(&pha->loop_state) == LOOP_DEAD) { 546 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
469 cmd->result = DID_NO_CONNECT << 16; 547 cmd->result = DID_NO_CONNECT << 16;
470 goto qc24_fail_command; 548 goto qc24_fail_command;
471 } 549 }
472 goto qc24_target_busy; 550 goto qc24_target_busy;
473 } 551 }
474 552
475 spin_unlock_irq(ha->host->host_lock); 553 spin_unlock_irq(vha->host->host_lock);
476 554
477 sp = qla2x00_get_new_sp(pha, fcport, cmd, done); 555 sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
478 if (!sp) 556 if (!sp)
479 goto qc24_host_busy_lock; 557 goto qc24_host_busy_lock;
480 558
481 rval = qla24xx_start_scsi(sp); 559 rval = ha->isp_ops->start_scsi(sp);
482 if (rval != QLA_SUCCESS) 560 if (rval != QLA_SUCCESS)
483 goto qc24_host_busy_free_sp; 561 goto qc24_host_busy_free_sp;
484 562
485 spin_lock_irq(ha->host->host_lock); 563 spin_lock_irq(vha->host->host_lock);
486 564
487 return 0; 565 return 0;
488 566
489qc24_host_busy_free_sp: 567qc24_host_busy_free_sp:
490 qla2x00_sp_free_dma(pha, sp); 568 qla2x00_sp_free_dma(sp);
491 mempool_free(sp, pha->srb_mempool); 569 mempool_free(sp, ha->srb_mempool);
492 570
493qc24_host_busy_lock: 571qc24_host_busy_lock:
494 spin_lock_irq(ha->host->host_lock); 572 spin_lock_irq(vha->host->host_lock);
495 return SCSI_MLQUEUE_HOST_BUSY; 573 return SCSI_MLQUEUE_HOST_BUSY;
496 574
497qc24_target_busy: 575qc24_target_busy:
@@ -510,17 +588,14 @@ qc24_fail_command:
510 * max time. 588 * max time.
511 * 589 *
512 * Input: 590 * Input:
513 * ha = actual ha whose done queue will contain the command
514 * returned by firmware.
515 * cmd = Scsi Command to wait on. 591 * cmd = Scsi Command to wait on.
516 * flag = Abort/Reset(Bus or Device Reset)
517 * 592 *
518 * Return: 593 * Return:
519 * Not Found : 0 594 * Not Found : 0
520 * Found : 1 595 * Found : 1
521 */ 596 */
522static int 597static int
523qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd) 598qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
524{ 599{
525#define ABORT_POLLING_PERIOD 1000 600#define ABORT_POLLING_PERIOD 1000
526#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) 601#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
@@ -557,21 +632,22 @@ qla2x00_eh_wait_on_command(scsi_qla_host_t *ha, struct scsi_cmnd *cmd)
557 * Failed (Adapter is offline/disabled) : 1 632 * Failed (Adapter is offline/disabled) : 1
558 */ 633 */
559int 634int
560qla2x00_wait_for_hba_online(scsi_qla_host_t *ha) 635qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
561{ 636{
562 int return_status; 637 int return_status;
563 unsigned long wait_online; 638 unsigned long wait_online;
564 scsi_qla_host_t *pha = to_qla_parent(ha); 639 struct qla_hw_data *ha = vha->hw;
640 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
565 641
566 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 642 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
567 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) || 643 while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
568 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) || 644 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
569 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) || 645 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
570 pha->dpc_active) && time_before(jiffies, wait_online)) { 646 ha->dpc_active) && time_before(jiffies, wait_online)) {
571 647
572 msleep(1000); 648 msleep(1000);
573 } 649 }
574 if (pha->flags.online) 650 if (base_vha->flags.online)
575 return_status = QLA_SUCCESS; 651 return_status = QLA_SUCCESS;
576 else 652 else
577 return_status = QLA_FUNCTION_FAILED; 653 return_status = QLA_FUNCTION_FAILED;
@@ -596,19 +672,20 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
596 * Failed (LOOP_NOT_READY) : 1 672 * Failed (LOOP_NOT_READY) : 1
597 */ 673 */
598static inline int 674static inline int
599qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) 675qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
600{ 676{
601 int return_status = QLA_SUCCESS; 677 int return_status = QLA_SUCCESS;
602 unsigned long loop_timeout ; 678 unsigned long loop_timeout ;
603 scsi_qla_host_t *pha = to_qla_parent(ha); 679 struct qla_hw_data *ha = vha->hw;
680 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
604 681
605 /* wait for 5 min at the max for loop to be ready */ 682 /* wait for 5 min at the max for loop to be ready */
606 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 683 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
607 684
608 while ((!atomic_read(&pha->loop_down_timer) && 685 while ((!atomic_read(&base_vha->loop_down_timer) &&
609 atomic_read(&pha->loop_state) == LOOP_DOWN) || 686 atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
610 atomic_read(&pha->loop_state) != LOOP_READY) { 687 atomic_read(&base_vha->loop_state) != LOOP_READY) {
611 if (atomic_read(&pha->loop_state) == LOOP_DEAD) { 688 if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
612 return_status = QLA_FUNCTION_FAILED; 689 return_status = QLA_FUNCTION_FAILED;
613 break; 690 break;
614 } 691 }
@@ -624,35 +701,42 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
624void 701void
625qla2x00_abort_fcport_cmds(fc_port_t *fcport) 702qla2x00_abort_fcport_cmds(fc_port_t *fcport)
626{ 703{
627 int cnt; 704 int cnt, que, id;
628 unsigned long flags; 705 unsigned long flags;
629 srb_t *sp; 706 srb_t *sp;
630 scsi_qla_host_t *ha = fcport->ha; 707 scsi_qla_host_t *vha = fcport->vha;
631 scsi_qla_host_t *pha = to_qla_parent(ha); 708 struct qla_hw_data *ha = vha->hw;
709 struct req_que *req;
632 710
633 spin_lock_irqsave(&pha->hardware_lock, flags); 711 spin_lock_irqsave(&ha->hardware_lock, flags);
634 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 712 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
635 sp = pha->outstanding_cmds[cnt]; 713 id = vha->req_ques[que];
636 if (!sp) 714 req = ha->req_q_map[id];
637 continue; 715 if (!req)
638 if (sp->fcport != fcport)
639 continue; 716 continue;
717 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
718 sp = req->outstanding_cmds[cnt];
719 if (!sp)
720 continue;
721 if (sp->fcport != fcport)
722 continue;
640 723
641 spin_unlock_irqrestore(&pha->hardware_lock, flags); 724 spin_unlock_irqrestore(&ha->hardware_lock, flags);
642 if (ha->isp_ops->abort_command(ha, sp)) { 725 if (ha->isp_ops->abort_command(vha, sp, req)) {
643 DEBUG2(qla_printk(KERN_WARNING, ha,
644 "Abort failed -- %lx\n", sp->cmd->serial_number));
645 } else {
646 if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
647 QLA_SUCCESS)
648 DEBUG2(qla_printk(KERN_WARNING, ha, 726 DEBUG2(qla_printk(KERN_WARNING, ha,
649 "Abort failed while waiting -- %lx\n", 727 "Abort failed -- %lx\n",
650 sp->cmd->serial_number)); 728 sp->cmd->serial_number));
651 729 } else {
730 if (qla2x00_eh_wait_on_command(sp->cmd) !=
731 QLA_SUCCESS)
732 DEBUG2(qla_printk(KERN_WARNING, ha,
733 "Abort failed while waiting -- %lx\n",
734 sp->cmd->serial_number));
735 }
736 spin_lock_irqsave(&ha->hardware_lock, flags);
652 } 737 }
653 spin_lock_irqsave(&pha->hardware_lock, flags);
654 } 738 }
655 spin_unlock_irqrestore(&pha->hardware_lock, flags); 739 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656} 740}
657 741
658static void 742static void
@@ -690,14 +774,16 @@ qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
690static int 774static int
691qla2xxx_eh_abort(struct scsi_cmnd *cmd) 775qla2xxx_eh_abort(struct scsi_cmnd *cmd)
692{ 776{
693 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 777 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
694 srb_t *sp; 778 srb_t *sp;
695 int ret, i; 779 int ret, i;
696 unsigned int id, lun; 780 unsigned int id, lun;
697 unsigned long serial; 781 unsigned long serial;
698 unsigned long flags; 782 unsigned long flags;
699 int wait = 0; 783 int wait = 0;
700 scsi_qla_host_t *pha = to_qla_parent(ha); 784 struct qla_hw_data *ha = vha->hw;
785 struct req_que *req;
786 srb_t *spt;
701 787
702 qla2x00_block_error_handler(cmd); 788 qla2x00_block_error_handler(cmd);
703 789
@@ -709,11 +795,15 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
709 id = cmd->device->id; 795 id = cmd->device->id;
710 lun = cmd->device->lun; 796 lun = cmd->device->lun;
711 serial = cmd->serial_number; 797 serial = cmd->serial_number;
798 spt = (srb_t *) CMD_SP(cmd);
799 if (!spt)
800 return SUCCESS;
801 req = spt->que;
712 802
 713 	/* Check active list for command. */ 803 	/* Check active list for command. */
714 spin_lock_irqsave(&pha->hardware_lock, flags); 804 spin_lock_irqsave(&ha->hardware_lock, flags);
715 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 805 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
716 sp = pha->outstanding_cmds[i]; 806 sp = req->outstanding_cmds[i];
717 807
718 if (sp == NULL) 808 if (sp == NULL)
719 continue; 809 continue;
@@ -721,38 +811,36 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
721 if (sp->cmd != cmd) 811 if (sp->cmd != cmd)
722 continue; 812 continue;
723 813
724 DEBUG2(printk("%s(%ld): aborting sp %p from RISC. pid=%ld.\n", 814 DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
725 __func__, ha->host_no, sp, serial)); 815 " pid=%ld.\n", __func__, vha->host_no, sp, serial));
726 816
727 spin_unlock_irqrestore(&pha->hardware_lock, flags); 817 spin_unlock_irqrestore(&ha->hardware_lock, flags);
728 if (ha->isp_ops->abort_command(ha, sp)) { 818 if (ha->isp_ops->abort_command(vha, sp, req)) {
729 DEBUG2(printk("%s(%ld): abort_command " 819 DEBUG2(printk("%s(%ld): abort_command "
730 "mbx failed.\n", __func__, ha->host_no)); 820 "mbx failed.\n", __func__, vha->host_no));
731 ret = FAILED;
732 } else { 821 } else {
733 DEBUG3(printk("%s(%ld): abort_command " 822 DEBUG3(printk("%s(%ld): abort_command "
734 "mbx success.\n", __func__, ha->host_no)); 823 "mbx success.\n", __func__, vha->host_no));
735 wait = 1; 824 wait = 1;
736 } 825 }
737 spin_lock_irqsave(&pha->hardware_lock, flags); 826 spin_lock_irqsave(&ha->hardware_lock, flags);
738
739 break; 827 break;
740 } 828 }
741 spin_unlock_irqrestore(&pha->hardware_lock, flags); 829 spin_unlock_irqrestore(&ha->hardware_lock, flags);
742 830
743 /* Wait for the command to be returned. */ 831 /* Wait for the command to be returned. */
744 if (wait) { 832 if (wait) {
745 if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { 833 if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
746 qla_printk(KERN_ERR, ha, 834 qla_printk(KERN_ERR, ha,
747 "scsi(%ld:%d:%d): Abort handler timed out -- %lx " 835 "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
748 "%x.\n", ha->host_no, id, lun, serial, ret); 836 "%x.\n", vha->host_no, id, lun, serial, ret);
749 ret = FAILED; 837 ret = FAILED;
750 } 838 }
751 } 839 }
752 840
753 qla_printk(KERN_INFO, ha, 841 qla_printk(KERN_INFO, ha,
754 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", 842 "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
755 ha->host_no, id, lun, wait, serial, ret); 843 vha->host_no, id, lun, wait, serial, ret);
756 844
757 return ret; 845 return ret;
758} 846}
@@ -764,23 +852,27 @@ enum nexus_wait_type {
764}; 852};
765 853
766static int 854static int
767qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t, 855qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
768 unsigned int l, enum nexus_wait_type type) 856 unsigned int l, srb_t *sp, enum nexus_wait_type type)
769{ 857{
770 int cnt, match, status; 858 int cnt, match, status;
771 srb_t *sp;
772 unsigned long flags; 859 unsigned long flags;
773 scsi_qla_host_t *pha = to_qla_parent(ha); 860 struct qla_hw_data *ha = vha->hw;
861 struct req_que *req;
774 862
775 status = QLA_SUCCESS; 863 status = QLA_SUCCESS;
776 spin_lock_irqsave(&pha->hardware_lock, flags); 864 if (!sp)
777 for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; 865 return status;
778 cnt++) { 866
779 sp = pha->outstanding_cmds[cnt]; 867 spin_lock_irqsave(&ha->hardware_lock, flags);
868 req = sp->que;
869 for (cnt = 1; status == QLA_SUCCESS &&
870 cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
871 sp = req->outstanding_cmds[cnt];
780 if (!sp) 872 if (!sp)
781 continue; 873 continue;
782 874
783 if (ha->vp_idx != sp->fcport->ha->vp_idx) 875 if (vha->vp_idx != sp->fcport->vha->vp_idx)
784 continue; 876 continue;
785 match = 0; 877 match = 0;
786 switch (type) { 878 switch (type) {
@@ -792,17 +884,17 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *ha, unsigned int t,
792 break; 884 break;
793 case WAIT_LUN: 885 case WAIT_LUN:
794 match = (sp->cmd->device->id == t && 886 match = (sp->cmd->device->id == t &&
795 sp->cmd->device->lun == l); 887 sp->cmd->device->lun == l);
796 break; 888 break;
797 } 889 }
798 if (!match) 890 if (!match)
799 continue; 891 continue;
800 892
801 spin_unlock_irqrestore(&pha->hardware_lock, flags); 893 spin_unlock_irqrestore(&ha->hardware_lock, flags);
802 status = qla2x00_eh_wait_on_command(ha, sp->cmd); 894 status = qla2x00_eh_wait_on_command(sp->cmd);
803 spin_lock_irqsave(&pha->hardware_lock, flags); 895 spin_lock_irqsave(&ha->hardware_lock, flags);
804 } 896 }
805 spin_unlock_irqrestore(&pha->hardware_lock, flags); 897 spin_unlock_irqrestore(&ha->hardware_lock, flags);
806 898
807 return status; 899 return status;
808} 900}
@@ -818,7 +910,7 @@ static int
818__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, 910__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
819 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) 911 struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
820{ 912{
821 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 913 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
822 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 914 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
823 int err; 915 int err;
824 916
@@ -827,31 +919,31 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
827 if (!fcport) 919 if (!fcport)
828 return FAILED; 920 return FAILED;
829 921
830 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", 922 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
831 ha->host_no, cmd->device->id, cmd->device->lun, name); 923 vha->host_no, cmd->device->id, cmd->device->lun, name);
832 924
833 err = 0; 925 err = 0;
834 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 926 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
835 goto eh_reset_failed; 927 goto eh_reset_failed;
836 err = 1; 928 err = 1;
837 if (qla2x00_wait_for_loop_ready(ha) != QLA_SUCCESS) 929 if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
838 goto eh_reset_failed; 930 goto eh_reset_failed;
839 err = 2; 931 err = 2;
840 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) 932 if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
841 goto eh_reset_failed; 933 goto eh_reset_failed;
842 err = 3; 934 err = 3;
843 if (qla2x00_eh_wait_for_pending_commands(ha, cmd->device->id, 935 if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
844 cmd->device->lun, type) != QLA_SUCCESS) 936 cmd->device->lun, (srb_t *) CMD_SP(cmd), type) != QLA_SUCCESS)
845 goto eh_reset_failed; 937 goto eh_reset_failed;
846 938
847 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n", 939 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET SUCCEEDED.\n",
848 ha->host_no, cmd->device->id, cmd->device->lun, name); 940 vha->host_no, cmd->device->id, cmd->device->lun, name);
849 941
850 return SUCCESS; 942 return SUCCESS;
851 943
852 eh_reset_failed: 944 eh_reset_failed:
853 qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n", 945 qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET FAILED: %s.\n"
854 ha->host_no, cmd->device->id, cmd->device->lun, name, 946 , vha->host_no, cmd->device->id, cmd->device->lun, name,
855 reset_errors[err]); 947 reset_errors[err]);
856 return FAILED; 948 return FAILED;
857} 949}
@@ -859,7 +951,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
859static int 951static int
860qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) 952qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
861{ 953{
862 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 954 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
955 struct qla_hw_data *ha = vha->hw;
863 956
864 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd, 957 return __qla2xxx_eh_generic_reset("DEVICE", WAIT_LUN, cmd,
865 ha->isp_ops->lun_reset); 958 ha->isp_ops->lun_reset);
@@ -868,7 +961,8 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
868static int 961static int
869qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) 962qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
870{ 963{
871 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 964 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
965 struct qla_hw_data *ha = vha->hw;
872 966
873 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd, 967 return __qla2xxx_eh_generic_reset("TARGET", WAIT_TARGET, cmd,
874 ha->isp_ops->target_reset); 968 ha->isp_ops->target_reset);
@@ -892,12 +986,12 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
892static int 986static int
893qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 987qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
894{ 988{
895 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 989 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
896 scsi_qla_host_t *pha = to_qla_parent(ha);
897 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 990 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
898 int ret = FAILED; 991 int ret = FAILED;
899 unsigned int id, lun; 992 unsigned int id, lun;
900 unsigned long serial; 993 unsigned long serial;
994 srb_t *sp = (srb_t *) CMD_SP(cmd);
901 995
902 qla2x00_block_error_handler(cmd); 996 qla2x00_block_error_handler(cmd);
903 997
@@ -908,28 +1002,28 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
908 if (!fcport) 1002 if (!fcport)
909 return ret; 1003 return ret;
910 1004
911 qla_printk(KERN_INFO, ha, 1005 qla_printk(KERN_INFO, vha->hw,
912 "scsi(%ld:%d:%d): LOOP RESET ISSUED.\n", ha->host_no, id, lun); 1006 "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
913 1007
914 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) { 1008 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
915 DEBUG2(printk("%s failed:board disabled\n",__func__)); 1009 DEBUG2(printk("%s failed:board disabled\n",__func__));
916 goto eh_bus_reset_done; 1010 goto eh_bus_reset_done;
917 } 1011 }
918 1012
919 if (qla2x00_wait_for_loop_ready(ha) == QLA_SUCCESS) { 1013 if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
920 if (qla2x00_loop_reset(ha) == QLA_SUCCESS) 1014 if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
921 ret = SUCCESS; 1015 ret = SUCCESS;
922 } 1016 }
923 if (ret == FAILED) 1017 if (ret == FAILED)
924 goto eh_bus_reset_done; 1018 goto eh_bus_reset_done;
925 1019
926 /* Flush outstanding commands. */ 1020 /* Flush outstanding commands. */
927 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) != 1021 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) !=
928 QLA_SUCCESS) 1022 QLA_SUCCESS)
929 ret = FAILED; 1023 ret = FAILED;
930 1024
931eh_bus_reset_done: 1025eh_bus_reset_done:
932 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1026 qla_printk(KERN_INFO, vha->hw, "%s: reset %s\n", __func__,
933 (ret == FAILED) ? "failed" : "succeded"); 1027 (ret == FAILED) ? "failed" : "succeded");
934 1028
935 return ret; 1029 return ret;
@@ -953,12 +1047,14 @@ eh_bus_reset_done:
953static int 1047static int
954qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) 1048qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
955{ 1049{
956 scsi_qla_host_t *ha = shost_priv(cmd->device->host); 1050 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
957 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 1051 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
1052 struct qla_hw_data *ha = vha->hw;
958 int ret = FAILED; 1053 int ret = FAILED;
959 unsigned int id, lun; 1054 unsigned int id, lun;
960 unsigned long serial; 1055 unsigned long serial;
961 scsi_qla_host_t *pha = to_qla_parent(ha); 1056 srb_t *sp = (srb_t *) CMD_SP(cmd);
1057 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
962 1058
963 qla2x00_block_error_handler(cmd); 1059 qla2x00_block_error_handler(cmd);
964 1060
@@ -970,9 +1066,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
970 return ret; 1066 return ret;
971 1067
972 qla_printk(KERN_INFO, ha, 1068 qla_printk(KERN_INFO, ha,
973 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", ha->host_no, id, lun); 1069 "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
974 1070
975 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 1071 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
976 goto eh_host_reset_lock; 1072 goto eh_host_reset_lock;
977 1073
978 /* 1074 /*
@@ -983,26 +1079,28 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
983 * devices as lost kicking of the port_down_timer 1079 * devices as lost kicking of the port_down_timer
984 * while dpc is stuck for the mailbox to complete. 1080 * while dpc is stuck for the mailbox to complete.
985 */ 1081 */
986 qla2x00_wait_for_loop_ready(ha); 1082 qla2x00_wait_for_loop_ready(vha);
987 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags); 1083 if (vha != base_vha) {
988 if (qla2x00_abort_isp(pha)) { 1084 if (qla2x00_vp_abort_isp(vha))
989 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
990 /* failed. schedule dpc to try */
991 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
992
993 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
994 goto eh_host_reset_lock; 1085 goto eh_host_reset_lock;
1086 } else {
1087 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1088 if (qla2x00_abort_isp(base_vha)) {
1089 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1090 /* failed. schedule dpc to try */
1091 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1092
1093 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
1094 goto eh_host_reset_lock;
1095 }
1096 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
995 } 1097 }
996 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
997 1098
998 /* Waiting for our command in done_queue to be returned to OS.*/ 1099 /* Waiting for command to be returned to OS.*/
999 if (qla2x00_eh_wait_for_pending_commands(pha, 0, 0, WAIT_HOST) == 1100 if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, sp, WAIT_HOST) ==
1000 QLA_SUCCESS) 1101 QLA_SUCCESS)
1001 ret = SUCCESS; 1102 ret = SUCCESS;
1002 1103
1003 if (ha->parent)
1004 qla2x00_vp_abort_isp(ha);
1005
1006eh_host_reset_lock: 1104eh_host_reset_lock:
1007 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1105 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1008 (ret == FAILED) ? "failed" : "succeded"); 1106 (ret == FAILED) ? "failed" : "succeded");
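
The reworked adapter-reset path now distinguishes a virtual port from the physical one: an NPIV vport only tears down its own state through qla2x00_vp_abort_isp(), while the base port drives the full ISP abort and, on failure, re-arms ISP_ABORT_NEEDED so the DPC thread retries. A condensed, compilable model of that branch; the structs and abort helpers below are stand-ins, not the driver's definitions:

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins; not the driver's scsi_qla_host / qla_hw_data types. */
struct vhost {
	struct vhost *base_vha;        /* physical port backing this (v)port */
	unsigned long dpc_flags;       /* bit 0 stands in for ISP_ABORT_NEEDED */
};

static bool vp_abort_isp(struct vhost *v)   { (void)v; return true; }   /* stub */
static bool full_abort_isp(struct vhost *v) { (void)v; return false; }  /* stub: "failed" */

static bool host_reset_core(struct vhost *vha)
{
	struct vhost *base_vha = vha->base_vha;

	if (vha != base_vha)                 /* NPIV vport: abort only this vport */
		return vp_abort_isp(vha);

	if (!full_abort_isp(base_vha)) {     /* base port: full ISP abort */
		base_vha->dpc_flags |= 1UL;  /* failed: let the DPC thread retry */
		return false;
	}
	return true;
}

int main(void)
{
	struct vhost base = { 0 };

	base.base_vha = &base;
	printf("reset ok=%d, dpc retry scheduled=%lu\n",
	    host_reset_core(&base), base.dpc_flags & 1UL);
	return 0;
}
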
@@ -1021,35 +1119,36 @@ eh_host_reset_lock:
1021* 0 = success 1119* 0 = success
1022*/ 1120*/
1023int 1121int
1024qla2x00_loop_reset(scsi_qla_host_t *ha) 1122qla2x00_loop_reset(scsi_qla_host_t *vha)
1025{ 1123{
1026 int ret; 1124 int ret;
1027 struct fc_port *fcport; 1125 struct fc_port *fcport;
1126 struct qla_hw_data *ha = vha->hw;
1028 1127
1029 if (ha->flags.enable_lip_full_login) { 1128 if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
1030 ret = qla2x00_full_login_lip(ha); 1129 ret = qla2x00_full_login_lip(vha);
1031 if (ret != QLA_SUCCESS) { 1130 if (ret != QLA_SUCCESS) {
1032 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1131 DEBUG2_3(printk("%s(%ld): failed: "
1033 "full_login_lip=%d.\n", __func__, ha->host_no, 1132 "full_login_lip=%d.\n", __func__, vha->host_no,
1034 ret)); 1133 ret));
1035 } 1134 }
1036 atomic_set(&ha->loop_state, LOOP_DOWN); 1135 atomic_set(&vha->loop_state, LOOP_DOWN);
1037 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); 1136 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1038 qla2x00_mark_all_devices_lost(ha, 0); 1137 qla2x00_mark_all_devices_lost(vha, 0);
1039 qla2x00_wait_for_loop_ready(ha); 1138 qla2x00_wait_for_loop_ready(vha);
1040 } 1139 }
1041 1140
1042 if (ha->flags.enable_lip_reset) { 1141 if (ha->flags.enable_lip_reset && !vha->vp_idx) {
1043 ret = qla2x00_lip_reset(ha); 1142 ret = qla2x00_lip_reset(vha);
1044 if (ret != QLA_SUCCESS) { 1143 if (ret != QLA_SUCCESS) {
1045 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1144 DEBUG2_3(printk("%s(%ld): failed: "
1046 "lip_reset=%d.\n", __func__, ha->host_no, ret)); 1145 "lip_reset=%d.\n", __func__, vha->host_no, ret));
1047 } 1146 } else
1048 qla2x00_wait_for_loop_ready(ha); 1147 qla2x00_wait_for_loop_ready(vha);
1049 } 1148 }
1050 1149
1051 if (ha->flags.enable_target_reset) { 1150 if (ha->flags.enable_target_reset) {
1052 list_for_each_entry(fcport, &ha->fcports, list) { 1151 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1053 if (fcport->port_type != FCT_TARGET) 1152 if (fcport->port_type != FCT_TARGET)
1054 continue; 1153 continue;
1055 1154
@@ -1057,31 +1156,37 @@ qla2x00_loop_reset(scsi_qla_host_t *ha)
1057 if (ret != QLA_SUCCESS) { 1156 if (ret != QLA_SUCCESS) {
1058 DEBUG2_3(printk("%s(%ld): bus_reset failed: " 1157 DEBUG2_3(printk("%s(%ld): bus_reset failed: "
1059 "target_reset=%d d_id=%x.\n", __func__, 1158 "target_reset=%d d_id=%x.\n", __func__,
1060 ha->host_no, ret, fcport->d_id.b24)); 1159 vha->host_no, ret, fcport->d_id.b24));
1061 } 1160 }
1062 } 1161 }
1063 } 1162 }
1064
1065 /* Issue marker command only when we are going to start the I/O */ 1163 /* Issue marker command only when we are going to start the I/O */
1066 ha->marker_needed = 1; 1164 vha->marker_needed = 1;
1067 1165
1068 return QLA_SUCCESS; 1166 return QLA_SUCCESS;
1069} 1167}
1070 1168
1071void 1169void
1072qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res) 1170qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1073{ 1171{
1074 int cnt; 1172 int que, cnt;
1075 unsigned long flags; 1173 unsigned long flags;
1076 srb_t *sp; 1174 srb_t *sp;
1175 struct qla_hw_data *ha = vha->hw;
1176 struct req_que *req;
1077 1177
1078 spin_lock_irqsave(&ha->hardware_lock, flags); 1178 spin_lock_irqsave(&ha->hardware_lock, flags);
1079 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1179 for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
1080 sp = ha->outstanding_cmds[cnt]; 1180 req = ha->req_q_map[vha->req_ques[que]];
1081 if (sp) { 1181 if (!req)
1082 ha->outstanding_cmds[cnt] = NULL; 1182 continue;
1083 sp->cmd->result = res; 1183 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1084 qla2x00_sp_compl(ha, sp); 1184 sp = req->outstanding_cmds[cnt];
1185 if (sp && sp->vha == vha) {
1186 req->outstanding_cmds[cnt] = NULL;
1187 sp->cmd->result = res;
1188 qla2x00_sp_compl(ha, sp);
1189 }
1085 } 1190 }
1086 } 1191 }
1087 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1192 spin_unlock_irqrestore(&ha->hardware_lock, flags);
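
With the multiqueue rework, outstanding commands are tracked per request queue instead of on the host structure, so flushing a vport walks each of its mapped queues and completes only the SRBs that belong to that vport. A simplified, compilable model of the traversal; the types and sizes below are stand-ins for the driver's req_q_map, req_ques[] and MAX_OUTSTANDING_COMMANDS:

#include <stdio.h>

#define MAX_HOST_QUES   4     /* stand-in for QLA_MAX_HOST_QUES */
#define MAX_OUTSTANDING 8     /* stand-in for MAX_OUTSTANDING_COMMANDS */

struct vhost;                                    /* per-(v)port, scsi_qla_host */
struct srb { struct vhost *vha; int result; };   /* one outstanding command */
struct req_queue { struct srb *outstanding[MAX_OUTSTANDING]; };
struct hw { struct req_queue *req_q_map[MAX_HOST_QUES]; };
struct vhost { struct hw *hw; int req_ques[MAX_HOST_QUES]; };

static void flush_vha_cmds(struct vhost *vha, int res)
{
	struct hw *ha = vha->hw;

	for (int que = 0; que < MAX_HOST_QUES; que++) {
		struct req_queue *req = ha->req_q_map[vha->req_ques[que]];

		if (!req)
			continue;
		/* slot 0 is reserved, so start at 1, as the driver does */
		for (int cnt = 1; cnt < MAX_OUTSTANDING; cnt++) {
			struct srb *sp = req->outstanding[cnt];

			if (sp && sp->vha == vha) {       /* only this vport's I/O */
				req->outstanding[cnt] = NULL;
				sp->result = res;
			}
		}
	}
}

int main(void)
{
	static struct req_queue q0;
	struct hw ha = { .req_q_map = { &q0 } };
	struct vhost vha = { .hw = &ha };     /* req_ques[] all map to queue 0 */
	struct srb io = { .vha = &vha };

	q0.outstanding[1] = &io;
	flush_vha_cmds(&vha, 0x11 << 16);     /* DID_NO_CONNECT << 16 */
	printf("slot emptied: %s\n", q0.outstanding[1] ? "no" : "yes");
	return 0;
}
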
@@ -1103,13 +1208,15 @@ qla2xxx_slave_alloc(struct scsi_device *sdev)
1103static int 1208static int
1104qla2xxx_slave_configure(struct scsi_device *sdev) 1209qla2xxx_slave_configure(struct scsi_device *sdev)
1105{ 1210{
1106 scsi_qla_host_t *ha = shost_priv(sdev->host); 1211 scsi_qla_host_t *vha = shost_priv(sdev->host);
1212 struct qla_hw_data *ha = vha->hw;
1107 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1213 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1214 struct req_que *req = ha->req_q_map[0];
1108 1215
1109 if (sdev->tagged_supported) 1216 if (sdev->tagged_supported)
1110 scsi_activate_tcq(sdev, ha->max_q_depth); 1217 scsi_activate_tcq(sdev, req->max_q_depth);
1111 else 1218 else
1112 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1219 scsi_deactivate_tcq(sdev, req->max_q_depth);
1113 1220
1114 rport->dev_loss_tmo = ha->port_down_retry_count; 1221 rport->dev_loss_tmo = ha->port_down_retry_count;
1115 1222
@@ -1152,8 +1259,9 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
1152 * supported addressing method. 1259 * supported addressing method.
1153 */ 1260 */
1154static void 1261static void
1155qla2x00_config_dma_addressing(scsi_qla_host_t *ha) 1262qla2x00_config_dma_addressing(scsi_qla_host_t *vha)
1156{ 1263{
1264 struct qla_hw_data *ha = vha->hw;
1157 /* Assume a 32bit DMA mask. */ 1265 /* Assume a 32bit DMA mask. */
1158 ha->flags.enable_64bit_addressing = 0; 1266 ha->flags.enable_64bit_addressing = 0;
1159 1267
@@ -1174,7 +1282,7 @@ qla2x00_config_dma_addressing(scsi_qla_host_t *ha)
1174} 1282}
1175 1283
1176static void 1284static void
1177qla2x00_enable_intrs(scsi_qla_host_t *ha) 1285qla2x00_enable_intrs(struct qla_hw_data *ha)
1178{ 1286{
1179 unsigned long flags = 0; 1287 unsigned long flags = 0;
1180 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1288 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1189,7 +1297,7 @@ qla2x00_enable_intrs(scsi_qla_host_t *ha)
1189} 1297}
1190 1298
1191static void 1299static void
1192qla2x00_disable_intrs(scsi_qla_host_t *ha) 1300qla2x00_disable_intrs(struct qla_hw_data *ha)
1193{ 1301{
1194 unsigned long flags = 0; 1302 unsigned long flags = 0;
1195 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1303 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1203,7 +1311,7 @@ qla2x00_disable_intrs(scsi_qla_host_t *ha)
1203} 1311}
1204 1312
1205static void 1313static void
1206qla24xx_enable_intrs(scsi_qla_host_t *ha) 1314qla24xx_enable_intrs(struct qla_hw_data *ha)
1207{ 1315{
1208 unsigned long flags = 0; 1316 unsigned long flags = 0;
1209 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1317 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1216,7 +1324,7 @@ qla24xx_enable_intrs(scsi_qla_host_t *ha)
1216} 1324}
1217 1325
1218static void 1326static void
1219qla24xx_disable_intrs(scsi_qla_host_t *ha) 1327qla24xx_disable_intrs(struct qla_hw_data *ha)
1220{ 1328{
1221 unsigned long flags = 0; 1329 unsigned long flags = 0;
1222 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1330 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -1260,6 +1368,10 @@ static struct isp_operations qla2100_isp_ops = {
1260 .read_optrom = qla2x00_read_optrom_data, 1368 .read_optrom = qla2x00_read_optrom_data,
1261 .write_optrom = qla2x00_write_optrom_data, 1369 .write_optrom = qla2x00_write_optrom_data,
1262 .get_flash_version = qla2x00_get_flash_version, 1370 .get_flash_version = qla2x00_get_flash_version,
1371 .start_scsi = qla2x00_start_scsi,
1372 .wrt_req_reg = NULL,
1373 .wrt_rsp_reg = NULL,
1374 .rd_req_reg = NULL,
1263}; 1375};
1264 1376
1265static struct isp_operations qla2300_isp_ops = { 1377static struct isp_operations qla2300_isp_ops = {
@@ -1294,6 +1406,10 @@ static struct isp_operations qla2300_isp_ops = {
1294 .read_optrom = qla2x00_read_optrom_data, 1406 .read_optrom = qla2x00_read_optrom_data,
1295 .write_optrom = qla2x00_write_optrom_data, 1407 .write_optrom = qla2x00_write_optrom_data,
1296 .get_flash_version = qla2x00_get_flash_version, 1408 .get_flash_version = qla2x00_get_flash_version,
1409 .start_scsi = qla2x00_start_scsi,
1410 .wrt_req_reg = NULL,
1411 .wrt_rsp_reg = NULL,
1412 .rd_req_reg = NULL,
1297}; 1413};
1298 1414
1299static struct isp_operations qla24xx_isp_ops = { 1415static struct isp_operations qla24xx_isp_ops = {
@@ -1328,6 +1444,10 @@ static struct isp_operations qla24xx_isp_ops = {
1328 .read_optrom = qla24xx_read_optrom_data, 1444 .read_optrom = qla24xx_read_optrom_data,
1329 .write_optrom = qla24xx_write_optrom_data, 1445 .write_optrom = qla24xx_write_optrom_data,
1330 .get_flash_version = qla24xx_get_flash_version, 1446 .get_flash_version = qla24xx_get_flash_version,
1447 .start_scsi = qla24xx_start_scsi,
1448 .wrt_req_reg = qla24xx_wrt_req_reg,
1449 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1450 .rd_req_reg = qla24xx_rd_req_reg,
1331}; 1451};
1332 1452
1333static struct isp_operations qla25xx_isp_ops = { 1453static struct isp_operations qla25xx_isp_ops = {
@@ -1362,10 +1482,14 @@ static struct isp_operations qla25xx_isp_ops = {
1362 .read_optrom = qla25xx_read_optrom_data, 1482 .read_optrom = qla25xx_read_optrom_data,
1363 .write_optrom = qla24xx_write_optrom_data, 1483 .write_optrom = qla24xx_write_optrom_data,
1364 .get_flash_version = qla24xx_get_flash_version, 1484 .get_flash_version = qla24xx_get_flash_version,
1485 .start_scsi = qla24xx_start_scsi,
1486 .wrt_req_reg = qla24xx_wrt_req_reg,
1487 .wrt_rsp_reg = qla24xx_wrt_rsp_reg,
1488 .rd_req_reg = qla24xx_rd_req_reg,
1365}; 1489};
1366 1490
1367static inline void 1491static inline void
1368qla2x00_set_isp_flags(scsi_qla_host_t *ha) 1492qla2x00_set_isp_flags(struct qla_hw_data *ha)
1369{ 1493{
1370 ha->device_type = DT_EXTENDED_IDS; 1494 ha->device_type = DT_EXTENDED_IDS;
1371 switch (ha->pdev->device) { 1495 switch (ha->pdev->device) {
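
Each ISP family's ops table gains hooks for starting SCSI I/O and for accessing request/response-queue registers; the pre-24xx tables leave the queue-register hooks NULL, and the probe path later substitutes qla25xx_* variants once multiqueue is enabled. A small sketch of the pattern with invented hook names; the defensive NULL check in the dispatcher is illustrative only, the driver simply never reaches these hooks on chips that lack them:

#include <stdio.h>
#include <stdint.h>

struct hw;

/* Reduced ops table: the queue-register hooks are optional (may be NULL). */
struct isp_ops {
	int  (*start_scsi)(struct hw *ha);
	void (*wrt_req_reg)(struct hw *ha, uint16_t id, uint16_t index);
};

struct hw { const struct isp_ops *isp_ops; };

static int generic_start_scsi(struct hw *ha) { (void)ha; return 0; }     /* stub */

static void mq_wrt_req_reg(struct hw *ha, uint16_t id, uint16_t index)   /* stub */
{
	(void)ha;
	printf("request queue %u: in-pointer <- %u\n", id, index);
}

static const struct isp_ops legacy_ops = { .start_scsi = generic_start_scsi };
static const struct isp_ops mq_ops = {
	.start_scsi  = generic_start_scsi,
	.wrt_req_reg = mq_wrt_req_reg,
};

static void ring_doorbell(struct hw *ha, uint16_t id, uint16_t index)
{
	if (ha->isp_ops->wrt_req_reg)   /* hook absent on pre-24xx chips */
		ha->isp_ops->wrt_req_reg(ha, id, index);
}

int main(void)
{
	struct hw old_chip = { .isp_ops = &legacy_ops };
	struct hw new_chip = { .isp_ops = &mq_ops };

	ring_doorbell(&old_chip, 0, 5);  /* no-op */
	ring_doorbell(&new_chip, 0, 5);
	return 0;
}
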
@@ -1447,9 +1571,10 @@ qla2x00_set_isp_flags(scsi_qla_host_t *ha)
1447} 1571}
1448 1572
1449static int 1573static int
1450qla2x00_iospace_config(scsi_qla_host_t *ha) 1574qla2x00_iospace_config(struct qla_hw_data *ha)
1451{ 1575{
1452 resource_size_t pio; 1576 resource_size_t pio;
1577 uint16_t msix;
1453 1578
1454 if (pci_request_selected_regions(ha->pdev, ha->bars, 1579 if (pci_request_selected_regions(ha->pdev, ha->bars,
1455 QLA2XXX_DRIVER_NAME)) { 1580 QLA2XXX_DRIVER_NAME)) {
@@ -1502,6 +1627,30 @@ skip_pio:
1502 goto iospace_error_exit; 1627 goto iospace_error_exit;
1503 } 1628 }
1504 1629
1630 /* Determine queue resources */
1631 ha->max_queues = 1;
1632 if (ql2xmaxqueues <= 1 || !IS_QLA25XX(ha))
1633 goto mqiobase_exit;
1634 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
1635 pci_resource_len(ha->pdev, 3));
1636 if (ha->mqiobase) {
1637 /* Read MSIX vector size of the board */
1638 pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
1639 ha->msix_count = msix;
1640 /* Max queues are bounded by available msix vectors */
1641 /* queue 0 uses two msix vectors */
1642 if (ha->msix_count - 1 < ql2xmaxqueues)
1643 ha->max_queues = ha->msix_count - 1;
1644 else if (ql2xmaxqueues > QLA_MQ_SIZE)
1645 ha->max_queues = QLA_MQ_SIZE;
1646 else
1647 ha->max_queues = ql2xmaxqueues;
1648 qla_printk(KERN_INFO, ha,
1649 "MSI-X vector count: %d\n", msix);
1650 }
1651
1652mqiobase_exit:
1653 ha->msix_count = ha->max_queues + 1;
1505 return (0); 1654 return (0);
1506 1655
1507iospace_error_exit: 1656iospace_error_exit:
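
The queue-sizing logic above amounts to a clamp: queue 0 consumes one extra MSI-X vector, so at most msix_count - 1 additional queues are usable, further limited by the ql2xmaxqueues module parameter and the driver's QLA_MQ_SIZE ceiling, and msix_count is then rewritten to max_queues + 1. A minimal sketch of the same arithmetic (the QLA_MQ_SIZE value here is illustrative; see the driver's header for the real one):

#include <stdio.h>

#define QLA_MQ_SIZE 32   /* illustrative ceiling, not necessarily the driver's value */

/* Mirrors the if/else chain above; queue 0 always costs one extra vector. */
static int max_queues(int msix_count, int requested /* ql2xmaxqueues */)
{
	if (msix_count - 1 < requested)
		return msix_count - 1;
	if (requested > QLA_MQ_SIZE)
		return QLA_MQ_SIZE;
	return requested;
}

int main(void)
{
	/* a board reporting 8 MSI-X vectors with ql2xmaxqueues=4 */
	int q = max_queues(8, 4);

	printf("max_queues=%d, msix_count=%d\n", q, q + 1);   /* 4 queues, 5 vectors */
	return 0;
}
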
@@ -1511,25 +1660,25 @@ iospace_error_exit:
1511static void 1660static void
1512qla2xxx_scan_start(struct Scsi_Host *shost) 1661qla2xxx_scan_start(struct Scsi_Host *shost)
1513{ 1662{
1514 scsi_qla_host_t *ha = shost_priv(shost); 1663 scsi_qla_host_t *vha = shost_priv(shost);
1515 1664
1516 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 1665 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1517 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 1666 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1518 set_bit(RSCN_UPDATE, &ha->dpc_flags); 1667 set_bit(RSCN_UPDATE, &vha->dpc_flags);
1519 set_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 1668 set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
1520} 1669}
1521 1670
1522static int 1671static int
1523qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) 1672qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
1524{ 1673{
1525 scsi_qla_host_t *ha = shost_priv(shost); 1674 scsi_qla_host_t *vha = shost_priv(shost);
1526 1675
1527 if (!ha->host) 1676 if (!vha->host)
1528 return 1; 1677 return 1;
1529 if (time > ha->loop_reset_delay * HZ) 1678 if (time > vha->hw->loop_reset_delay * HZ)
1530 return 1; 1679 return 1;
1531 1680
1532 return atomic_read(&ha->loop_state) == LOOP_READY; 1681 return atomic_read(&vha->loop_state) == LOOP_READY;
1533} 1682}
1534 1683
1535/* 1684/*
@@ -1540,11 +1689,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1540{ 1689{
1541 int ret = -ENODEV; 1690 int ret = -ENODEV;
1542 struct Scsi_Host *host; 1691 struct Scsi_Host *host;
1543 scsi_qla_host_t *ha; 1692 scsi_qla_host_t *base_vha = NULL;
1693 struct qla_hw_data *ha;
1544 char pci_info[30]; 1694 char pci_info[30];
1545 char fw_str[30]; 1695 char fw_str[30];
1546 struct scsi_host_template *sht; 1696 struct scsi_host_template *sht;
1547 int bars, mem_only = 0; 1697 int bars, max_id, mem_only = 0;
1698 uint16_t req_length = 0, rsp_length = 0;
1699 struct req_que *req = NULL;
1700 struct rsp_que *rsp = NULL;
1548 1701
1549 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 1702 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1550 sht = &qla2x00_driver_template; 1703 sht = &qla2x00_driver_template;
@@ -1570,33 +1723,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1570 /* This may fail but that's ok */ 1723 /* This may fail but that's ok */
1571 pci_enable_pcie_error_reporting(pdev); 1724 pci_enable_pcie_error_reporting(pdev);
1572 1725
1573 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t)); 1726 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1574 if (host == NULL) { 1727 if (!ha) {
1575 printk(KERN_WARNING 1728 DEBUG(printk("Unable to allocate memory for ha\n"));
1576 "qla2xxx: Couldn't allocate host from scsi layer!\n"); 1729 goto probe_out;
1577 goto probe_disable_device;
1578 } 1730 }
1731 ha->pdev = pdev;
1579 1732
1580 /* Clear our data area */ 1733 /* Clear our data area */
1581 ha = shost_priv(host);
1582 memset(ha, 0, sizeof(scsi_qla_host_t));
1583
1584 ha->pdev = pdev;
1585 ha->host = host;
1586 ha->host_no = host->host_no;
1587 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
1588 ha->parent = NULL;
1589 ha->bars = bars; 1734 ha->bars = bars;
1590 ha->mem_only = mem_only; 1735 ha->mem_only = mem_only;
1591 spin_lock_init(&ha->hardware_lock); 1736 spin_lock_init(&ha->hardware_lock);
1592 1737
1593 /* Set ISP-type information. */ 1738 /* Set ISP-type information. */
1594 qla2x00_set_isp_flags(ha); 1739 qla2x00_set_isp_flags(ha);
1595
1596 /* Configure PCI I/O space */ 1740 /* Configure PCI I/O space */
1597 ret = qla2x00_iospace_config(ha); 1741 ret = qla2x00_iospace_config(ha);
1598 if (ret) 1742 if (ret)
1599 goto probe_failed; 1743 goto probe_hw_failed;
1600 1744
1601 qla_printk(KERN_INFO, ha, 1745 qla_printk(KERN_INFO, ha,
1602 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq, 1746 "Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
@@ -1604,95 +1748,137 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1604 1748
1605 ha->prev_topology = 0; 1749 ha->prev_topology = 0;
1606 ha->init_cb_size = sizeof(init_cb_t); 1750 ha->init_cb_size = sizeof(init_cb_t);
1607 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
1608 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1751 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1609 ha->optrom_size = OPTROM_SIZE_2300; 1752 ha->optrom_size = OPTROM_SIZE_2300;
1610 1753
1611 ha->max_q_depth = MAX_Q_DEPTH;
1612 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1613 ha->max_q_depth = ql2xmaxqdepth;
1614
1615 /* Assign ISP specific operations. */ 1754 /* Assign ISP specific operations. */
1755 max_id = MAX_TARGETS_2200;
1616 if (IS_QLA2100(ha)) { 1756 if (IS_QLA2100(ha)) {
1617 host->max_id = MAX_TARGETS_2100; 1757 max_id = MAX_TARGETS_2100;
1618 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 1758 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
1619 ha->request_q_length = REQUEST_ENTRY_CNT_2100; 1759 req_length = REQUEST_ENTRY_CNT_2100;
1620 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1760 rsp_length = RESPONSE_ENTRY_CNT_2100;
1621 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1761 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1622 host->sg_tablesize = 32;
1623 ha->gid_list_info_size = 4; 1762 ha->gid_list_info_size = 4;
1624 ha->isp_ops = &qla2100_isp_ops; 1763 ha->isp_ops = &qla2100_isp_ops;
1625 } else if (IS_QLA2200(ha)) { 1764 } else if (IS_QLA2200(ha)) {
1626 host->max_id = MAX_TARGETS_2200;
1627 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1765 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1628 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1766 req_length = REQUEST_ENTRY_CNT_2200;
1629 ha->response_q_length = RESPONSE_ENTRY_CNT_2100; 1767 rsp_length = RESPONSE_ENTRY_CNT_2100;
1630 ha->last_loop_id = SNS_LAST_LOOP_ID_2100; 1768 ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
1631 ha->gid_list_info_size = 4; 1769 ha->gid_list_info_size = 4;
1632 ha->isp_ops = &qla2100_isp_ops; 1770 ha->isp_ops = &qla2100_isp_ops;
1633 } else if (IS_QLA23XX(ha)) { 1771 } else if (IS_QLA23XX(ha)) {
1634 host->max_id = MAX_TARGETS_2200;
1635 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1772 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1636 ha->request_q_length = REQUEST_ENTRY_CNT_2200; 1773 req_length = REQUEST_ENTRY_CNT_2200;
1637 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1774 rsp_length = RESPONSE_ENTRY_CNT_2300;
1638 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1775 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1639 ha->gid_list_info_size = 6; 1776 ha->gid_list_info_size = 6;
1640 if (IS_QLA2322(ha) || IS_QLA6322(ha)) 1777 if (IS_QLA2322(ha) || IS_QLA6322(ha))
1641 ha->optrom_size = OPTROM_SIZE_2322; 1778 ha->optrom_size = OPTROM_SIZE_2322;
1642 ha->isp_ops = &qla2300_isp_ops; 1779 ha->isp_ops = &qla2300_isp_ops;
1643 } else if (IS_QLA24XX_TYPE(ha)) { 1780 } else if (IS_QLA24XX_TYPE(ha)) {
1644 host->max_id = MAX_TARGETS_2200;
1645 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1781 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1646 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1782 req_length = REQUEST_ENTRY_CNT_24XX;
1647 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1783 rsp_length = RESPONSE_ENTRY_CNT_2300;
1648 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1784 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1649 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1785 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1650 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1651 ha->gid_list_info_size = 8; 1786 ha->gid_list_info_size = 8;
1652 ha->optrom_size = OPTROM_SIZE_24XX; 1787 ha->optrom_size = OPTROM_SIZE_24XX;
1788 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
1653 ha->isp_ops = &qla24xx_isp_ops; 1789 ha->isp_ops = &qla24xx_isp_ops;
1654 } else if (IS_QLA25XX(ha)) { 1790 } else if (IS_QLA25XX(ha)) {
1655 host->max_id = MAX_TARGETS_2200;
1656 ha->mbx_count = MAILBOX_REGISTER_COUNT; 1791 ha->mbx_count = MAILBOX_REGISTER_COUNT;
1657 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1792 req_length = REQUEST_ENTRY_CNT_24XX;
1658 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1793 rsp_length = RESPONSE_ENTRY_CNT_2300;
1659 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1794 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
1660 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 1795 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1661 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1662 ha->gid_list_info_size = 8; 1796 ha->gid_list_info_size = 8;
1663 ha->optrom_size = OPTROM_SIZE_25XX; 1797 ha->optrom_size = OPTROM_SIZE_25XX;
1798 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
1664 ha->isp_ops = &qla25xx_isp_ops; 1799 ha->isp_ops = &qla25xx_isp_ops;
1665 } 1800 }
1666 host->can_queue = ha->request_q_length + 128;
1667 1801
1668 mutex_init(&ha->vport_lock); 1802 mutex_init(&ha->vport_lock);
1669 init_completion(&ha->mbx_cmd_comp); 1803 init_completion(&ha->mbx_cmd_comp);
1670 complete(&ha->mbx_cmd_comp); 1804 complete(&ha->mbx_cmd_comp);
1671 init_completion(&ha->mbx_intr_comp); 1805 init_completion(&ha->mbx_intr_comp);
1672 1806
1673 INIT_LIST_HEAD(&ha->list);
1674 INIT_LIST_HEAD(&ha->fcports);
1675 INIT_LIST_HEAD(&ha->vp_list);
1676 INIT_LIST_HEAD(&ha->work_list);
1677
1678 set_bit(0, (unsigned long *) ha->vp_idx_map); 1807 set_bit(0, (unsigned long *) ha->vp_idx_map);
1679 1808
1680 qla2x00_config_dma_addressing(ha); 1809 ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
1681 if (qla2x00_mem_alloc(ha)) { 1810 if (!ret) {
1682 qla_printk(KERN_WARNING, ha, 1811 qla_printk(KERN_WARNING, ha,
1683 "[ERROR] Failed to allocate memory for adapter\n"); 1812 "[ERROR] Failed to allocate memory for adapter\n");
1684 1813
1814 goto probe_hw_failed;
1815 }
1816
1817 req->max_q_depth = MAX_Q_DEPTH;
1818 if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
1819 req->max_q_depth = ql2xmaxqdepth;
1820
1821
1822 base_vha = qla2x00_create_host(sht, ha);
1823 if (!base_vha) {
1824 qla_printk(KERN_WARNING, ha,
1825 "[ERROR] Failed to allocate memory for scsi_host\n");
1826
1685 ret = -ENOMEM; 1827 ret = -ENOMEM;
1828 goto probe_hw_failed;
1829 }
1830
1831 pci_set_drvdata(pdev, base_vha);
1832
1833 qla2x00_config_dma_addressing(base_vha);
1834
1835 host = base_vha->host;
1836 base_vha->req_ques[0] = req->id;
1837 host->can_queue = req->length + 128;
1838 if (IS_QLA2XXX_MIDTYPE(ha))
1839 base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
1840 else
1841 base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
1842 base_vha->vp_idx;
1843 if (IS_QLA2100(ha))
1844 host->sg_tablesize = 32;
1845 host->max_id = max_id;
1846 host->this_id = 255;
1847 host->cmd_per_lun = 3;
1848 host->unique_id = host->host_no;
1849 host->max_cmd_len = MAX_CMDSZ;
1850 host->max_channel = MAX_BUSES - 1;
1851 host->max_lun = MAX_LUNS;
1852 host->transportt = qla2xxx_transport_template;
1853
1854 /* Set up the irqs */
1855 ret = qla2x00_request_irqs(ha, rsp);
1856 if (ret)
1857 goto probe_failed;
1858
1859 /* Alloc arrays of request and response ring ptrs */
1860 if (!qla2x00_alloc_queues(ha)) {
1861 qla_printk(KERN_WARNING, ha,
1862 "[ERROR] Failed to allocate memory for queue"
1863 " pointers\n");
1686 goto probe_failed; 1864 goto probe_failed;
1687 } 1865 }
1866 ha->rsp_q_map[0] = rsp;
1867 ha->req_q_map[0] = req;
1688 1868
1689 if (qla2x00_initialize_adapter(ha)) { 1869 if (ha->mqenable) {
1870 ha->isp_ops->wrt_req_reg = qla25xx_wrt_req_reg;
1871 ha->isp_ops->wrt_rsp_reg = qla25xx_wrt_rsp_reg;
1872 ha->isp_ops->rd_req_reg = qla25xx_rd_req_reg;
1873 }
1874
1875 if (qla2x00_initialize_adapter(base_vha)) {
1690 qla_printk(KERN_WARNING, ha, 1876 qla_printk(KERN_WARNING, ha,
1691 "Failed to initialize adapter\n"); 1877 "Failed to initialize adapter\n");
1692 1878
1693 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - " 1879 DEBUG2(printk("scsi(%ld): Failed to initialize adapter - "
1694 "Adapter flags %x.\n", 1880 "Adapter flags %x.\n",
1695 ha->host_no, ha->device_flags)); 1881 base_vha->host_no, base_vha->device_flags));
1696 1882
1697 ret = -ENODEV; 1883 ret = -ENODEV;
1698 goto probe_failed; 1884 goto probe_failed;
@@ -1702,7 +1888,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1702 * Startup the kernel thread for this host adapter 1888 * Startup the kernel thread for this host adapter
1703 */ 1889 */
1704 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha, 1890 ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
1705 "%s_dpc", ha->host_str); 1891 "%s_dpc", base_vha->host_str);
1706 if (IS_ERR(ha->dpc_thread)) { 1892 if (IS_ERR(ha->dpc_thread)) {
1707 qla_printk(KERN_WARNING, ha, 1893 qla_printk(KERN_WARNING, ha,
1708 "Unable to start DPC thread!\n"); 1894 "Unable to start DPC thread!\n");
@@ -1710,28 +1896,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1710 goto probe_failed; 1896 goto probe_failed;
1711 } 1897 }
1712 1898
1713 host->this_id = 255; 1899 list_add_tail(&base_vha->list, &ha->vp_list);
1714 host->cmd_per_lun = 3; 1900 base_vha->host->irq = ha->pdev->irq;
1715 host->unique_id = host->host_no;
1716 host->max_cmd_len = MAX_CMDSZ;
1717 host->max_channel = MAX_BUSES - 1;
1718 host->max_lun = MAX_LUNS;
1719 host->transportt = qla2xxx_transport_template;
1720
1721 ret = qla2x00_request_irqs(ha);
1722 if (ret)
1723 goto probe_failed;
1724 1901
1725 /* Initialized the timer */ 1902 /* Initialized the timer */
1726 qla2x00_start_timer(ha, qla2x00_timer, WATCH_INTERVAL); 1903 qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL);
1727 1904
1728 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n", 1905 DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
1729 ha->host_no, ha)); 1906 base_vha->host_no, ha));
1730 1907
1731 pci_set_drvdata(pdev, ha); 1908 base_vha->flags.init_done = 1;
1732 1909 base_vha->flags.online = 1;
1733 ha->flags.init_done = 1;
1734 ha->flags.online = 1;
1735 1910
1736 ret = scsi_add_host(host, &pdev->dev); 1911 ret = scsi_add_host(host, &pdev->dev);
1737 if (ret) 1912 if (ret)
@@ -1741,76 +1916,98 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1741 1916
1742 scsi_scan_host(host); 1917 scsi_scan_host(host);
1743 1918
1744 qla2x00_alloc_sysfs_attr(ha); 1919 qla2x00_alloc_sysfs_attr(base_vha);
1745 1920
1746 qla2x00_init_host_attr(ha); 1921 qla2x00_init_host_attr(base_vha);
1747 1922
1748 qla2x00_dfs_setup(ha); 1923 qla2x00_dfs_setup(base_vha);
1749 1924
1750 qla_printk(KERN_INFO, ha, "\n" 1925 qla_printk(KERN_INFO, ha, "\n"
1751 " QLogic Fibre Channel HBA Driver: %s\n" 1926 " QLogic Fibre Channel HBA Driver: %s\n"
1752 " QLogic %s - %s\n" 1927 " QLogic %s - %s\n"
1753 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n", 1928 " ISP%04X: %s @ %s hdma%c, host#=%ld, fw=%s\n",
1754 qla2x00_version_str, ha->model_number, 1929 qla2x00_version_str, ha->model_number,
1755 ha->model_desc ? ha->model_desc: "", pdev->device, 1930 ha->model_desc ? ha->model_desc : "", pdev->device,
1756 ha->isp_ops->pci_info_str(ha, pci_info), pci_name(pdev), 1931 ha->isp_ops->pci_info_str(base_vha, pci_info), pci_name(pdev),
1757 ha->flags.enable_64bit_addressing ? '+': '-', ha->host_no, 1932 ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no,
1758 ha->isp_ops->fw_version_str(ha, fw_str)); 1933 ha->isp_ops->fw_version_str(base_vha, fw_str));
1759 1934
1760 return 0; 1935 return 0;
1761 1936
1762probe_failed: 1937probe_failed:
1763 qla2x00_free_device(ha); 1938 qla2x00_free_que(ha, req, rsp);
1939 qla2x00_free_device(base_vha);
1764 1940
1765 scsi_host_put(host); 1941 scsi_host_put(base_vha->host);
1766 1942
1767probe_disable_device: 1943probe_hw_failed:
1768 pci_disable_device(pdev); 1944 if (ha->iobase)
1945 iounmap(ha->iobase);
1946
1947 pci_release_selected_regions(ha->pdev, ha->bars);
1948 kfree(ha);
1949 ha = NULL;
1769 1950
1770probe_out: 1951probe_out:
1952 pci_disable_device(pdev);
1771 return ret; 1953 return ret;
1772} 1954}
1773 1955
1774static void 1956static void
1775qla2x00_remove_one(struct pci_dev *pdev) 1957qla2x00_remove_one(struct pci_dev *pdev)
1776{ 1958{
1777 scsi_qla_host_t *ha, *vha, *temp; 1959 scsi_qla_host_t *base_vha, *vha, *temp;
1960 struct qla_hw_data *ha;
1961
1962 base_vha = pci_get_drvdata(pdev);
1963 ha = base_vha->hw;
1964
1965 list_for_each_entry_safe(vha, temp, &ha->vp_list, list) {
1966 if (vha && vha->fc_vport)
1967 fc_vport_terminate(vha->fc_vport);
1968 }
1778 1969
1779 ha = pci_get_drvdata(pdev); 1970 set_bit(UNLOADING, &base_vha->dpc_flags);
1780 1971
1781 list_for_each_entry_safe(vha, temp, &ha->vp_list, vp_list) 1972 qla2x00_dfs_remove(base_vha);
1782 fc_vport_terminate(vha->fc_vport);
1783 1973
1784 set_bit(UNLOADING, &ha->dpc_flags); 1974 qla84xx_put_chip(base_vha);
1785 1975
1786 qla2x00_dfs_remove(ha); 1976 qla2x00_free_sysfs_attr(base_vha);
1787 1977
1788 qla84xx_put_chip(ha); 1978 fc_remove_host(base_vha->host);
1789 1979
1790 qla2x00_free_sysfs_attr(ha); 1980 scsi_remove_host(base_vha->host);
1791 1981
1792 fc_remove_host(ha->host); 1982 qla2x00_free_device(base_vha);
1793 1983
1794 scsi_remove_host(ha->host); 1984 scsi_host_put(base_vha->host);
1795 1985
1796 qla2x00_free_device(ha); 1986 if (ha->iobase)
1987 iounmap(ha->iobase);
1797 1988
1798 scsi_host_put(ha->host); 1989 if (ha->mqiobase)
1990 iounmap(ha->mqiobase);
1991
1992 pci_release_selected_regions(ha->pdev, ha->bars);
1993 kfree(ha);
1994 ha = NULL;
1799 1995
1800 pci_disable_device(pdev); 1996 pci_disable_device(pdev);
1801 pci_set_drvdata(pdev, NULL); 1997 pci_set_drvdata(pdev, NULL);
1802} 1998}
1803 1999
1804static void 2000static void
1805qla2x00_free_device(scsi_qla_host_t *ha) 2001qla2x00_free_device(scsi_qla_host_t *vha)
1806{ 2002{
1807 qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16); 2003 struct qla_hw_data *ha = vha->hw;
2004 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1808 2005
1809 /* Disable timer */ 2006 /* Disable timer */
1810 if (ha->timer_active) 2007 if (vha->timer_active)
1811 qla2x00_stop_timer(ha); 2008 qla2x00_stop_timer(vha);
1812 2009
1813 ha->flags.online = 0; 2010 vha->flags.online = 0;
1814 2011
1815 /* Kill the kernel thread for this host */ 2012 /* Kill the kernel thread for this host */
1816 if (ha->dpc_thread) { 2013 if (ha->dpc_thread) {
@@ -1825,45 +2022,41 @@ qla2x00_free_device(scsi_qla_host_t *ha)
1825 } 2022 }
1826 2023
1827 if (ha->flags.fce_enabled) 2024 if (ha->flags.fce_enabled)
1828 qla2x00_disable_fce_trace(ha, NULL, NULL); 2025 qla2x00_disable_fce_trace(vha, NULL, NULL);
1829 2026
1830 if (ha->eft) 2027 if (ha->eft)
1831 qla2x00_disable_eft_trace(ha); 2028 qla2x00_disable_eft_trace(vha);
1832 2029
1833 /* Stop currently executing firmware. */ 2030 /* Stop currently executing firmware. */
1834 qla2x00_try_to_stop_firmware(ha); 2031 qla2x00_try_to_stop_firmware(vha);
1835 2032
1836 /* turn-off interrupts on the card */ 2033 /* turn-off interrupts on the card */
1837 if (ha->interrupts_on) 2034 if (ha->interrupts_on)
1838 ha->isp_ops->disable_intrs(ha); 2035 ha->isp_ops->disable_intrs(ha);
1839 2036
1840 qla2x00_mem_free(ha); 2037 qla2x00_free_irqs(vha);
1841 2038
1842 qla2x00_free_irqs(ha); 2039 qla2x00_mem_free(ha);
1843 2040
1844 /* release io space registers */ 2041 qla2x00_free_queues(ha);
1845 if (ha->iobase)
1846 iounmap(ha->iobase);
1847 pci_release_selected_regions(ha->pdev, ha->bars);
1848} 2042}
1849 2043
1850static inline void 2044static inline void
1851qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 2045qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
1852 int defer) 2046 int defer)
1853{ 2047{
1854 struct fc_rport *rport; 2048 struct fc_rport *rport;
1855 scsi_qla_host_t *pha = to_qla_parent(ha);
1856 2049
1857 if (!fcport->rport) 2050 if (!fcport->rport)
1858 return; 2051 return;
1859 2052
1860 rport = fcport->rport; 2053 rport = fcport->rport;
1861 if (defer) { 2054 if (defer) {
1862 spin_lock_irq(ha->host->host_lock); 2055 spin_lock_irq(vha->host->host_lock);
1863 fcport->drport = rport; 2056 fcport->drport = rport;
1864 spin_unlock_irq(ha->host->host_lock); 2057 spin_unlock_irq(vha->host->host_lock);
1865 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags); 2058 set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
1866 qla2xxx_wake_dpc(pha); 2059 qla2xxx_wake_dpc(vha);
1867 } else 2060 } else
1868 fc_remote_port_delete(rport); 2061 fc_remote_port_delete(rport);
1869} 2062}
@@ -1877,13 +2070,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1877 * 2070 *
1878 * Context: 2071 * Context:
1879 */ 2072 */
1880void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 2073void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
1881 int do_login, int defer) 2074 int do_login, int defer)
1882{ 2075{
1883 if (atomic_read(&fcport->state) == FCS_ONLINE && 2076 if (atomic_read(&fcport->state) == FCS_ONLINE &&
1884 ha->vp_idx == fcport->vp_idx) 2077 vha->vp_idx == fcport->vp_idx) {
1885 qla2x00_schedule_rport_del(ha, fcport, defer); 2078 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1886 2079 qla2x00_schedule_rport_del(vha, fcport, defer);
2080 }
1887 /* 2081 /*
1888 * We may need to retry the login, so don't change the state of the 2082 * We may need to retry the login, so don't change the state of the
1889 * port but do the retries. 2083 * port but do the retries.
@@ -1895,13 +2089,13 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1895 return; 2089 return;
1896 2090
1897 if (fcport->login_retry == 0) { 2091 if (fcport->login_retry == 0) {
1898 fcport->login_retry = ha->login_retry_count; 2092 fcport->login_retry = vha->hw->login_retry_count;
1899 set_bit(RELOGIN_NEEDED, &ha->dpc_flags); 2093 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1900 2094
1901 DEBUG(printk("scsi(%ld): Port login retry: " 2095 DEBUG(printk("scsi(%ld): Port login retry: "
1902 "%02x%02x%02x%02x%02x%02x%02x%02x, " 2096 "%02x%02x%02x%02x%02x%02x%02x%02x, "
1903 "id = 0x%04x retry cnt=%d\n", 2097 "id = 0x%04x retry cnt=%d\n",
1904 ha->host_no, 2098 vha->host_no,
1905 fcport->port_name[0], 2099 fcport->port_name[0],
1906 fcport->port_name[1], 2100 fcport->port_name[1],
1907 fcport->port_name[2], 2101 fcport->port_name[2],
@@ -1929,13 +2123,12 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1929 * Context: 2123 * Context:
1930 */ 2124 */
1931void 2125void
1932qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 2126qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
1933{ 2127{
1934 fc_port_t *fcport; 2128 fc_port_t *fcport;
1935 scsi_qla_host_t *pha = to_qla_parent(ha);
1936 2129
1937 list_for_each_entry(fcport, &pha->fcports, list) { 2130 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1938 if (ha->vp_idx != fcport->vp_idx) 2131 if (vha->vp_idx != fcport->vp_idx)
1939 continue; 2132 continue;
1940 /* 2133 /*
1941 * No point in marking the device as lost, if the device is 2134 * No point in marking the device as lost, if the device is
@@ -1943,9 +2136,11 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1943 */ 2136 */
1944 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 2137 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1945 continue; 2138 continue;
1946 if (atomic_read(&fcport->state) == FCS_ONLINE) 2139 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1947 qla2x00_schedule_rport_del(ha, fcport, defer); 2140 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1948 atomic_set(&fcport->state, FCS_DEVICE_LOST); 2141 qla2x00_schedule_rport_del(vha, fcport, defer);
2142 } else
2143 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1949 } 2144 }
1950} 2145}
1951 2146
@@ -1958,105 +2153,153 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1958* !0 = failure. 2153* !0 = failure.
1959*/ 2154*/
1960static int 2155static int
1961qla2x00_mem_alloc(scsi_qla_host_t *ha) 2156qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2157 struct req_que **req, struct rsp_que **rsp)
1962{ 2158{
1963 char name[16]; 2159 char name[16];
1964 2160
1965 ha->request_ring = dma_alloc_coherent(&ha->pdev->dev, 2161 ha->init_cb_size = sizeof(init_cb_t);
1966 (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma, 2162 if (IS_QLA2XXX_MIDTYPE(ha))
1967 GFP_KERNEL); 2163 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1968 if (!ha->request_ring)
1969 goto fail;
1970
1971 ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
1972 (ha->response_q_length + 1) * sizeof(response_t),
1973 &ha->response_dma, GFP_KERNEL);
1974 if (!ha->response_ring)
1975 goto fail_free_request_ring;
1976
1977 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1978 &ha->gid_list_dma, GFP_KERNEL);
1979 if (!ha->gid_list)
1980 goto fail_free_response_ring;
1981 2164
1982 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, 2165 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
1983 &ha->init_cb_dma, GFP_KERNEL); 2166 &ha->init_cb_dma, GFP_KERNEL);
1984 if (!ha->init_cb) 2167 if (!ha->init_cb)
1985 goto fail_free_gid_list; 2168 goto fail;
1986 2169
1987 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 2170 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
1988 ha->host_no); 2171 &ha->gid_list_dma, GFP_KERNEL);
1989 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 2172 if (!ha->gid_list)
1990 DMA_POOL_SIZE, 8, 0);
1991 if (!ha->s_dma_pool)
1992 goto fail_free_init_cb; 2173 goto fail_free_init_cb;
1993 2174
1994 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 2175 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
1995 if (!ha->srb_mempool) 2176 if (!ha->srb_mempool)
1996 goto fail_free_s_dma_pool; 2177 goto fail_free_gid_list;
1997 2178
1998 /* Get memory for cached NVRAM */ 2179 /* Get memory for cached NVRAM */
1999 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL); 2180 ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
2000 if (!ha->nvram) 2181 if (!ha->nvram)
2001 goto fail_free_srb_mempool; 2182 goto fail_free_srb_mempool;
2002 2183
2184 snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
2185 ha->pdev->device);
2186 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
2187 DMA_POOL_SIZE, 8, 0);
2188 if (!ha->s_dma_pool)
2189 goto fail_free_nvram;
2190
2003 /* Allocate memory for SNS commands */ 2191 /* Allocate memory for SNS commands */
2004 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2192 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2005 /* Get consistent memory allocated for SNS commands */ 2193 /* Get consistent memory allocated for SNS commands */
2006 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev, 2194 ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
2007 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL); 2195 sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
2008 if (!ha->sns_cmd) 2196 if (!ha->sns_cmd)
2009 goto fail_free_nvram; 2197 goto fail_dma_pool;
2010 } else { 2198 } else {
2011 /* Get consistent memory allocated for MS IOCB */ 2199 /* Get consistent memory allocated for MS IOCB */
2012 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 2200 ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2013 &ha->ms_iocb_dma); 2201 &ha->ms_iocb_dma);
2014 if (!ha->ms_iocb) 2202 if (!ha->ms_iocb)
2015 goto fail_free_nvram; 2203 goto fail_dma_pool;
2016 2204 /* Get consistent memory allocated for CT SNS commands */
2017 /* Get consistent memory allocated for CT SNS commands */
2018 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev, 2205 ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
2019 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL); 2206 sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
2020 if (!ha->ct_sns) 2207 if (!ha->ct_sns)
2021 goto fail_free_ms_iocb; 2208 goto fail_free_ms_iocb;
2022 } 2209 }
2023 2210
2024 return 0; 2211 /* Allocate memory for request ring */
2212 *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
2213 if (!*req) {
2214 DEBUG(printk("Unable to allocate memory for req\n"));
2215 goto fail_req;
2216 }
2217 (*req)->length = req_len;
2218 (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
2219 ((*req)->length + 1) * sizeof(request_t),
2220 &(*req)->dma, GFP_KERNEL);
2221 if (!(*req)->ring) {
2222 DEBUG(printk("Unable to allocate memory for req_ring\n"));
2223 goto fail_req_ring;
2224 }
2225 /* Allocate memory for response ring */
2226 *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
2227 if (!*rsp) {
2228 qla_printk(KERN_WARNING, ha,
2229 "Unable to allocate memory for rsp\n");
2230 goto fail_rsp;
2231 }
2232 (*rsp)->hw = ha;
2233 (*rsp)->length = rsp_len;
2234 (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
2235 ((*rsp)->length + 1) * sizeof(response_t),
2236 &(*rsp)->dma, GFP_KERNEL);
2237 if (!(*rsp)->ring) {
2238 qla_printk(KERN_WARNING, ha,
2239 "Unable to allocate memory for rsp_ring\n");
2240 goto fail_rsp_ring;
2241 }
2242 (*req)->rsp = *rsp;
2243 (*rsp)->req = *req;
2244 /* Allocate memory for NVRAM data for vports */
2245 if (ha->nvram_npiv_size) {
2246 ha->npiv_info = kzalloc(sizeof(struct qla_npiv_entry) *
2247 ha->nvram_npiv_size, GFP_KERNEL);
2248 if (!ha->npiv_info) {
2249 qla_printk(KERN_WARNING, ha,
2250 "Unable to allocate memory for npiv info\n");
2251 goto fail_npiv_info;
2252 }
2253 } else
2254 ha->npiv_info = NULL;
2025 2255
2256 INIT_LIST_HEAD(&ha->vp_list);
2257 return 1;
2258
2259fail_npiv_info:
2260 dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
2261 sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
2262 (*rsp)->ring = NULL;
2263 (*rsp)->dma = 0;
2264fail_rsp_ring:
2265 kfree(*rsp);
2266fail_rsp:
2267 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
2268 sizeof(request_t), (*req)->ring, (*req)->dma);
2269 (*req)->ring = NULL;
2270 (*req)->dma = 0;
2271fail_req_ring:
2272 kfree(*req);
2273fail_req:
2274 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2275 ha->ct_sns, ha->ct_sns_dma);
2276 ha->ct_sns = NULL;
2277 ha->ct_sns_dma = 0;
2026fail_free_ms_iocb: 2278fail_free_ms_iocb:
2027 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2279 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2028 ha->ms_iocb = NULL; 2280 ha->ms_iocb = NULL;
2029 ha->ms_iocb_dma = 0; 2281 ha->ms_iocb_dma = 0;
2282fail_dma_pool:
2283 dma_pool_destroy(ha->s_dma_pool);
2284 ha->s_dma_pool = NULL;
2030fail_free_nvram: 2285fail_free_nvram:
2031 kfree(ha->nvram); 2286 kfree(ha->nvram);
2032 ha->nvram = NULL; 2287 ha->nvram = NULL;
2033fail_free_srb_mempool: 2288fail_free_srb_mempool:
2034 mempool_destroy(ha->srb_mempool); 2289 mempool_destroy(ha->srb_mempool);
2035 ha->srb_mempool = NULL; 2290 ha->srb_mempool = NULL;
2036fail_free_s_dma_pool:
2037 dma_pool_destroy(ha->s_dma_pool);
2038 ha->s_dma_pool = NULL;
2039fail_free_init_cb:
2040 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2041 ha->init_cb_dma);
2042 ha->init_cb = NULL;
2043 ha->init_cb_dma = 0;
2044fail_free_gid_list: 2291fail_free_gid_list:
2045 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2292 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2046 ha->gid_list_dma); 2293 ha->gid_list_dma);
2047 ha->gid_list = NULL; 2294 ha->gid_list = NULL;
2048 ha->gid_list_dma = 0; 2295 ha->gid_list_dma = 0;
2049fail_free_response_ring: 2296fail_free_init_cb:
2050 dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) * 2297 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
2051 sizeof(response_t), ha->response_ring, ha->response_dma); 2298 ha->init_cb_dma);
2052 ha->response_ring = NULL; 2299 ha->init_cb = NULL;
2053 ha->response_dma = 0; 2300 ha->init_cb_dma = 0;
2054fail_free_request_ring:
2055 dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
2056 sizeof(request_t), ha->request_ring, ha->request_dma);
2057 ha->request_ring = NULL;
2058 ha->request_dma = 0;
2059fail: 2301fail:
2302 DEBUG(printk("%s: Memory allocation failure\n", __func__));
2060 return -ENOMEM; 2303 return -ENOMEM;
2061} 2304}
2062 2305
@@ -2068,32 +2311,29 @@ fail:
2068* ha = adapter block pointer. 2311* ha = adapter block pointer.
2069*/ 2312*/
2070static void 2313static void
2071qla2x00_mem_free(scsi_qla_host_t *ha) 2314qla2x00_mem_free(struct qla_hw_data *ha)
2072{ 2315{
2073 struct list_head *fcpl, *fcptemp;
2074 fc_port_t *fcport;
2075
2076 if (ha->srb_mempool) 2316 if (ha->srb_mempool)
2077 mempool_destroy(ha->srb_mempool); 2317 mempool_destroy(ha->srb_mempool);
2078 2318
2079 if (ha->fce) 2319 if (ha->fce)
2080 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, 2320 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
2081 ha->fce_dma); 2321 ha->fce_dma);
2082 2322
2083 if (ha->fw_dump) { 2323 if (ha->fw_dump) {
2084 if (ha->eft) 2324 if (ha->eft)
2085 dma_free_coherent(&ha->pdev->dev, 2325 dma_free_coherent(&ha->pdev->dev,
2086 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); 2326 ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
2087 vfree(ha->fw_dump); 2327 vfree(ha->fw_dump);
2088 } 2328 }
2089 2329
2090 if (ha->sns_cmd) 2330 if (ha->sns_cmd)
2091 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), 2331 dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
2092 ha->sns_cmd, ha->sns_cmd_dma); 2332 ha->sns_cmd, ha->sns_cmd_dma);
2093 2333
2094 if (ha->ct_sns) 2334 if (ha->ct_sns)
2095 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt), 2335 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
2096 ha->ct_sns, ha->ct_sns_dma); 2336 ha->ct_sns, ha->ct_sns_dma);
2097 2337
2098 if (ha->sfp_data) 2338 if (ha->sfp_data)
2099 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 2339 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
@@ -2104,23 +2344,18 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2104 if (ha->s_dma_pool) 2344 if (ha->s_dma_pool)
2105 dma_pool_destroy(ha->s_dma_pool); 2345 dma_pool_destroy(ha->s_dma_pool);
2106 2346
2107 if (ha->init_cb)
2108 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2109 ha->init_cb, ha->init_cb_dma);
2110 2347
2111 if (ha->gid_list) 2348 if (ha->gid_list)
2112 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2349 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2113 ha->gid_list_dma); 2350 ha->gid_list_dma);
2114 2351
2115 if (ha->response_ring)
2116 dma_free_coherent(&ha->pdev->dev,
2117 (ha->response_q_length + 1) * sizeof(response_t),
2118 ha->response_ring, ha->response_dma);
2119 2352
2120 if (ha->request_ring) 2353 if (ha->init_cb)
2121 dma_free_coherent(&ha->pdev->dev, 2354 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2122 (ha->request_q_length + 1) * sizeof(request_t), 2355 ha->init_cb, ha->init_cb_dma);
2123 ha->request_ring, ha->request_dma); 2356 vfree(ha->optrom_buffer);
2357 kfree(ha->nvram);
2358 kfree(ha->npiv_info);
2124 2359
2125 ha->srb_mempool = NULL; 2360 ha->srb_mempool = NULL;
2126 ha->eft = NULL; 2361 ha->eft = NULL;
@@ -2139,30 +2374,45 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2139 ha->gid_list = NULL; 2374 ha->gid_list = NULL;
2140 ha->gid_list_dma = 0; 2375 ha->gid_list_dma = 0;
2141 2376
2142 ha->response_ring = NULL; 2377 ha->fw_dump = NULL;
2143 ha->response_dma = 0; 2378 ha->fw_dumped = 0;
2144 ha->request_ring = NULL; 2379 ha->fw_dump_reading = 0;
2145 ha->request_dma = 0; 2380}
2146 2381
2147 list_for_each_safe(fcpl, fcptemp, &ha->fcports) { 2382struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
2148 fcport = list_entry(fcpl, fc_port_t, list); 2383 struct qla_hw_data *ha)
2384{
2385 struct Scsi_Host *host;
2386 struct scsi_qla_host *vha = NULL;
2149 2387
2150 /* fc ports */ 2388 host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
2151 list_del_init(&fcport->list); 2389 if (host == NULL) {
2152 kfree(fcport); 2390 printk(KERN_WARNING
2391 "qla2xxx: Couldn't allocate host from scsi layer!\n");
2392 goto fail;
2153 } 2393 }
2154 INIT_LIST_HEAD(&ha->fcports);
2155 2394
2156 ha->fw_dump = NULL; 2395 /* Clear our data area */
2157 ha->fw_dumped = 0; 2396 vha = shost_priv(host);
2158 ha->fw_dump_reading = 0; 2397 memset(vha, 0, sizeof(scsi_qla_host_t));
2159 2398
2160 vfree(ha->optrom_buffer); 2399 vha->host = host;
2161 kfree(ha->nvram); 2400 vha->host_no = host->host_no;
2401 vha->hw = ha;
2402
2403 INIT_LIST_HEAD(&vha->vp_fcports);
2404 INIT_LIST_HEAD(&vha->work_list);
2405 INIT_LIST_HEAD(&vha->list);
2406
2407 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
2408 return vha;
2409
2410fail:
2411 return vha;
2162} 2412}
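
qla2x00_create_host() is the heart of the refactoring: the Scsi_Host private area now holds only the per-port scsi_qla_host, which carries a back-pointer to a qla_hw_data allocated once per PCI function and shared by the base port and all NPIV vports, while fcports, the work list and vport membership become per-vha lists. A heavily reduced model of the two structures and the create step; plain calloc replaces scsi_host_alloc and only a few representative fields are kept:

#include <stdlib.h>
#include <stdio.h>

/* Shared, per-PCI-function state (stands in for struct qla_hw_data). */
struct hw_data {
	int max_queues;
	/* registers, DMA pools, isp_ops, vp_list, ... in the real structure */
};

/* Per physical or virtual port (stands in for scsi_qla_host_t). */
struct vport {
	unsigned long   host_no;
	struct hw_data *hw;          /* the back-pointer every vha now carries */
	char            host_str[24];
};

static struct vport *create_host(struct hw_data *ha, unsigned long host_no)
{
	struct vport *vha = calloc(1, sizeof(*vha));

	if (!vha)
		return NULL;
	vha->host_no = host_no;
	vha->hw = ha;                /* all hardware access goes through vha->hw */
	snprintf(vha->host_str, sizeof(vha->host_str), "qla2xxx_%lu", host_no);
	return vha;
}

int main(void)
{
	struct hw_data ha = { .max_queues = 1 };
	struct vport *base_vha = create_host(&ha, 0);

	if (!base_vha)
		return 1;
	printf("%s shares hw with %d queue(s)\n",
	    base_vha->host_str, base_vha->hw->max_queues);
	free(base_vha);
	return 0;
}
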
2163 2413
2164static struct qla_work_evt * 2414static struct qla_work_evt *
2165qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type, 2415qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
2166 int locked) 2416 int locked)
2167{ 2417{
2168 struct qla_work_evt *e; 2418 struct qla_work_evt *e;
@@ -2179,42 +2429,42 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2179} 2429}
2180 2430
2181static int 2431static int
2182qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2432qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
2183{ 2433{
2184 unsigned long uninitialized_var(flags); 2434 unsigned long uninitialized_var(flags);
2185 scsi_qla_host_t *pha = to_qla_parent(ha); 2435 struct qla_hw_data *ha = vha->hw;
2186 2436
2187 if (!locked) 2437 if (!locked)
2188 spin_lock_irqsave(&pha->hardware_lock, flags); 2438 spin_lock_irqsave(&ha->hardware_lock, flags);
2189 list_add_tail(&e->list, &ha->work_list); 2439 list_add_tail(&e->list, &vha->work_list);
2190 qla2xxx_wake_dpc(ha); 2440 qla2xxx_wake_dpc(vha);
2191 if (!locked) 2441 if (!locked)
2192 spin_unlock_irqrestore(&pha->hardware_lock, flags); 2442 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2193 return QLA_SUCCESS; 2443 return QLA_SUCCESS;
2194} 2444}
2195 2445
2196int 2446int
2197qla2x00_post_aen_work(struct scsi_qla_host *ha, enum fc_host_event_code code, 2447qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
2198 u32 data) 2448 u32 data)
2199{ 2449{
2200 struct qla_work_evt *e; 2450 struct qla_work_evt *e;
2201 2451
2202 e = qla2x00_alloc_work(ha, QLA_EVT_AEN, 1); 2452 e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
2203 if (!e) 2453 if (!e)
2204 return QLA_FUNCTION_FAILED; 2454 return QLA_FUNCTION_FAILED;
2205 2455
2206 e->u.aen.code = code; 2456 e->u.aen.code = code;
2207 e->u.aen.data = data; 2457 e->u.aen.data = data;
2208 return qla2x00_post_work(ha, e, 1); 2458 return qla2x00_post_work(vha, e, 1);
2209} 2459}
2210 2460
2211int 2461int
2212qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1, 2462qla2x00_post_hwe_work(struct scsi_qla_host *vha, uint16_t code, uint16_t d1,
2213 uint16_t d2, uint16_t d3) 2463 uint16_t d2, uint16_t d3)
2214{ 2464{
2215 struct qla_work_evt *e; 2465 struct qla_work_evt *e;
2216 2466
2217 e = qla2x00_alloc_work(ha, QLA_EVT_HWE_LOG, 1); 2467 e = qla2x00_alloc_work(vha, QLA_EVT_HWE_LOG, 1);
2218 if (!e) 2468 if (!e)
2219 return QLA_FUNCTION_FAILED; 2469 return QLA_FUNCTION_FAILED;
2220 2470
@@ -2222,36 +2472,95 @@ qla2x00_post_hwe_work(struct scsi_qla_host *ha, uint16_t code, uint16_t d1,
2222 e->u.hwe.d1 = d1; 2472 e->u.hwe.d1 = d1;
2223 e->u.hwe.d2 = d2; 2473 e->u.hwe.d2 = d2;
2224 e->u.hwe.d3 = d3; 2474 e->u.hwe.d3 = d3;
2225 return qla2x00_post_work(ha, e, 1); 2475 return qla2x00_post_work(vha, e, 1);
2226} 2476}
2227 2477
2228static void 2478static void
2229qla2x00_do_work(struct scsi_qla_host *ha) 2479qla2x00_do_work(struct scsi_qla_host *vha)
2230{ 2480{
2231 struct qla_work_evt *e; 2481 struct qla_work_evt *e;
2232 scsi_qla_host_t *pha = to_qla_parent(ha); 2482 struct qla_hw_data *ha = vha->hw;
2233 2483
2234 spin_lock_irq(&pha->hardware_lock); 2484 spin_lock_irq(&ha->hardware_lock);
2235 while (!list_empty(&ha->work_list)) { 2485 while (!list_empty(&vha->work_list)) {
2236 e = list_entry(ha->work_list.next, struct qla_work_evt, list); 2486 e = list_entry(vha->work_list.next, struct qla_work_evt, list);
2237 list_del_init(&e->list); 2487 list_del_init(&e->list);
2238 spin_unlock_irq(&pha->hardware_lock); 2488 spin_unlock_irq(&ha->hardware_lock);
2239 2489
2240 switch (e->type) { 2490 switch (e->type) {
2241 case QLA_EVT_AEN: 2491 case QLA_EVT_AEN:
2242 fc_host_post_event(ha->host, fc_get_event_number(), 2492 fc_host_post_event(vha->host, fc_get_event_number(),
2243 e->u.aen.code, e->u.aen.data); 2493 e->u.aen.code, e->u.aen.data);
2244 break; 2494 break;
2245 case QLA_EVT_HWE_LOG: 2495 case QLA_EVT_HWE_LOG:
2246 qla2xxx_hw_event_log(ha, e->u.hwe.code, e->u.hwe.d1, 2496 qla2xxx_hw_event_log(vha, e->u.hwe.code, e->u.hwe.d1,
2247 e->u.hwe.d2, e->u.hwe.d3); 2497 e->u.hwe.d2, e->u.hwe.d3);
2248 break; 2498 break;
2249 } 2499 }
2250 if (e->flags & QLA_EVT_FLAG_FREE) 2500 if (e->flags & QLA_EVT_FLAG_FREE)
2251 kfree(e); 2501 kfree(e);
2252 spin_lock_irq(&pha->hardware_lock); 2502 spin_lock_irq(&ha->hardware_lock);
2503 }
2504 spin_unlock_irq(&ha->hardware_lock);
2505}
2506/* Relogins all the fcports of a vport
2507 * Context: dpc thread
2508 */
2509void qla2x00_relogin(struct scsi_qla_host *vha)
2510{
2511 fc_port_t *fcport;
2512 uint8_t status;
2513 uint16_t next_loopid = 0;
2514 struct qla_hw_data *ha = vha->hw;
2515
2516 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2517 /*
2518 * If the port is not ONLINE then try to login
2519 * to it if we haven't run out of retries.
2520 */
2521 if (atomic_read(&fcport->state) !=
2522 FCS_ONLINE && fcport->login_retry) {
2523
2524 if (fcport->flags & FCF_FABRIC_DEVICE) {
2525 if (fcport->flags & FCF_TAPE_PRESENT)
2526 ha->isp_ops->fabric_logout(vha,
2527 fcport->loop_id,
2528 fcport->d_id.b.domain,
2529 fcport->d_id.b.area,
2530 fcport->d_id.b.al_pa);
2531
2532 status = qla2x00_fabric_login(vha, fcport,
2533 &next_loopid);
2534 } else
2535 status = qla2x00_local_device_login(vha,
2536 fcport);
2537
2538 fcport->login_retry--;
2539 if (status == QLA_SUCCESS) {
2540 fcport->old_loop_id = fcport->loop_id;
2541
2542 DEBUG(printk("scsi(%ld): port login OK: logged "
2543 "in ID 0x%x\n", vha->host_no, fcport->loop_id));
2544
2545 qla2x00_update_fcport(vha, fcport);
2546
2547 } else if (status == 1) {
2548 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2549 /* retry the login again */
2550 DEBUG(printk("scsi(%ld): Retrying"
2551 " %d login again loop_id 0x%x\n",
2552 vha->host_no, fcport->login_retry,
2553 fcport->loop_id));
2554 } else {
2555 fcport->login_retry = 0;
2556 }
2557
2558 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2559 fcport->loop_id = FC_NO_LOOP_ID;
2560 }
2561 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
2562 break;
2253 } 2563 }
2254 spin_unlock_irq(&pha->hardware_lock);
2255} 2564}
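
qla2x00_relogin() above hoists the per-fcport retry logic out of the DPC loop. The decision it makes for each port is small: decrement the retry budget, update the port on success, re-arm RELOGIN_NEEDED on a transient failure, and zero the budget (releasing the loop ID) on a hard failure. A minimal sketch of that decision, with stand-in constants rather than the driver's definitions:

#include <stdio.h>

#define QLA_SUCCESS	0
#define STATUS_RETRY	1	/* transient failure: try again later */
#define FC_NO_LOOP_ID	0xFFFF

struct fcport_sketch {
	int login_retry;	/* retries left */
	unsigned int loop_id;
	int relogin_needed;	/* models the RELOGIN_NEEDED dpc flag */
};

static void handle_login_status(struct fcport_sketch *fcport, int status)
{
	fcport->login_retry--;

	if (status == QLA_SUCCESS) {
		/* port is back: the driver would update the fcport here */
	} else if (status == STATUS_RETRY) {
		/* transient: ask the DPC thread to revisit this port */
		fcport->relogin_needed = 1;
	} else {
		/* hard failure: stop retrying this port */
		fcport->login_retry = 0;
	}

	/* once retries are exhausted without success, release the loop ID */
	if (fcport->login_retry == 0 && status != QLA_SUCCESS)
		fcport->loop_id = FC_NO_LOOP_ID;
}

int main(void)
{
	struct fcport_sketch p = { .login_retry = 3, .loop_id = 0x81 };

	handle_login_status(&p, STATUS_RETRY);
	handle_login_status(&p, 2 /* hard failure */);
	printf("retries=%d loop_id=0x%x relogin_needed=%d\n",
	       p.login_retry, p.loop_id, p.relogin_needed);
	return 0;
}
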
2256 2565
2257/************************************************************************** 2566/**************************************************************************
@@ -2271,15 +2580,11 @@ static int
2271qla2x00_do_dpc(void *data) 2580qla2x00_do_dpc(void *data)
2272{ 2581{
2273 int rval; 2582 int rval;
2274 scsi_qla_host_t *ha; 2583 scsi_qla_host_t *base_vha;
2275 fc_port_t *fcport; 2584 struct qla_hw_data *ha;
2276 uint8_t status;
2277 uint16_t next_loopid;
2278 struct scsi_qla_host *vha;
2279 int i;
2280 2585
2281 2586 ha = (struct qla_hw_data *)data;
2282 ha = (scsi_qla_host_t *)data; 2587 base_vha = pci_get_drvdata(ha->pdev);
2283 2588
2284 set_user_nice(current, -20); 2589 set_user_nice(current, -20);
2285 2590
@@ -2293,10 +2598,10 @@ qla2x00_do_dpc(void *data)
2293 DEBUG3(printk("qla2x00: DPC handler waking up\n")); 2598 DEBUG3(printk("qla2x00: DPC handler waking up\n"));
2294 2599
2295 /* Initialization not yet finished. Don't do anything yet. */ 2600 /* Initialization not yet finished. Don't do anything yet. */
2296 if (!ha->flags.init_done) 2601 if (!base_vha->flags.init_done)
2297 continue; 2602 continue;
2298 2603
2299 DEBUG3(printk("scsi(%ld): DPC handler\n", ha->host_no)); 2604 DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
2300 2605
2301 ha->dpc_active = 1; 2606 ha->dpc_active = 1;
2302 2607
@@ -2305,149 +2610,98 @@ qla2x00_do_dpc(void *data)
2305 continue; 2610 continue;
2306 } 2611 }
2307 2612
2308 qla2x00_do_work(ha); 2613 qla2x00_do_work(base_vha);
2309 2614
2310 if (test_and_clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) { 2615 if (test_and_clear_bit(ISP_ABORT_NEEDED,
2616 &base_vha->dpc_flags)) {
2311 2617
2312 DEBUG(printk("scsi(%ld): dpc: sched " 2618 DEBUG(printk("scsi(%ld): dpc: sched "
2313 "qla2x00_abort_isp ha = %p\n", 2619 "qla2x00_abort_isp ha = %p\n",
2314 ha->host_no, ha)); 2620 base_vha->host_no, ha));
2315 if (!(test_and_set_bit(ABORT_ISP_ACTIVE, 2621 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
2316 &ha->dpc_flags))) { 2622 &base_vha->dpc_flags))) {
2317 2623
2318 if (qla2x00_abort_isp(ha)) { 2624 if (qla2x00_abort_isp(base_vha)) {
2319 /* failed. retry later */ 2625 /* failed. retry later */
2320 set_bit(ISP_ABORT_NEEDED, 2626 set_bit(ISP_ABORT_NEEDED,
2321 &ha->dpc_flags); 2627 &base_vha->dpc_flags);
2322 }
2323 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
2324 }
2325
2326 for_each_mapped_vp_idx(ha, i) {
2327 list_for_each_entry(vha, &ha->vp_list,
2328 vp_list) {
2329 if (i == vha->vp_idx) {
2330 set_bit(ISP_ABORT_NEEDED,
2331 &vha->dpc_flags);
2332 break;
2333 }
2334 } 2628 }
2629 clear_bit(ABORT_ISP_ACTIVE,
2630 &base_vha->dpc_flags);
2335 } 2631 }
2336 2632
2337 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n", 2633 DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
2338 ha->host_no)); 2634 base_vha->host_no));
2339 } 2635 }
2340 2636
2341 if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) { 2637 if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
2342 qla2x00_update_fcports(ha); 2638 qla2x00_update_fcports(base_vha);
2343 clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 2639 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2344 } 2640 }
2345 2641
2346 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2642 if (test_and_clear_bit(RESET_MARKER_NEEDED,
2347 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { 2643 &base_vha->dpc_flags) &&
2644 (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
2348 2645
2349 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n", 2646 DEBUG(printk("scsi(%ld): qla2x00_reset_marker()\n",
2350 ha->host_no)); 2647 base_vha->host_no));
2351 2648
2352 qla2x00_rst_aen(ha); 2649 qla2x00_rst_aen(base_vha);
2353 clear_bit(RESET_ACTIVE, &ha->dpc_flags); 2650 clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
2354 } 2651 }
2355 2652
2356 /* Retry each device up to login retry count */ 2653 /* Retry each device up to login retry count */
2357 if ((test_and_clear_bit(RELOGIN_NEEDED, &ha->dpc_flags)) && 2654 if ((test_and_clear_bit(RELOGIN_NEEDED,
2358 !test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) && 2655 &base_vha->dpc_flags)) &&
2359 atomic_read(&ha->loop_state) != LOOP_DOWN) { 2656 !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
2657 atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
2360 2658
2361 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n", 2659 DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
2362 ha->host_no)); 2660 base_vha->host_no));
2363 2661 qla2x00_relogin(base_vha);
2364 next_loopid = 0; 2662
2365 list_for_each_entry(fcport, &ha->fcports, list) {
2366 /*
2367 * If the port is not ONLINE then try to login
2368 * to it if we haven't run out of retries.
2369 */
2370 if (atomic_read(&fcport->state) != FCS_ONLINE &&
2371 fcport->login_retry) {
2372
2373 if (fcport->flags & FCF_FABRIC_DEVICE) {
2374 if (fcport->flags &
2375 FCF_TAPE_PRESENT)
2376 ha->isp_ops->fabric_logout(
2377 ha, fcport->loop_id,
2378 fcport->d_id.b.domain,
2379 fcport->d_id.b.area,
2380 fcport->d_id.b.al_pa);
2381 status = qla2x00_fabric_login(
2382 ha, fcport, &next_loopid);
2383 } else
2384 status =
2385 qla2x00_local_device_login(
2386 ha, fcport);
2387
2388 fcport->login_retry--;
2389 if (status == QLA_SUCCESS) {
2390 fcport->old_loop_id = fcport->loop_id;
2391
2392 DEBUG(printk("scsi(%ld): port login OK: logged in ID 0x%x\n",
2393 ha->host_no, fcport->loop_id));
2394
2395 qla2x00_update_fcport(ha,
2396 fcport);
2397 } else if (status == 1) {
2398 set_bit(RELOGIN_NEEDED, &ha->dpc_flags);
2399 /* retry the login again */
2400 DEBUG(printk("scsi(%ld): Retrying %d login again loop_id 0x%x\n",
2401 ha->host_no,
2402 fcport->login_retry, fcport->loop_id));
2403 } else {
2404 fcport->login_retry = 0;
2405 }
2406 if (fcport->login_retry == 0 && status != QLA_SUCCESS)
2407 fcport->loop_id = FC_NO_LOOP_ID;
2408 }
2409 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2410 break;
2411 }
2412 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n", 2663 DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
2413 ha->host_no)); 2664 base_vha->host_no));
2414 } 2665 }
2415 2666
2416 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2667 if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
2668 &base_vha->dpc_flags)) {
2417 2669
2418 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n", 2670 DEBUG(printk("scsi(%ld): qla2x00_loop_resync()\n",
2419 ha->host_no)); 2671 base_vha->host_no));
2420 2672
2421 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2673 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2422 &ha->dpc_flags))) { 2674 &base_vha->dpc_flags))) {
2423 2675
2424 rval = qla2x00_loop_resync(ha); 2676 rval = qla2x00_loop_resync(base_vha);
2425 2677
2426 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2678 clear_bit(LOOP_RESYNC_ACTIVE,
2679 &base_vha->dpc_flags);
2427 } 2680 }
2428 2681
2429 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n", 2682 DEBUG(printk("scsi(%ld): qla2x00_loop_resync - end\n",
2430 ha->host_no)); 2683 base_vha->host_no));
2431 } 2684 }
2432 2685
2433 if (test_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags) && 2686 if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
2434 atomic_read(&ha->loop_state) == LOOP_READY) { 2687 atomic_read(&base_vha->loop_state) == LOOP_READY) {
2435 clear_bit(NPIV_CONFIG_NEEDED, &ha->dpc_flags); 2688 clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
2436 qla2xxx_flash_npiv_conf(ha); 2689 qla2xxx_flash_npiv_conf(base_vha);
2437 } 2690 }
2438 2691
2439 if (!ha->interrupts_on) 2692 if (!ha->interrupts_on)
2440 ha->isp_ops->enable_intrs(ha); 2693 ha->isp_ops->enable_intrs(ha);
2441 2694
2442 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2695 if (test_and_clear_bit(BEACON_BLINK_NEEDED,
2443 ha->isp_ops->beacon_blink(ha); 2696 &base_vha->dpc_flags))
2697 ha->isp_ops->beacon_blink(base_vha);
2444 2698
2445 qla2x00_do_dpc_all_vps(ha); 2699 qla2x00_do_dpc_all_vps(base_vha);
2446 2700
2447 ha->dpc_active = 0; 2701 ha->dpc_active = 0;
2448 } /* End of while(1) */ 2702 } /* End of while(1) */
2449 2703
2450 DEBUG(printk("scsi(%ld): DPC handler exiting\n", ha->host_no)); 2704 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
2451 2705
2452 /* 2706 /*
2453 * Make sure that nobody tries to wake us up again. 2707 * Make sure that nobody tries to wake us up again.
@@ -2458,11 +2712,12 @@ qla2x00_do_dpc(void *data)
2458} 2712}
2459 2713
2460void 2714void
2461qla2xxx_wake_dpc(scsi_qla_host_t *ha) 2715qla2xxx_wake_dpc(struct scsi_qla_host *vha)
2462{ 2716{
2717 struct qla_hw_data *ha = vha->hw;
2463 struct task_struct *t = ha->dpc_thread; 2718 struct task_struct *t = ha->dpc_thread;
2464 2719
2465 if (!test_bit(UNLOADING, &ha->dpc_flags) && t) 2720 if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
2466 wake_up_process(t); 2721 wake_up_process(t);
2467} 2722}
2468 2723
@@ -2474,26 +2729,26 @@ qla2xxx_wake_dpc(scsi_qla_host_t *ha)
2474* ha = adapter block pointer. 2729* ha = adapter block pointer.
2475*/ 2730*/
2476static void 2731static void
2477qla2x00_rst_aen(scsi_qla_host_t *ha) 2732qla2x00_rst_aen(scsi_qla_host_t *vha)
2478{ 2733{
2479 if (ha->flags.online && !ha->flags.reset_active && 2734 if (vha->flags.online && !vha->flags.reset_active &&
2480 !atomic_read(&ha->loop_down_timer) && 2735 !atomic_read(&vha->loop_down_timer) &&
2481 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 2736 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
2482 do { 2737 do {
2483 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 2738 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
2484 2739
2485 /* 2740 /*
2486 * Issue marker command only when we are going to start 2741 * Issue marker command only when we are going to start
2487 * the I/O. 2742 * the I/O.
2488 */ 2743 */
2489 ha->marker_needed = 1; 2744 vha->marker_needed = 1;
2490 } while (!atomic_read(&ha->loop_down_timer) && 2745 } while (!atomic_read(&vha->loop_down_timer) &&
2491 (test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags))); 2746 (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
2492 } 2747 }
2493} 2748}
2494 2749
2495static void 2750static void
2496qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp) 2751qla2x00_sp_free_dma(srb_t *sp)
2497{ 2752{
2498 struct scsi_cmnd *cmd = sp->cmd; 2753 struct scsi_cmnd *cmd = sp->cmd;
2499 2754
@@ -2505,11 +2760,11 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2505} 2760}
2506 2761
2507void 2762void
2508qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp) 2763qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
2509{ 2764{
2510 struct scsi_cmnd *cmd = sp->cmd; 2765 struct scsi_cmnd *cmd = sp->cmd;
2511 2766
2512 qla2x00_sp_free_dma(ha, sp); 2767 qla2x00_sp_free_dma(sp);
2513 2768
2514 mempool_free(sp, ha->srb_mempool); 2769 mempool_free(sp, ha->srb_mempool);
2515 2770
@@ -2525,7 +2780,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2525* Context: Interrupt 2780* Context: Interrupt
2526***************************************************************************/ 2781***************************************************************************/
2527void 2782void
2528qla2x00_timer(scsi_qla_host_t *ha) 2783qla2x00_timer(scsi_qla_host_t *vha)
2529{ 2784{
2530 unsigned long cpu_flags = 0; 2785 unsigned long cpu_flags = 0;
2531 fc_port_t *fcport; 2786 fc_port_t *fcport;
@@ -2533,8 +2788,8 @@ qla2x00_timer(scsi_qla_host_t *ha)
2533 int index; 2788 int index;
2534 srb_t *sp; 2789 srb_t *sp;
2535 int t; 2790 int t;
2536 scsi_qla_host_t *pha = to_qla_parent(ha); 2791 struct qla_hw_data *ha = vha->hw;
2537 2792 struct req_que *req;
2538 /* 2793 /*
2539 * Ports - Port down timer. 2794 * Ports - Port down timer.
2540 * 2795 *
@@ -2543,7 +2798,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2543 * the port is marked DEAD. 2798 * the port is marked DEAD.
2544 */ 2799 */
2545 t = 0; 2800 t = 0;
2546 list_for_each_entry(fcport, &ha->fcports, list) { 2801 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2547 if (fcport->port_type != FCT_TARGET) 2802 if (fcport->port_type != FCT_TARGET)
2548 continue; 2803 continue;
2549 2804
@@ -2557,7 +2812,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2557 2812
2558 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " 2813 DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
2559 "%d remaining\n", 2814 "%d remaining\n",
2560 ha->host_no, 2815 vha->host_no,
2561 t, atomic_read(&fcport->port_down_timer))); 2816 t, atomic_read(&fcport->port_down_timer)));
2562 } 2817 }
2563 t++; 2818 t++;
@@ -2565,30 +2820,32 @@ qla2x00_timer(scsi_qla_host_t *ha)
2565 2820
2566 2821
2567 /* Loop down handler. */ 2822 /* Loop down handler. */
2568 if (atomic_read(&ha->loop_down_timer) > 0 && 2823 if (atomic_read(&vha->loop_down_timer) > 0 &&
2569 !(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags)) && ha->flags.online) { 2824 !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
2825 && vha->flags.online) {
2570 2826
2571 if (atomic_read(&ha->loop_down_timer) == 2827 if (atomic_read(&vha->loop_down_timer) ==
2572 ha->loop_down_abort_time) { 2828 vha->loop_down_abort_time) {
2573 2829
2574 DEBUG(printk("scsi(%ld): Loop Down - aborting the " 2830 DEBUG(printk("scsi(%ld): Loop Down - aborting the "
2575 "queues before time expire\n", 2831 "queues before time expire\n",
2576 ha->host_no)); 2832 vha->host_no));
2577 2833
2578 if (!IS_QLA2100(ha) && ha->link_down_timeout) 2834 if (!IS_QLA2100(ha) && vha->link_down_timeout)
2579 atomic_set(&ha->loop_state, LOOP_DEAD); 2835 atomic_set(&vha->loop_state, LOOP_DEAD);
2580 2836
2581 /* Schedule an ISP abort to return any tape commands. */ 2837 /* Schedule an ISP abort to return any tape commands. */
2582 /* NPIV - scan physical port only */ 2838 /* NPIV - scan physical port only */
2583 if (!ha->parent) { 2839 if (!vha->vp_idx) {
2584 spin_lock_irqsave(&ha->hardware_lock, 2840 spin_lock_irqsave(&ha->hardware_lock,
2585 cpu_flags); 2841 cpu_flags);
2842 req = ha->req_q_map[0];
2586 for (index = 1; 2843 for (index = 1;
2587 index < MAX_OUTSTANDING_COMMANDS; 2844 index < MAX_OUTSTANDING_COMMANDS;
2588 index++) { 2845 index++) {
2589 fc_port_t *sfcp; 2846 fc_port_t *sfcp;
2590 2847
2591 sp = ha->outstanding_cmds[index]; 2848 sp = req->outstanding_cmds[index];
2592 if (!sp) 2849 if (!sp)
2593 continue; 2850 continue;
2594 sfcp = sp->fcport; 2851 sfcp = sp->fcport;
@@ -2596,63 +2853,63 @@ qla2x00_timer(scsi_qla_host_t *ha)
2596 continue; 2853 continue;
2597 2854
2598 set_bit(ISP_ABORT_NEEDED, 2855 set_bit(ISP_ABORT_NEEDED,
2599 &ha->dpc_flags); 2856 &vha->dpc_flags);
2600 break; 2857 break;
2601 } 2858 }
2602 spin_unlock_irqrestore(&ha->hardware_lock, 2859 spin_unlock_irqrestore(&ha->hardware_lock,
2603 cpu_flags); 2860 cpu_flags);
2604 } 2861 }
2605 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags); 2862 set_bit(ABORT_QUEUES_NEEDED, &vha->dpc_flags);
2606 start_dpc++; 2863 start_dpc++;
2607 } 2864 }
2608 2865
2609 /* if the loop has been down for 4 minutes, reinit adapter */ 2866 /* if the loop has been down for 4 minutes, reinit adapter */
2610 if (atomic_dec_and_test(&ha->loop_down_timer) != 0) { 2867 if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
2611 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - " 2868 DEBUG(printk("scsi(%ld): Loop down exceed 4 mins - "
2612 "restarting queues.\n", 2869 "restarting queues.\n",
2613 ha->host_no)); 2870 vha->host_no));
2614 2871
2615 set_bit(RESTART_QUEUES_NEEDED, &ha->dpc_flags); 2872 set_bit(RESTART_QUEUES_NEEDED, &vha->dpc_flags);
2616 start_dpc++; 2873 start_dpc++;
2617 2874
2618 if (!(ha->device_flags & DFLG_NO_CABLE) && 2875 if (!(vha->device_flags & DFLG_NO_CABLE) &&
2619 !ha->parent) { 2876 !vha->vp_idx) {
2620 DEBUG(printk("scsi(%ld): Loop down - " 2877 DEBUG(printk("scsi(%ld): Loop down - "
2621 "aborting ISP.\n", 2878 "aborting ISP.\n",
2622 ha->host_no)); 2879 vha->host_no));
2623 qla_printk(KERN_WARNING, ha, 2880 qla_printk(KERN_WARNING, ha,
2624 "Loop down - aborting ISP.\n"); 2881 "Loop down - aborting ISP.\n");
2625 2882
2626 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2883 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2627 } 2884 }
2628 } 2885 }
2629 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n", 2886 DEBUG3(printk("scsi(%ld): Loop Down - seconds remaining %d\n",
2630 ha->host_no, 2887 vha->host_no,
2631 atomic_read(&ha->loop_down_timer))); 2888 atomic_read(&vha->loop_down_timer)));
2632 } 2889 }
2633 2890
2634 /* Check if beacon LED needs to be blinked */ 2891 /* Check if beacon LED needs to be blinked */
2635 if (ha->beacon_blink_led == 1) { 2892 if (ha->beacon_blink_led == 1) {
2636 set_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags); 2893 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
2637 start_dpc++; 2894 start_dpc++;
2638 } 2895 }
2639 2896
2640 /* Process any deferred work. */ 2897 /* Process any deferred work. */
2641 if (!list_empty(&ha->work_list)) 2898 if (!list_empty(&vha->work_list))
2642 start_dpc++; 2899 start_dpc++;
2643 2900
2644 /* Schedule the DPC routine if needed */ 2901 /* Schedule the DPC routine if needed */
2645 if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || 2902 if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2646 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || 2903 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
2647 test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags) || 2904 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
2648 start_dpc || 2905 start_dpc ||
2649 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2906 test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
2650 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2907 test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
2651 test_bit(VP_DPC_NEEDED, &ha->dpc_flags) || 2908 test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
2652 test_bit(RELOGIN_NEEDED, &ha->dpc_flags))) 2909 test_bit(RELOGIN_NEEDED, &vha->dpc_flags)))
2653 qla2xxx_wake_dpc(pha); 2910 qla2xxx_wake_dpc(vha);
2654 2911
2655 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2912 qla2x00_restart_timer(vha, WATCH_INTERVAL);
2656} 2913}
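
The timer hunks above keep the same loop-down escalation as before, now expressed against vha/ha: the countdown first aborts the queues when it reaches loop_down_abort_time (marking the loop DEAD when a link-down timeout is configured), and on expiry restarts the queues and, for the physical port with a cable present, schedules an ISP abort. A compact sketch of that two-threshold escalation, with plain fields standing in for dpc_flags and the atomics:

#include <stdio.h>

struct loop_state_sketch {
	int loop_down_timer;		/* ticks remaining, counts down */
	int loop_down_abort_time;	/* first threshold: abort queues early */
	int cable_present;
	int is_physical_port;		/* NPIV: only the physical port aborts the ISP */
};

static void timer_tick(struct loop_state_sketch *s)
{
	if (s->loop_down_timer <= 0)
		return;

	/* first threshold: give up on outstanding I/O before the timer expires */
	if (s->loop_down_timer == s->loop_down_abort_time)
		printf("aborting queues before time expires\n");

	/* timer expired: restart queues; reinitialize the adapter if cabled */
	if (--s->loop_down_timer == 0) {
		printf("restarting queues\n");
		if (s->cable_present && s->is_physical_port)
			printf("loop down too long - aborting ISP\n");
	}
}

int main(void)
{
	struct loop_state_sketch s = {
		.loop_down_timer = 5, .loop_down_abort_time = 3,
		.cable_present = 1, .is_physical_port = 1,
	};

	for (int i = 0; i < 5; i++)
		timer_tick(&s);
	return 0;
}
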
2657 2914
2658/* Firmware interface routines. */ 2915/* Firmware interface routines. */
@@ -2684,8 +2941,9 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
2684}; 2941};
2685 2942
2686struct fw_blob * 2943struct fw_blob *
2687qla2x00_request_firmware(scsi_qla_host_t *ha) 2944qla2x00_request_firmware(scsi_qla_host_t *vha)
2688{ 2945{
2946 struct qla_hw_data *ha = vha->hw;
2689 struct fw_blob *blob; 2947 struct fw_blob *blob;
2690 2948
2691 blob = NULL; 2949 blob = NULL;
@@ -2709,7 +2967,7 @@ qla2x00_request_firmware(scsi_qla_host_t *ha)
2709 2967
2710 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) { 2968 if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
2711 DEBUG2(printk("scsi(%ld): Failed to load firmware image " 2969 DEBUG2(printk("scsi(%ld): Failed to load firmware image "
2712 "(%s).\n", ha->host_no, blob->name)); 2970 "(%s).\n", vha->host_no, blob->name));
2713 blob->fw = NULL; 2971 blob->fw = NULL;
2714 blob = NULL; 2972 blob = NULL;
2715 goto out; 2973 goto out;
@@ -2754,7 +3012,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2754 int risc_paused = 0; 3012 int risc_paused = 0;
2755 uint32_t stat; 3013 uint32_t stat;
2756 unsigned long flags; 3014 unsigned long flags;
2757 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3015 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3016 struct qla_hw_data *ha = base_vha->hw;
2758 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 3017 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2759 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 3018 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2760 3019
@@ -2777,7 +3036,7 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
2777 if (risc_paused) { 3036 if (risc_paused) {
2778 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, " 3037 qla_printk(KERN_INFO, ha, "RISC paused -- mmio_enabled, "
2779 "Dumping firmware!\n"); 3038 "Dumping firmware!\n");
2780 ha->isp_ops->fw_dump(ha, 0); 3039 ha->isp_ops->fw_dump(base_vha, 0);
2781 3040
2782 return PCI_ERS_RESULT_NEED_RESET; 3041 return PCI_ERS_RESULT_NEED_RESET;
2783 } else 3042 } else
@@ -2788,7 +3047,8 @@ static pci_ers_result_t
2788qla2xxx_pci_slot_reset(struct pci_dev *pdev) 3047qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2789{ 3048{
2790 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT; 3049 pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
2791 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3050 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3051 struct qla_hw_data *ha = base_vha->hw;
2792 int rc; 3052 int rc;
2793 3053
2794 if (ha->mem_only) 3054 if (ha->mem_only)
@@ -2804,13 +3064,13 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2804 } 3064 }
2805 pci_set_master(pdev); 3065 pci_set_master(pdev);
2806 3066
2807 if (ha->isp_ops->pci_config(ha)) 3067 if (ha->isp_ops->pci_config(base_vha))
2808 return ret; 3068 return ret;
2809 3069
2810 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 3070 set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2811 if (qla2x00_abort_isp(ha)== QLA_SUCCESS) 3071 if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
2812 ret = PCI_ERS_RESULT_RECOVERED; 3072 ret = PCI_ERS_RESULT_RECOVERED;
2813 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 3073 clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
2814 3074
2815 return ret; 3075 return ret;
2816} 3076}
@@ -2818,10 +3078,11 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
2818static void 3078static void
2819qla2xxx_pci_resume(struct pci_dev *pdev) 3079qla2xxx_pci_resume(struct pci_dev *pdev)
2820{ 3080{
2821 scsi_qla_host_t *ha = pci_get_drvdata(pdev); 3081 scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
3082 struct qla_hw_data *ha = base_vha->hw;
2822 int ret; 3083 int ret;
2823 3084
2824 ret = qla2x00_wait_for_hba_online(ha); 3085 ret = qla2x00_wait_for_hba_online(base_vha);
2825 if (ret != QLA_SUCCESS) { 3086 if (ret != QLA_SUCCESS) {
2826 qla_printk(KERN_ERR, ha, 3087 qla_printk(KERN_ERR, ha,
2827 "the device failed to resume I/O " 3088 "the device failed to resume I/O "
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index e4af678eb2d6..c538ee1b1a31 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -10,10 +10,6 @@
10#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
11#include <asm/uaccess.h> 11#include <asm/uaccess.h>
12 12
13static uint16_t qla2x00_nvram_request(scsi_qla_host_t *, uint32_t);
14static void qla2x00_nv_deselect(scsi_qla_host_t *);
15static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
16
17/* 13/*
18 * NVRAM support routines 14 * NVRAM support routines
19 */ 15 */
@@ -23,7 +19,7 @@ static void qla2x00_nv_write(scsi_qla_host_t *, uint16_t);
23 * @ha: HA context 19 * @ha: HA context
24 */ 20 */
25static void 21static void
26qla2x00_lock_nvram_access(scsi_qla_host_t *ha) 22qla2x00_lock_nvram_access(struct qla_hw_data *ha)
27{ 23{
28 uint16_t data; 24 uint16_t data;
29 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 25 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -56,7 +52,7 @@ qla2x00_lock_nvram_access(scsi_qla_host_t *ha)
56 * @ha: HA context 52 * @ha: HA context
57 */ 53 */
58static void 54static void
59qla2x00_unlock_nvram_access(scsi_qla_host_t *ha) 55qla2x00_unlock_nvram_access(struct qla_hw_data *ha)
60{ 56{
61 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 57 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
62 58
@@ -67,6 +63,84 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
67} 63}
68 64
69/** 65/**
66 * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
67 * @ha: HA context
68 * @data: Serial interface selector
69 */
70static void
71qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data)
72{
73 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
74
75 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
76 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
77 NVRAM_DELAY();
78 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_CLOCK |
79 NVR_WRT_ENABLE);
80 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
81 NVRAM_DELAY();
82 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
83 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
84 NVRAM_DELAY();
85}
86
87/**
88 * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
89 * NVRAM.
90 * @ha: HA context
91 * @nv_cmd: NVRAM command
92 *
93 * Bit definitions for NVRAM command:
94 *
95 * Bit 26 = start bit
96 * Bit 25, 24 = opcode
97 * Bit 23-16 = address
98 * Bit 15-0 = write data
99 *
100 * Returns the word read from nvram @addr.
101 */
102static uint16_t
103qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd)
104{
105 uint8_t cnt;
106 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
107 uint16_t data = 0;
108 uint16_t reg_data;
109
110 /* Send command to NVRAM. */
111 nv_cmd <<= 5;
112 for (cnt = 0; cnt < 11; cnt++) {
113 if (nv_cmd & BIT_31)
114 qla2x00_nv_write(ha, NVR_DATA_OUT);
115 else
116 qla2x00_nv_write(ha, 0);
117 nv_cmd <<= 1;
118 }
119
120 /* Read data from NVRAM. */
121 for (cnt = 0; cnt < 16; cnt++) {
122 WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
123 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
124 NVRAM_DELAY();
125 data <<= 1;
126 reg_data = RD_REG_WORD(&reg->nvram);
127 if (reg_data & NVR_DATA_IN)
128 data |= BIT_0;
129 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
130 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
131 NVRAM_DELAY();
132 }
133
134 /* Deselect chip. */
135 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
136 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
137 NVRAM_DELAY();
138
139 return data;
140}
141
142
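
The bit layout documented for qla2x00_nvram_request() explains the nv_cmd <<= 5 that precedes the clock-out loop: the command occupies bits 26..0, so shifting left by five aligns the start bit with bit 31, and eleven MSB-first clocks then emit the start bit, opcode and address (the 16 data bits matter only for writes). A small sketch of that packing follows; the opcode value is illustrative only, the driver takes it from its own NV_*_OP definitions.

#include <stdint.h>
#include <stdio.h>

static uint32_t nv_pack_cmd(unsigned int opcode, unsigned int addr,
			    unsigned int wdata)
{
	uint32_t cmd = 0;

	cmd |= 1u << 26;			/* start bit           */
	cmd |= (opcode & 0x3u) << 24;		/* 2-bit opcode        */
	cmd |= (addr & 0xffu) << 16;		/* 8-bit word address  */
	cmd |= wdata & 0xffffu;			/* data (writes only)  */
	return cmd;
}

int main(void)
{
	/* assume "2" is the read opcode purely for illustration */
	uint32_t cmd = nv_pack_cmd(2, 0x10, 0);
	uint32_t shifted = cmd << 5;		/* as done before the clock-out loop */

	for (int i = 0; i < 11; i++) {		/* 11 MSB-first clocks for a read */
		printf("%u", (shifted >> 31) & 1u);
		shifted <<= 1;
	}
	printf("  (start bit, opcode and address clocked out)\n");
	return 0;
}
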
143/**
70 * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the 144 * qla2x00_get_nvram_word() - Calculates word position in NVRAM and calls the
71 * request routine to get the word from NVRAM. 145 * request routine to get the word from NVRAM.
72 * @ha: HA context 146 * @ha: HA context
@@ -75,7 +149,7 @@ qla2x00_unlock_nvram_access(scsi_qla_host_t *ha)
75 * Returns the word read from nvram @addr. 149 * Returns the word read from nvram @addr.
76 */ 150 */
77static uint16_t 151static uint16_t
78qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr) 152qla2x00_get_nvram_word(struct qla_hw_data *ha, uint32_t addr)
79{ 153{
80 uint16_t data; 154 uint16_t data;
81 uint32_t nv_cmd; 155 uint32_t nv_cmd;
@@ -88,13 +162,27 @@ qla2x00_get_nvram_word(scsi_qla_host_t *ha, uint32_t addr)
88} 162}
89 163
90/** 164/**
165 * qla2x00_nv_deselect() - Deselect NVRAM operations.
166 * @ha: HA context
167 */
168static void
169qla2x00_nv_deselect(struct qla_hw_data *ha)
170{
171 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
172
173 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
174 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
175 NVRAM_DELAY();
176}
177
178/**
91 * qla2x00_write_nvram_word() - Write NVRAM data. 179 * qla2x00_write_nvram_word() - Write NVRAM data.
92 * @ha: HA context 180 * @ha: HA context
93 * @addr: Address in NVRAM to write 181 * @addr: Address in NVRAM to write
94 * @data: word to program 182 * @data: word to program
95 */ 183 */
96static void 184static void
97qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data) 185qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
98{ 186{
99 int count; 187 int count;
100 uint16_t word; 188 uint16_t word;
@@ -132,7 +220,7 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
132 do { 220 do {
133 if (!--wait_cnt) { 221 if (!--wait_cnt) {
134 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 222 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
135 __func__, ha->host_no)); 223 __func__, vha->host_no));
136 break; 224 break;
137 } 225 }
138 NVRAM_DELAY(); 226 NVRAM_DELAY();
@@ -150,8 +238,8 @@ qla2x00_write_nvram_word(scsi_qla_host_t *ha, uint32_t addr, uint16_t data)
150} 238}
151 239
152static int 240static int
153qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data, 241qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr,
154 uint32_t tmo) 242 uint16_t data, uint32_t tmo)
155{ 243{
156 int ret, count; 244 int ret, count;
157 uint16_t word; 245 uint16_t word;
@@ -209,102 +297,11 @@ qla2x00_write_nvram_word_tmo(scsi_qla_host_t *ha, uint32_t addr, uint16_t data,
209} 297}
210 298
211/** 299/**
212 * qla2x00_nvram_request() - Sends read command to NVRAM and gets data from
213 * NVRAM.
214 * @ha: HA context
215 * @nv_cmd: NVRAM command
216 *
217 * Bit definitions for NVRAM command:
218 *
219 * Bit 26 = start bit
220 * Bit 25, 24 = opcode
221 * Bit 23-16 = address
222 * Bit 15-0 = write data
223 *
224 * Returns the word read from nvram @addr.
225 */
226static uint16_t
227qla2x00_nvram_request(scsi_qla_host_t *ha, uint32_t nv_cmd)
228{
229 uint8_t cnt;
230 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 uint16_t data = 0;
232 uint16_t reg_data;
233
234 /* Send command to NVRAM. */
235 nv_cmd <<= 5;
236 for (cnt = 0; cnt < 11; cnt++) {
237 if (nv_cmd & BIT_31)
238 qla2x00_nv_write(ha, NVR_DATA_OUT);
239 else
240 qla2x00_nv_write(ha, 0);
241 nv_cmd <<= 1;
242 }
243
244 /* Read data from NVRAM. */
245 for (cnt = 0; cnt < 16; cnt++) {
246 WRT_REG_WORD(&reg->nvram, NVR_SELECT | NVR_CLOCK);
247 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
248 NVRAM_DELAY();
249 data <<= 1;
250 reg_data = RD_REG_WORD(&reg->nvram);
251 if (reg_data & NVR_DATA_IN)
252 data |= BIT_0;
253 WRT_REG_WORD(&reg->nvram, NVR_SELECT);
254 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
255 NVRAM_DELAY();
256 }
257
258 /* Deselect chip. */
259 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
260 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
261 NVRAM_DELAY();
262
263 return (data);
264}
265
266/**
267 * qla2x00_nv_write() - Clean NVRAM operations.
268 * @ha: HA context
269 */
270static void
271qla2x00_nv_deselect(scsi_qla_host_t *ha)
272{
273 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
274
275 WRT_REG_WORD(&reg->nvram, NVR_DESELECT);
276 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
277 NVRAM_DELAY();
278}
279
280/**
281 * qla2x00_nv_write() - Prepare for NVRAM read/write operation.
282 * @ha: HA context
283 * @data: Serial interface selector
284 */
285static void
286qla2x00_nv_write(scsi_qla_host_t *ha, uint16_t data)
287{
288 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
289
290 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
291 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
292 NVRAM_DELAY();
293 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT| NVR_CLOCK |
294 NVR_WRT_ENABLE);
295 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
296 NVRAM_DELAY();
297 WRT_REG_WORD(&reg->nvram, data | NVR_SELECT | NVR_WRT_ENABLE);
298 RD_REG_WORD(&reg->nvram); /* PCI Posting. */
299 NVRAM_DELAY();
300}
301
302/**
303 * qla2x00_clear_nvram_protection() - 300 * qla2x00_clear_nvram_protection() -
304 * @ha: HA context 301 * @ha: HA context
305 */ 302 */
306static int 303static int
307qla2x00_clear_nvram_protection(scsi_qla_host_t *ha) 304qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
308{ 305{
309 int ret, stat; 306 int ret, stat;
310 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 307 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -352,9 +349,8 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
352 wait_cnt = NVR_WAIT_CNT; 349 wait_cnt = NVR_WAIT_CNT;
353 do { 350 do {
354 if (!--wait_cnt) { 351 if (!--wait_cnt) {
355 DEBUG9_10(printk("%s(%ld): NVRAM didn't go " 352 DEBUG9_10(qla_printk(
356 "ready...\n", __func__, 353 "NVRAM didn't go ready...\n"));
357 ha->host_no));
358 break; 354 break;
359 } 355 }
360 NVRAM_DELAY(); 356 NVRAM_DELAY();
@@ -370,7 +366,7 @@ qla2x00_clear_nvram_protection(scsi_qla_host_t *ha)
370} 366}
371 367
372static void 368static void
373qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat) 369qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
374{ 370{
375 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 371 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
376 uint32_t word, wait_cnt; 372 uint32_t word, wait_cnt;
@@ -412,8 +408,7 @@ qla2x00_set_nvram_protection(scsi_qla_host_t *ha, int stat)
412 wait_cnt = NVR_WAIT_CNT; 408 wait_cnt = NVR_WAIT_CNT;
413 do { 409 do {
414 if (!--wait_cnt) { 410 if (!--wait_cnt) {
415 DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", 411 DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
416 __func__, ha->host_no));
417 break; 412 break;
418 } 413 }
419 NVRAM_DELAY(); 414 NVRAM_DELAY();
@@ -454,7 +449,7 @@ nvram_data_to_access_addr(uint32_t naddr)
454} 449}
455 450
456static uint32_t 451static uint32_t
457qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr) 452qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr)
458{ 453{
459 int rval; 454 int rval;
460 uint32_t cnt, data; 455 uint32_t cnt, data;
@@ -482,21 +477,20 @@ qla24xx_read_flash_dword(scsi_qla_host_t *ha, uint32_t addr)
482} 477}
483 478
484uint32_t * 479uint32_t *
485qla24xx_read_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 480qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
486 uint32_t dwords) 481 uint32_t dwords)
487{ 482{
488 uint32_t i; 483 uint32_t i;
489
490 /* Dword reads to flash. */ 484 /* Dword reads to flash. */
491 for (i = 0; i < dwords; i++, faddr++) 485 for (i = 0; i < dwords; i++, faddr++)
492 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 486 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
493 flash_data_to_access_addr(faddr))); 487 flash_data_to_access_addr(faddr)));
494 488
495 return dwptr; 489 return dwptr;
496} 490}
497 491
498static int 492static int
499qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data) 493qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data)
500{ 494{
501 int rval; 495 int rval;
502 uint32_t cnt; 496 uint32_t cnt;
@@ -519,7 +513,7 @@ qla24xx_write_flash_dword(scsi_qla_host_t *ha, uint32_t addr, uint32_t data)
519} 513}
520 514
521static void 515static void
522qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 516qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
523 uint8_t *flash_id) 517 uint8_t *flash_id)
524{ 518{
525 uint32_t ids; 519 uint32_t ids;
@@ -544,7 +538,7 @@ qla24xx_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
544} 538}
545 539
546static int 540static int
547qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start) 541qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
548{ 542{
549 const char *loc, *locations[] = { "DEF", "PCI" }; 543 const char *loc, *locations[] = { "DEF", "PCI" };
550 uint32_t pcihdr, pcids; 544 uint32_t pcihdr, pcids;
@@ -552,6 +546,8 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
552 uint8_t *buf, *bcode, last_image; 546 uint8_t *buf, *bcode, last_image;
553 uint16_t cnt, chksum, *wptr; 547 uint16_t cnt, chksum, *wptr;
554 struct qla_flt_location *fltl; 548 struct qla_flt_location *fltl;
549 struct qla_hw_data *ha = vha->hw;
550 struct req_que *req = ha->req_q_map[0];
555 551
556 /* 552 /*
557 * FLT-location structure resides after the last PCI region. 553 * FLT-location structure resides after the last PCI region.
@@ -563,20 +559,20 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
563 FA_FLASH_LAYOUT_ADDR; 559 FA_FLASH_LAYOUT_ADDR;
564 560
565 /* Begin with first PCI expansion ROM header. */ 561 /* Begin with first PCI expansion ROM header. */
566 buf = (uint8_t *)ha->request_ring; 562 buf = (uint8_t *)req->ring;
567 dcode = (uint32_t *)ha->request_ring; 563 dcode = (uint32_t *)req->ring;
568 pcihdr = 0; 564 pcihdr = 0;
569 last_image = 1; 565 last_image = 1;
570 do { 566 do {
571 /* Verify PCI expansion ROM header. */ 567 /* Verify PCI expansion ROM header. */
572 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); 568 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
573 bcode = buf + (pcihdr % 4); 569 bcode = buf + (pcihdr % 4);
574 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) 570 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa)
575 goto end; 571 goto end;
576 572
577 /* Locate PCI data structure. */ 573 /* Locate PCI data structure. */
578 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 574 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
579 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); 575 qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
580 bcode = buf + (pcihdr % 4); 576 bcode = buf + (pcihdr % 4);
581 577
582 /* Validate signature of PCI data structure. */ 578 /* Validate signature of PCI data structure. */
@@ -591,14 +587,14 @@ qla2xxx_find_flt_start(scsi_qla_host_t *ha, uint32_t *start)
591 } while (!last_image); 587 } while (!last_image);
592 588
593 /* Now verify FLT-location structure. */ 589 /* Now verify FLT-location structure. */
594 fltl = (struct qla_flt_location *)ha->request_ring; 590 fltl = (struct qla_flt_location *)req->ring;
595 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 591 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2,
596 sizeof(struct qla_flt_location) >> 2); 592 sizeof(struct qla_flt_location) >> 2);
597 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' || 593 if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' ||
598 fltl->sig[2] != 'L' || fltl->sig[3] != 'T') 594 fltl->sig[2] != 'L' || fltl->sig[3] != 'T')
599 goto end; 595 goto end;
600 596
601 wptr = (uint16_t *)ha->request_ring; 597 wptr = (uint16_t *)req->ring;
602 cnt = sizeof(struct qla_flt_location) >> 1; 598 cnt = sizeof(struct qla_flt_location) >> 1;
603 for (chksum = 0; cnt; cnt--) 599 for (chksum = 0; cnt; cnt--)
604 chksum += le16_to_cpu(*wptr++); 600 chksum += le16_to_cpu(*wptr++);
@@ -619,7 +615,7 @@ end:
619} 615}
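
qla2xxx_find_flt_start() above walks the PCI expansion ROM images (0x55/0xAA signature, "last image" indicator) to locate the FLT-location structure, then accepts it only if it carries the "QFLT" signature and its 16-bit word checksum passes. The sketch below models that final validation step; the accept-when-words-sum-to-zero rule is the usual convention for these flash structures and is assumed here rather than read off the hunk.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int flt_location_valid(const uint8_t *buf, size_t len)
{
	uint16_t chksum = 0;

	if (len < 4 || memcmp(buf, "QFLT", 4) != 0)
		return 0;

	/* sum little-endian 16-bit words; len is even for the real structure */
	for (size_t i = 0; i + 1 < len; i += 2)
		chksum += (uint16_t)(buf[i] | (buf[i + 1] << 8));

	return chksum == 0;
}

int main(void)
{
	uint8_t blob[8] = { 'Q', 'F', 'L', 'T', 0, 0, 0, 0 };
	uint16_t sum = 0;

	/* patch the last word so the whole buffer sums to zero */
	for (size_t i = 0; i < 6; i += 2)
		sum += (uint16_t)(blob[i] | (blob[i + 1] << 8));
	sum = (uint16_t)(0x10000 - sum);
	blob[6] = sum & 0xff;
	blob[7] = sum >> 8;

	printf("valid=%d\n", flt_location_valid(blob, sizeof(blob)));
	return 0;
}
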
620 616
621static void 617static void
622qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr) 618qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
623{ 619{
624 const char *loc, *locations[] = { "DEF", "FLT" }; 620 const char *loc, *locations[] = { "DEF", "FLT" };
625 uint16_t *wptr; 621 uint16_t *wptr;
@@ -627,12 +623,14 @@ qla2xxx_get_flt_info(scsi_qla_host_t *ha, uint32_t flt_addr)
627 uint32_t start; 623 uint32_t start;
628 struct qla_flt_header *flt; 624 struct qla_flt_header *flt;
629 struct qla_flt_region *region; 625 struct qla_flt_region *region;
626 struct qla_hw_data *ha = vha->hw;
627 struct req_que *req = ha->req_q_map[0];
630 628
631 ha->flt_region_flt = flt_addr; 629 ha->flt_region_flt = flt_addr;
632 wptr = (uint16_t *)ha->request_ring; 630 wptr = (uint16_t *)req->ring;
633 flt = (struct qla_flt_header *)ha->request_ring; 631 flt = (struct qla_flt_header *)req->ring;
634 region = (struct qla_flt_region *)&flt[1]; 632 region = (struct qla_flt_region *)&flt[1];
635 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 633 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
636 flt_addr << 2, OPTROM_BURST_SIZE); 634 flt_addr << 2, OPTROM_BURST_SIZE);
637 if (*wptr == __constant_cpu_to_le16(0xffff)) 635 if (*wptr == __constant_cpu_to_le16(0xffff))
638 goto no_flash_data; 636 goto no_flash_data;
@@ -720,7 +718,7 @@ done:
720} 718}
721 719
722static void 720static void
723qla2xxx_get_fdt_info(scsi_qla_host_t *ha) 721qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
724{ 722{
725#define FLASH_BLK_SIZE_4K 0x1000 723#define FLASH_BLK_SIZE_4K 0x1000
726#define FLASH_BLK_SIZE_32K 0x8000 724#define FLASH_BLK_SIZE_32K 0x8000
@@ -731,10 +729,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *ha)
731 struct qla_fdt_layout *fdt; 729 struct qla_fdt_layout *fdt;
732 uint8_t man_id, flash_id; 730 uint8_t man_id, flash_id;
733 uint16_t mid, fid; 731 uint16_t mid, fid;
732 struct qla_hw_data *ha = vha->hw;
733 struct req_que *req = ha->req_q_map[0];
734 734
735 wptr = (uint16_t *)ha->request_ring; 735 wptr = (uint16_t *)req->ring;
736 fdt = (struct qla_fdt_layout *)ha->request_ring; 736 fdt = (struct qla_fdt_layout *)req->ring;
737 ha->isp_ops->read_optrom(ha, (uint8_t *)ha->request_ring, 737 ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE); 738 ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
739 if (*wptr == __constant_cpu_to_le16(0xffff)) 739 if (*wptr == __constant_cpu_to_le16(0xffff))
740 goto no_flash_data; 740 goto no_flash_data;
@@ -807,38 +807,41 @@ done:
807} 807}
808 808
809int 809int
810qla2xxx_get_flash_info(scsi_qla_host_t *ha) 810qla2xxx_get_flash_info(scsi_qla_host_t *vha)
811{ 811{
812 int ret; 812 int ret;
813 uint32_t flt_addr; 813 uint32_t flt_addr;
814 struct qla_hw_data *ha = vha->hw;
814 815
815 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 816 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
816 return QLA_SUCCESS; 817 return QLA_SUCCESS;
817 818
818 ret = qla2xxx_find_flt_start(ha, &flt_addr); 819 ret = qla2xxx_find_flt_start(vha, &flt_addr);
819 if (ret != QLA_SUCCESS) 820 if (ret != QLA_SUCCESS)
820 return ret; 821 return ret;
821 822
822 qla2xxx_get_flt_info(ha, flt_addr); 823 qla2xxx_get_flt_info(vha, flt_addr);
823 qla2xxx_get_fdt_info(ha); 824 qla2xxx_get_fdt_info(vha);
824 825
825 return QLA_SUCCESS; 826 return QLA_SUCCESS;
826} 827}
827 828
828void 829void
829qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha) 830qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
830{ 831{
831#define NPIV_CONFIG_SIZE (16*1024) 832#define NPIV_CONFIG_SIZE (16*1024)
832 void *data; 833 void *data;
833 uint16_t *wptr; 834 uint16_t *wptr;
834 uint16_t cnt, chksum; 835 uint16_t cnt, chksum;
836 int i;
835 struct qla_npiv_header hdr; 837 struct qla_npiv_header hdr;
836 struct qla_npiv_entry *entry; 838 struct qla_npiv_entry *entry;
839 struct qla_hw_data *ha = vha->hw;
837 840
838 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 841 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
839 return; 842 return;
840 843
841 ha->isp_ops->read_optrom(ha, (uint8_t *)&hdr, 844 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
842 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); 845 ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
843 if (hdr.version == __constant_cpu_to_le16(0xffff)) 846 if (hdr.version == __constant_cpu_to_le16(0xffff))
844 return; 847 return;
@@ -857,7 +860,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
857 return; 860 return;
858 } 861 }
859 862
860 ha->isp_ops->read_optrom(ha, (uint8_t *)data, 863 ha->isp_ops->read_optrom(vha, (uint8_t *)data,
861 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); 864 ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE);
862 865
863 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) * 866 cnt = (sizeof(struct qla_npiv_header) + le16_to_cpu(hdr.entries) *
@@ -874,7 +877,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
874 877
875 entry = data + sizeof(struct qla_npiv_header); 878 entry = data + sizeof(struct qla_npiv_header);
876 cnt = le16_to_cpu(hdr.entries); 879 cnt = le16_to_cpu(hdr.entries);
877 for ( ; cnt; cnt--, entry++) { 880 for (i = 0; cnt; cnt--, entry++, i++) {
878 uint16_t flags; 881 uint16_t flags;
879 struct fc_vport_identifiers vid; 882 struct fc_vport_identifiers vid;
880 struct fc_vport *vport; 883 struct fc_vport *vport;
@@ -892,25 +895,29 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *ha)
892 vid.port_name = wwn_to_u64(entry->port_name); 895 vid.port_name = wwn_to_u64(entry->port_name);
893 vid.node_name = wwn_to_u64(entry->node_name); 896 vid.node_name = wwn_to_u64(entry->node_name);
894 897
898 memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
899
895 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " 900 DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
896 "wwnn=%llx vf_id=0x%x qos=0x%x.\n", cnt, 901 "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
897 (unsigned long long)vid.port_name, 902 vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
898 (unsigned long long)vid.node_name, 903 entry->q_qos, entry->f_qos));
899 le16_to_cpu(entry->vf_id), le16_to_cpu(entry->qos))); 904
900 905 if (i < QLA_PRECONFIG_VPORTS) {
901 vport = fc_vport_create(ha->host, 0, &vid); 906 vport = fc_vport_create(vha->host, 0, &vid);
902 if (!vport) 907 if (!vport)
903 qla_printk(KERN_INFO, ha, "NPIV-Config: Failed to " 908 qla_printk(KERN_INFO, ha,
904 "create vport [%02x]: wwpn=%llx wwnn=%llx.\n", cnt, 909 "NPIV-Config: Failed to create vport [%02x]: "
905 (unsigned long long)vid.port_name, 910 "wwpn=%llx wwnn=%llx.\n", cnt,
906 (unsigned long long)vid.node_name); 911 vid.port_name, vid.node_name);
912 }
907 } 913 }
908done: 914done:
909 kfree(data); 915 kfree(data);
916 ha->npiv_info = NULL;
910} 917}
911 918
912static void 919static void
913qla24xx_unprotect_flash(scsi_qla_host_t *ha) 920qla24xx_unprotect_flash(struct qla_hw_data *ha)
914{ 921{
915 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 922 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
916 923
@@ -929,7 +936,7 @@ qla24xx_unprotect_flash(scsi_qla_host_t *ha)
929} 936}
930 937
931static void 938static void
932qla24xx_protect_flash(scsi_qla_host_t *ha) 939qla24xx_protect_flash(struct qla_hw_data *ha)
933{ 940{
934 uint32_t cnt; 941 uint32_t cnt;
935 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 942 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
@@ -955,7 +962,7 @@ skip_wrt_protect:
955} 962}
956 963
957static int 964static int
958qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr, 965qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
959 uint32_t dwords) 966 uint32_t dwords)
960{ 967{
961 int ret; 968 int ret;
@@ -965,6 +972,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
965 dma_addr_t optrom_dma; 972 dma_addr_t optrom_dma;
966 void *optrom = NULL; 973 void *optrom = NULL;
967 uint32_t *s, *d; 974 uint32_t *s, *d;
975 struct qla_hw_data *ha = vha->hw;
968 976
969 ret = QLA_SUCCESS; 977 ret = QLA_SUCCESS;
970 978
@@ -1002,9 +1010,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1002 (fdata & 0xff00) |((fdata << 16) & 1010 (fdata & 0xff00) |((fdata << 16) &
1003 0xff0000) | ((fdata >> 16) & 0xff)); 1011 0xff0000) | ((fdata >> 16) & 0xff));
1004 if (ret != QLA_SUCCESS) { 1012 if (ret != QLA_SUCCESS) {
1005 DEBUG9(printk("%s(%ld) Unable to flash " 1013 DEBUG9(qla_printk("Unable to flash sector: "
1006 "sector: address=%x.\n", __func__, 1014 "address=%x.\n", faddr));
1007 ha->host_no, faddr));
1008 break; 1015 break;
1009 } 1016 }
1010 } 1017 }
@@ -1016,7 +1023,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1016 miter < OPTROM_BURST_DWORDS; miter++, s++, d++) 1023 miter < OPTROM_BURST_DWORDS; miter++, s++, d++)
1017 *s = cpu_to_le32(*d); 1024 *s = cpu_to_le32(*d);
1018 1025
1019 ret = qla2x00_load_ram(ha, optrom_dma, 1026 ret = qla2x00_load_ram(vha, optrom_dma,
1020 flash_data_to_access_addr(faddr), 1027 flash_data_to_access_addr(faddr),
1021 OPTROM_BURST_DWORDS); 1028 OPTROM_BURST_DWORDS);
1022 if (ret != QLA_SUCCESS) { 1029 if (ret != QLA_SUCCESS) {
@@ -1044,7 +1051,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1044 if (ret != QLA_SUCCESS) { 1051 if (ret != QLA_SUCCESS) {
1045 DEBUG9(printk("%s(%ld) Unable to program flash " 1052 DEBUG9(printk("%s(%ld) Unable to program flash "
1046 "address=%x data=%x.\n", __func__, 1053 "address=%x data=%x.\n", __func__,
1047 ha->host_no, faddr, *dwptr)); 1054 vha->host_no, faddr, *dwptr));
1048 break; 1055 break;
1049 } 1056 }
1050 1057
@@ -1067,11 +1074,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *ha, uint32_t *dwptr, uint32_t faddr,
1067} 1074}
1068 1075
1069uint8_t * 1076uint8_t *
1070qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1077qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1071 uint32_t bytes) 1078 uint32_t bytes)
1072{ 1079{
1073 uint32_t i; 1080 uint32_t i;
1074 uint16_t *wptr; 1081 uint16_t *wptr;
1082 struct qla_hw_data *ha = vha->hw;
1075 1083
1076 /* Word reads to NVRAM via registers. */ 1084 /* Word reads to NVRAM via registers. */
1077 wptr = (uint16_t *)buf; 1085 wptr = (uint16_t *)buf;
@@ -1085,7 +1093,7 @@ qla2x00_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1085} 1093}
1086 1094
1087uint8_t * 1095uint8_t *
1088qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1096qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1089 uint32_t bytes) 1097 uint32_t bytes)
1090{ 1098{
1091 uint32_t i; 1099 uint32_t i;
@@ -1094,20 +1102,21 @@ qla24xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1094 /* Dword reads to flash. */ 1102 /* Dword reads to flash. */
1095 dwptr = (uint32_t *)buf; 1103 dwptr = (uint32_t *)buf;
1096 for (i = 0; i < bytes >> 2; i++, naddr++) 1104 for (i = 0; i < bytes >> 2; i++, naddr++)
1097 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, 1105 dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(vha->hw,
1098 nvram_data_to_access_addr(naddr))); 1106 nvram_data_to_access_addr(naddr)));
1099 1107
1100 return buf; 1108 return buf;
1101} 1109}
1102 1110
1103int 1111int
1104qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1112qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1105 uint32_t bytes) 1113 uint32_t bytes)
1106{ 1114{
1107 int ret, stat; 1115 int ret, stat;
1108 uint32_t i; 1116 uint32_t i;
1109 uint16_t *wptr; 1117 uint16_t *wptr;
1110 unsigned long flags; 1118 unsigned long flags;
1119 struct qla_hw_data *ha = vha->hw;
1111 1120
1112 ret = QLA_SUCCESS; 1121 ret = QLA_SUCCESS;
1113 1122
@@ -1134,12 +1143,13 @@ qla2x00_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1134} 1143}
1135 1144
1136int 1145int
1137qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1146qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1138 uint32_t bytes) 1147 uint32_t bytes)
1139{ 1148{
1140 int ret; 1149 int ret;
1141 uint32_t i; 1150 uint32_t i;
1142 uint32_t *dwptr; 1151 uint32_t *dwptr;
1152 struct qla_hw_data *ha = vha->hw;
1143 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1153 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1144 1154
1145 ret = QLA_SUCCESS; 1155 ret = QLA_SUCCESS;
@@ -1162,9 +1172,8 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1162 nvram_data_to_access_addr(naddr), 1172 nvram_data_to_access_addr(naddr),
1163 cpu_to_le32(*dwptr)); 1173 cpu_to_le32(*dwptr));
1164 if (ret != QLA_SUCCESS) { 1174 if (ret != QLA_SUCCESS) {
1165 DEBUG9(printk("%s(%ld) Unable to program " 1175 DEBUG9(qla_printk("Unable to program nvram address=%x "
1166 "nvram address=%x data=%x.\n", __func__, 1176 "data=%x.\n", naddr, *dwptr));
1167 ha->host_no, naddr, *dwptr));
1168 break; 1177 break;
1169 } 1178 }
1170 } 1179 }
@@ -1182,11 +1191,12 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1182} 1191}
1183 1192
1184uint8_t * 1193uint8_t *
1185qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1194qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1186 uint32_t bytes) 1195 uint32_t bytes)
1187{ 1196{
1188 uint32_t i; 1197 uint32_t i;
1189 uint32_t *dwptr; 1198 uint32_t *dwptr;
1199 struct qla_hw_data *ha = vha->hw;
1190 1200
1191 /* Dword reads to flash. */ 1201 /* Dword reads to flash. */
1192 dwptr = (uint32_t *)buf; 1202 dwptr = (uint32_t *)buf;
@@ -1199,19 +1209,20 @@ qla25xx_read_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1199} 1209}
1200 1210
1201int 1211int
1202qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr, 1212qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
1203 uint32_t bytes) 1213 uint32_t bytes)
1204{ 1214{
1215 struct qla_hw_data *ha = vha->hw;
1205#define RMW_BUFFER_SIZE (64 * 1024) 1216#define RMW_BUFFER_SIZE (64 * 1024)
1206 uint8_t *dbuf; 1217 uint8_t *dbuf;
1207 1218
1208 dbuf = vmalloc(RMW_BUFFER_SIZE); 1219 dbuf = vmalloc(RMW_BUFFER_SIZE);
1209 if (!dbuf) 1220 if (!dbuf)
1210 return QLA_MEMORY_ALLOC_FAILED; 1221 return QLA_MEMORY_ALLOC_FAILED;
1211 ha->isp_ops->read_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, 1222 ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
1212 RMW_BUFFER_SIZE); 1223 RMW_BUFFER_SIZE);
1213 memcpy(dbuf + (naddr << 2), buf, bytes); 1224 memcpy(dbuf + (naddr << 2), buf, bytes);
1214 ha->isp_ops->write_optrom(ha, dbuf, ha->flt_region_vpd_nvram << 2, 1225 ha->isp_ops->write_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2,
1215 RMW_BUFFER_SIZE); 1226 RMW_BUFFER_SIZE);
1216 vfree(dbuf); 1227 vfree(dbuf);
1217 1228
@@ -1219,7 +1230,7 @@ qla25xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
1219} 1230}
1220 1231
1221static inline void 1232static inline void
1222qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 1233qla2x00_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
1223{ 1234{
1224 if (IS_QLA2322(ha)) { 1235 if (IS_QLA2322(ha)) {
1225 /* Flip all colors. */ 1236 /* Flip all colors. */
@@ -1249,12 +1260,13 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
1249#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r)) 1260#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
1250 1261
1251void 1262void
1252qla2x00_beacon_blink(struct scsi_qla_host *ha) 1263qla2x00_beacon_blink(struct scsi_qla_host *vha)
1253{ 1264{
1254 uint16_t gpio_enable; 1265 uint16_t gpio_enable;
1255 uint16_t gpio_data; 1266 uint16_t gpio_data;
1256 uint16_t led_color = 0; 1267 uint16_t led_color = 0;
1257 unsigned long flags; 1268 unsigned long flags;
1269 struct qla_hw_data *ha = vha->hw;
1258 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1270 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1259 1271
1260 spin_lock_irqsave(&ha->hardware_lock, flags); 1272 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1298,17 +1310,18 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
1298} 1310}
1299 1311
1300int 1312int
1301qla2x00_beacon_on(struct scsi_qla_host *ha) 1313qla2x00_beacon_on(struct scsi_qla_host *vha)
1302{ 1314{
1303 uint16_t gpio_enable; 1315 uint16_t gpio_enable;
1304 uint16_t gpio_data; 1316 uint16_t gpio_data;
1305 unsigned long flags; 1317 unsigned long flags;
1318 struct qla_hw_data *ha = vha->hw;
1306 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1319 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1307 1320
1308 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1321 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1309 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7; 1322 ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
1310 1323
1311 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1324 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1312 qla_printk(KERN_WARNING, ha, 1325 qla_printk(KERN_WARNING, ha,
1313 "Unable to update fw options (beacon on).\n"); 1326 "Unable to update fw options (beacon on).\n");
1314 return QLA_FUNCTION_FAILED; 1327 return QLA_FUNCTION_FAILED;
@@ -1354,9 +1367,10 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
1354} 1367}
1355 1368
1356int 1369int
1357qla2x00_beacon_off(struct scsi_qla_host *ha) 1370qla2x00_beacon_off(struct scsi_qla_host *vha)
1358{ 1371{
1359 int rval = QLA_SUCCESS; 1372 int rval = QLA_SUCCESS;
1373 struct qla_hw_data *ha = vha->hw;
1360 1374
1361 ha->beacon_blink_led = 0; 1375 ha->beacon_blink_led = 0;
1362 1376
@@ -1366,12 +1380,12 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
1366 else 1380 else
1367 ha->beacon_color_state = QLA_LED_GRN_ON; 1381 ha->beacon_color_state = QLA_LED_GRN_ON;
1368 1382
1369 ha->isp_ops->beacon_blink(ha); /* This turns green LED off */ 1383 ha->isp_ops->beacon_blink(vha); /* This turns green LED off */
1370 1384
1371 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; 1385 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
1372 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7; 1386 ha->fw_options[1] &= ~FO1_DISABLE_GPIO6_7;
1373 1387
1374 rval = qla2x00_set_fw_options(ha, ha->fw_options); 1388 rval = qla2x00_set_fw_options(vha, ha->fw_options);
1375 if (rval != QLA_SUCCESS) 1389 if (rval != QLA_SUCCESS)
1376 qla_printk(KERN_WARNING, ha, 1390 qla_printk(KERN_WARNING, ha,
1377 "Unable to update fw options (beacon off).\n"); 1391 "Unable to update fw options (beacon off).\n");
@@ -1380,7 +1394,7 @@ qla2x00_beacon_off(struct scsi_qla_host *ha)
1380 1394
1381 1395
1382static inline void 1396static inline void
1383qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags) 1397qla24xx_flip_colors(struct qla_hw_data *ha, uint16_t *pflags)
1384{ 1398{
1385 /* Flip all colors. */ 1399 /* Flip all colors. */
1386 if (ha->beacon_color_state == QLA_LED_ALL_ON) { 1400 if (ha->beacon_color_state == QLA_LED_ALL_ON) {
@@ -1395,11 +1409,12 @@ qla24xx_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
1395} 1409}
1396 1410
1397void 1411void
1398qla24xx_beacon_blink(struct scsi_qla_host *ha) 1412qla24xx_beacon_blink(struct scsi_qla_host *vha)
1399{ 1413{
1400 uint16_t led_color = 0; 1414 uint16_t led_color = 0;
1401 uint32_t gpio_data; 1415 uint32_t gpio_data;
1402 unsigned long flags; 1416 unsigned long flags;
1417 struct qla_hw_data *ha = vha->hw;
1403 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1418 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1404 1419
1405 /* Save the Original GPIOD. */ 1420 /* Save the Original GPIOD. */
@@ -1428,20 +1443,21 @@ qla24xx_beacon_blink(struct scsi_qla_host *ha)
1428} 1443}
1429 1444
1430int 1445int
1431qla24xx_beacon_on(struct scsi_qla_host *ha) 1446qla24xx_beacon_on(struct scsi_qla_host *vha)
1432{ 1447{
1433 uint32_t gpio_data; 1448 uint32_t gpio_data;
1434 unsigned long flags; 1449 unsigned long flags;
1450 struct qla_hw_data *ha = vha->hw;
1435 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1451 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1436 1452
1437 if (ha->beacon_blink_led == 0) { 1453 if (ha->beacon_blink_led == 0) {
1438 /* Enable firmware for update */ 1454 /* Enable firmware for update */
1439 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 1455 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
1440 1456
1441 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) 1457 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS)
1442 return QLA_FUNCTION_FAILED; 1458 return QLA_FUNCTION_FAILED;
1443 1459
1444 if (qla2x00_get_fw_options(ha, ha->fw_options) != 1460 if (qla2x00_get_fw_options(vha, ha->fw_options) !=
1445 QLA_SUCCESS) { 1461 QLA_SUCCESS) {
1446 qla_printk(KERN_WARNING, ha, 1462 qla_printk(KERN_WARNING, ha,
1447 "Unable to update fw options (beacon on).\n"); 1463 "Unable to update fw options (beacon on).\n");
@@ -1469,16 +1485,17 @@ qla24xx_beacon_on(struct scsi_qla_host *ha)
1469} 1485}
1470 1486
1471int 1487int
1472qla24xx_beacon_off(struct scsi_qla_host *ha) 1488qla24xx_beacon_off(struct scsi_qla_host *vha)
1473{ 1489{
1474 uint32_t gpio_data; 1490 uint32_t gpio_data;
1475 unsigned long flags; 1491 unsigned long flags;
1492 struct qla_hw_data *ha = vha->hw;
1476 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1493 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1477 1494
1478 ha->beacon_blink_led = 0; 1495 ha->beacon_blink_led = 0;
1479 ha->beacon_color_state = QLA_LED_ALL_ON; 1496 ha->beacon_color_state = QLA_LED_ALL_ON;
1480 1497
1481 ha->isp_ops->beacon_blink(ha); /* Will flip to all off. */ 1498 ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
1482 1499
1483 /* Give control back to firmware. */ 1500 /* Give control back to firmware. */
1484 spin_lock_irqsave(&ha->hardware_lock, flags); 1501 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1492,13 +1509,13 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1492 1509
1493 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1510 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1494 1511
1495 if (qla2x00_set_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1512 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1496 qla_printk(KERN_WARNING, ha, 1513 qla_printk(KERN_WARNING, ha,
1497 "Unable to update fw options (beacon off).\n"); 1514 "Unable to update fw options (beacon off).\n");
1498 return QLA_FUNCTION_FAILED; 1515 return QLA_FUNCTION_FAILED;
1499 } 1516 }
1500 1517
1501 if (qla2x00_get_fw_options(ha, ha->fw_options) != QLA_SUCCESS) { 1518 if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
1502 qla_printk(KERN_WARNING, ha, 1519 qla_printk(KERN_WARNING, ha,
1503 "Unable to get fw options (beacon off).\n"); 1520 "Unable to get fw options (beacon off).\n");
1504 return QLA_FUNCTION_FAILED; 1521 return QLA_FUNCTION_FAILED;
@@ -1517,7 +1534,7 @@ qla24xx_beacon_off(struct scsi_qla_host *ha)
1517 * @ha: HA context 1534 * @ha: HA context
1518 */ 1535 */
1519static void 1536static void
1520qla2x00_flash_enable(scsi_qla_host_t *ha) 1537qla2x00_flash_enable(struct qla_hw_data *ha)
1521{ 1538{
1522 uint16_t data; 1539 uint16_t data;
1523 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1540 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1533,7 +1550,7 @@ qla2x00_flash_enable(scsi_qla_host_t *ha)
1533 * @ha: HA context 1550 * @ha: HA context
1534 */ 1551 */
1535static void 1552static void
1536qla2x00_flash_disable(scsi_qla_host_t *ha) 1553qla2x00_flash_disable(struct qla_hw_data *ha)
1537{ 1554{
1538 uint16_t data; 1555 uint16_t data;
1539 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1554,7 +1571,7 @@ qla2x00_flash_disable(scsi_qla_host_t *ha)
1554 * Returns the byte read from flash @addr. 1571 * Returns the byte read from flash @addr.
1555 */ 1572 */
1556static uint8_t 1573static uint8_t
1557qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr) 1574qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr)
1558{ 1575{
1559 uint16_t data; 1576 uint16_t data;
1560 uint16_t bank_select; 1577 uint16_t bank_select;
@@ -1615,7 +1632,7 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
1615 * @data: Data to write 1632 * @data: Data to write
1616 */ 1633 */
1617static void 1634static void
1618qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data) 1635qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data)
1619{ 1636{
1620 uint16_t bank_select; 1637 uint16_t bank_select;
1621 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1638 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
@@ -1678,7 +1695,7 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
1678 * Returns 0 on success, else non-zero. 1695 * Returns 0 on success, else non-zero.
1679 */ 1696 */
1680static int 1697static int
1681qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data, 1698qla2x00_poll_flash(struct qla_hw_data *ha, uint32_t addr, uint8_t poll_data,
1682 uint8_t man_id, uint8_t flash_id) 1699 uint8_t man_id, uint8_t flash_id)
1683{ 1700{
1684 int status; 1701 int status;
@@ -1718,8 +1735,8 @@ qla2x00_poll_flash(scsi_qla_host_t *ha, uint32_t addr, uint8_t poll_data,
1718 * Returns 0 on success, else non-zero. 1735 * Returns 0 on success, else non-zero.
1719 */ 1736 */
1720static int 1737static int
1721qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data, 1738qla2x00_program_flash_address(struct qla_hw_data *ha, uint32_t addr,
1722 uint8_t man_id, uint8_t flash_id) 1739 uint8_t data, uint8_t man_id, uint8_t flash_id)
1723{ 1740{
1724 /* Write Program Command Sequence. */ 1741 /* Write Program Command Sequence. */
1725 if (IS_OEM_001(ha)) { 1742 if (IS_OEM_001(ha)) {
@@ -1755,7 +1772,7 @@ qla2x00_program_flash_address(scsi_qla_host_t *ha, uint32_t addr, uint8_t data,
1755 * Returns 0 on success, else non-zero. 1772 * Returns 0 on success, else non-zero.
1756 */ 1773 */
1757static int 1774static int
1758qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id) 1775qla2x00_erase_flash(struct qla_hw_data *ha, uint8_t man_id, uint8_t flash_id)
1759{ 1776{
1760 /* Individual Sector Erase Command Sequence */ 1777 /* Individual Sector Erase Command Sequence */
1761 if (IS_OEM_001(ha)) { 1778 if (IS_OEM_001(ha)) {
@@ -1791,7 +1808,7 @@ qla2x00_erase_flash(scsi_qla_host_t *ha, uint8_t man_id, uint8_t flash_id)
1791 * Returns 0 on success, else non-zero. 1808 * Returns 0 on success, else non-zero.
1792 */ 1809 */
1793static int 1810static int
1794qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr, 1811qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
1795 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id) 1812 uint32_t sec_mask, uint8_t man_id, uint8_t flash_id)
1796{ 1813{
1797 /* Individual Sector Erase Command Sequence */ 1814 /* Individual Sector Erase Command Sequence */
@@ -1817,7 +1834,7 @@ qla2x00_erase_flash_sector(scsi_qla_host_t *ha, uint32_t addr,
1817 * @flash_id: Flash ID 1834 * @flash_id: Flash ID
1818 */ 1835 */
1819static void 1836static void
1820qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id, 1837qla2x00_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id,
1821 uint8_t *flash_id) 1838 uint8_t *flash_id)
1822{ 1839{
1823 qla2x00_write_flash_byte(ha, 0x5555, 0xaa); 1840 qla2x00_write_flash_byte(ha, 0x5555, 0xaa);
@@ -1831,8 +1848,8 @@ qla2x00_get_flash_manufacturer(scsi_qla_host_t *ha, uint8_t *man_id,
1831} 1848}
1832 1849
1833static void 1850static void
1834qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr, 1851qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf,
1835 uint32_t length) 1852 uint32_t saddr, uint32_t length)
1836{ 1853{
1837 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1854 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1838 uint32_t midpoint, ilength; 1855 uint32_t midpoint, ilength;
@@ -1856,14 +1873,15 @@ qla2x00_read_flash_data(scsi_qla_host_t *ha, uint8_t *tmp_buf, uint32_t saddr,
1856} 1873}
1857 1874
1858static inline void 1875static inline void
1859qla2x00_suspend_hba(struct scsi_qla_host *ha) 1876qla2x00_suspend_hba(struct scsi_qla_host *vha)
1860{ 1877{
1861 int cnt; 1878 int cnt;
1862 unsigned long flags; 1879 unsigned long flags;
1880 struct qla_hw_data *ha = vha->hw;
1863 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1881 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1864 1882
1865 /* Suspend HBA. */ 1883 /* Suspend HBA. */
1866 scsi_block_requests(ha->host); 1884 scsi_block_requests(vha->host);
1867 ha->isp_ops->disable_intrs(ha); 1885 ha->isp_ops->disable_intrs(ha);
1868 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1886 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1869 1887
@@ -1884,26 +1902,29 @@ qla2x00_suspend_hba(struct scsi_qla_host *ha)
1884} 1902}
1885 1903
1886static inline void 1904static inline void
1887qla2x00_resume_hba(struct scsi_qla_host *ha) 1905qla2x00_resume_hba(struct scsi_qla_host *vha)
1888{ 1906{
1907 struct qla_hw_data *ha = vha->hw;
1908
1889 /* Resume HBA. */ 1909 /* Resume HBA. */
1890 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 1910 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
1891 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1911 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1892 qla2xxx_wake_dpc(ha); 1912 qla2xxx_wake_dpc(vha);
1893 qla2x00_wait_for_hba_online(ha); 1913 qla2x00_wait_for_hba_online(vha);
1894 scsi_unblock_requests(ha->host); 1914 scsi_unblock_requests(vha->host);
1895} 1915}
1896 1916
1897uint8_t * 1917uint8_t *
1898qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1918qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
1899 uint32_t offset, uint32_t length) 1919 uint32_t offset, uint32_t length)
1900{ 1920{
1901 uint32_t addr, midpoint; 1921 uint32_t addr, midpoint;
1902 uint8_t *data; 1922 uint8_t *data;
1923 struct qla_hw_data *ha = vha->hw;
1903 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1924 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1904 1925
1905 /* Suspend HBA. */ 1926 /* Suspend HBA. */
1906 qla2x00_suspend_hba(ha); 1927 qla2x00_suspend_hba(vha);
1907 1928
1908 /* Go with read. */ 1929 /* Go with read. */
1909 midpoint = ha->optrom_size / 2; 1930 midpoint = ha->optrom_size / 2;
@@ -1922,13 +1943,13 @@ qla2x00_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1922 qla2x00_flash_disable(ha); 1943 qla2x00_flash_disable(ha);
1923 1944
1924 /* Resume HBA. */ 1945 /* Resume HBA. */
1925 qla2x00_resume_hba(ha); 1946 qla2x00_resume_hba(vha);
1926 1947
1927 return buf; 1948 return buf;
1928} 1949}
1929 1950
1930int 1951int
1931qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 1952qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
1932 uint32_t offset, uint32_t length) 1953 uint32_t offset, uint32_t length)
1933{ 1954{
1934 1955
@@ -1936,10 +1957,11 @@ qla2x00_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
1936 uint8_t man_id, flash_id, sec_number, data; 1957 uint8_t man_id, flash_id, sec_number, data;
1937 uint16_t wd; 1958 uint16_t wd;
1938 uint32_t addr, liter, sec_mask, rest_addr; 1959 uint32_t addr, liter, sec_mask, rest_addr;
1960 struct qla_hw_data *ha = vha->hw;
1939 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1961 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1940 1962
1941 /* Suspend HBA. */ 1963 /* Suspend HBA. */
1942 qla2x00_suspend_hba(ha); 1964 qla2x00_suspend_hba(vha);
1943 1965
1944 rval = QLA_SUCCESS; 1966 rval = QLA_SUCCESS;
1945 sec_number = 0; 1967 sec_number = 0;
@@ -2139,55 +2161,58 @@ update_flash:
2139 qla2x00_flash_disable(ha); 2161 qla2x00_flash_disable(ha);
2140 2162
2141 /* Resume HBA. */ 2163 /* Resume HBA. */
2142 qla2x00_resume_hba(ha); 2164 qla2x00_resume_hba(vha);
2143 2165
2144 return rval; 2166 return rval;
2145} 2167}
2146 2168
2147uint8_t * 2169uint8_t *
2148qla24xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2170qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2149 uint32_t offset, uint32_t length) 2171 uint32_t offset, uint32_t length)
2150{ 2172{
2173 struct qla_hw_data *ha = vha->hw;
2174
2151 /* Suspend HBA. */ 2175 /* Suspend HBA. */
2152 scsi_block_requests(ha->host); 2176 scsi_block_requests(vha->host);
2153 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2177 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2154 2178
2155 /* Go with read. */ 2179 /* Go with read. */
2156 qla24xx_read_flash_data(ha, (uint32_t *)buf, offset >> 2, length >> 2); 2180 qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2);
2157 2181
2158 /* Resume HBA. */ 2182 /* Resume HBA. */
2159 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2183 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2160 scsi_unblock_requests(ha->host); 2184 scsi_unblock_requests(vha->host);
2161 2185
2162 return buf; 2186 return buf;
2163} 2187}
2164 2188
2165int 2189int
2166qla24xx_write_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2190qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2167 uint32_t offset, uint32_t length) 2191 uint32_t offset, uint32_t length)
2168{ 2192{
2169 int rval; 2193 int rval;
2194 struct qla_hw_data *ha = vha->hw;
2170 2195
2171 /* Suspend HBA. */ 2196 /* Suspend HBA. */
2172 scsi_block_requests(ha->host); 2197 scsi_block_requests(vha->host);
2173 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2198 set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2174 2199
2175 /* Go with write. */ 2200 /* Go with write. */
2176 rval = qla24xx_write_flash_data(ha, (uint32_t *)buf, offset >> 2, 2201 rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2,
2177 length >> 2); 2202 length >> 2);
2178 2203
2179 /* Resume HBA -- RISC reset needed. */ 2204 /* Resume HBA -- RISC reset needed. */
2180 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); 2205 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
2181 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 2206 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2182 qla2xxx_wake_dpc(ha); 2207 qla2xxx_wake_dpc(vha);
2183 qla2x00_wait_for_hba_online(ha); 2208 qla2x00_wait_for_hba_online(vha);
2184 scsi_unblock_requests(ha->host); 2209 scsi_unblock_requests(vha->host);
2185 2210
2186 return rval; 2211 return rval;
2187} 2212}
2188 2213
2189uint8_t * 2214uint8_t *
2190qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf, 2215qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
2191 uint32_t offset, uint32_t length) 2216 uint32_t offset, uint32_t length)
2192{ 2217{
2193 int rval; 2218 int rval;
@@ -2195,6 +2220,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2195 void *optrom; 2220 void *optrom;
2196 uint8_t *pbuf; 2221 uint8_t *pbuf;
2197 uint32_t faddr, left, burst; 2222 uint32_t faddr, left, burst;
2223 struct qla_hw_data *ha = vha->hw;
2198 2224
2199 if (offset & 0xfff) 2225 if (offset & 0xfff)
2200 goto slow_read; 2226 goto slow_read;
@@ -2219,7 +2245,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2219 if (burst > left) 2245 if (burst > left)
2220 burst = left; 2246 burst = left;
2221 2247
2222 rval = qla2x00_dump_ram(ha, optrom_dma, 2248 rval = qla2x00_dump_ram(vha, optrom_dma,
2223 flash_data_to_access_addr(faddr), burst); 2249 flash_data_to_access_addr(faddr), burst);
2224 if (rval) { 2250 if (rval) {
2225 qla_printk(KERN_WARNING, ha, 2251 qla_printk(KERN_WARNING, ha,
@@ -2248,7 +2274,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
2248 return buf; 2274 return buf;
2249 2275
2250slow_read: 2276slow_read:
2251 return qla24xx_read_optrom_data(ha, buf, offset, length); 2277 return qla24xx_read_optrom_data(vha, buf, offset, length);
2252} 2278}
2253 2279
2254/** 2280/**
@@ -2270,7 +2296,7 @@ slow_read:
2270 * Returns QLA_SUCCESS on successful retrieval of version. 2296 * Returns QLA_SUCCESS on successful retrieval of version.
2271 */ 2297 */
2272static void 2298static void
2273qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids) 2299qla2x00_get_fcode_version(struct qla_hw_data *ha, uint32_t pcids)
2274{ 2300{
2275 int ret = QLA_FUNCTION_FAILED; 2301 int ret = QLA_FUNCTION_FAILED;
2276 uint32_t istart, iend, iter, vend; 2302 uint32_t istart, iend, iter, vend;
@@ -2344,13 +2370,14 @@ qla2x00_get_fcode_version(scsi_qla_host_t *ha, uint32_t pcids)
2344} 2370}
2345 2371
2346int 2372int
2347qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf) 2373qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2348{ 2374{
2349 int ret = QLA_SUCCESS; 2375 int ret = QLA_SUCCESS;
2350 uint8_t code_type, last_image; 2376 uint8_t code_type, last_image;
2351 uint32_t pcihdr, pcids; 2377 uint32_t pcihdr, pcids;
2352 uint8_t *dbyte; 2378 uint8_t *dbyte;
2353 uint16_t *dcode; 2379 uint16_t *dcode;
2380 struct qla_hw_data *ha = vha->hw;
2354 2381
2355 if (!ha->pio_address || !mbuf) 2382 if (!ha->pio_address || !mbuf)
2356 return QLA_FUNCTION_FAILED; 2383 return QLA_FUNCTION_FAILED;
@@ -2370,8 +2397,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2370 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 || 2397 if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
2371 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) { 2398 qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
2372 /* No signature */ 2399 /* No signature */
2373 DEBUG2(printk("scsi(%ld): No matching ROM " 2400 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
2374 "signature.\n", ha->host_no)); 2401 "signature.\n"));
2375 ret = QLA_FUNCTION_FAILED; 2402 ret = QLA_FUNCTION_FAILED;
2376 break; 2403 break;
2377 } 2404 }
@@ -2387,8 +2414,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2387 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' || 2414 qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
2388 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') { 2415 qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
2389 /* Incorrect header. */ 2416 /* Incorrect header. */
2390 DEBUG2(printk("%s(): PCI data struct not found " 2417 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
2391 "pcir_adr=%x.\n", __func__, pcids)); 2418 "found pcir_adr=%x.\n", pcids));
2392 ret = QLA_FUNCTION_FAILED; 2419 ret = QLA_FUNCTION_FAILED;
2393 break; 2420 break;
2394 } 2421 }
@@ -2402,7 +2429,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2402 qla2x00_read_flash_byte(ha, pcids + 0x12); 2429 qla2x00_read_flash_byte(ha, pcids + 0x12);
2403 ha->bios_revision[1] = 2430 ha->bios_revision[1] =
2404 qla2x00_read_flash_byte(ha, pcids + 0x13); 2431 qla2x00_read_flash_byte(ha, pcids + 0x13);
2405 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, 2432 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
2406 ha->bios_revision[1], ha->bios_revision[0])); 2433 ha->bios_revision[1], ha->bios_revision[0]));
2407 break; 2434 break;
2408 case ROM_CODE_TYPE_FCODE: 2435 case ROM_CODE_TYPE_FCODE:
@@ -2416,12 +2443,12 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2416 qla2x00_read_flash_byte(ha, pcids + 0x12); 2443 qla2x00_read_flash_byte(ha, pcids + 0x12);
2417 ha->efi_revision[1] = 2444 ha->efi_revision[1] =
2418 qla2x00_read_flash_byte(ha, pcids + 0x13); 2445 qla2x00_read_flash_byte(ha, pcids + 0x13);
2419 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, 2446 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
2420 ha->efi_revision[1], ha->efi_revision[0])); 2447 ha->efi_revision[1], ha->efi_revision[0]));
2421 break; 2448 break;
2422 default: 2449 default:
2423 DEBUG2(printk("%s(): Unrecognized code type %x at " 2450 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
2424 "pcids %x.\n", __func__, code_type, pcids)); 2451 "type %x at pcids %x.\n", code_type, pcids));
2425 break; 2452 break;
2426 } 2453 }
2427 2454
@@ -2441,16 +2468,16 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2441 2468
2442 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10, 2469 qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
2443 8); 2470 8);
2444 DEBUG3(printk("%s(%ld): dumping fw ver from flash:\n", 2471 DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
2445 __func__, ha->host_no)); 2472 "flash:\n"));
2446 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8)); 2473 DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
2447 2474
2448 if ((dcode[0] == 0xffff && dcode[1] == 0xffff && 2475 if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
2449 dcode[2] == 0xffff && dcode[3] == 0xffff) || 2476 dcode[2] == 0xffff && dcode[3] == 0xffff) ||
2450 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2477 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2451 dcode[3] == 0)) { 2478 dcode[3] == 0)) {
2452 DEBUG2(printk("%s(): Unrecognized fw revision at " 2479 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
2453 "%x.\n", __func__, ha->flt_region_fw * 4)); 2480 "revision at %x.\n", ha->flt_region_fw * 4));
2454 } else { 2481 } else {
2455 /* values are in big endian */ 2482 /* values are in big endian */
2456 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1]; 2483 ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
@@ -2465,7 +2492,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2465} 2492}
2466 2493
2467int 2494int
2468qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf) 2495qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
2469{ 2496{
2470 int ret = QLA_SUCCESS; 2497 int ret = QLA_SUCCESS;
2471 uint32_t pcihdr, pcids; 2498 uint32_t pcihdr, pcids;
@@ -2473,6 +2500,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2473 uint8_t *bcode; 2500 uint8_t *bcode;
2474 uint8_t code_type, last_image; 2501 uint8_t code_type, last_image;
2475 int i; 2502 int i;
2503 struct qla_hw_data *ha = vha->hw;
2476 2504
2477 if (!mbuf) 2505 if (!mbuf)
2478 return QLA_FUNCTION_FAILED; 2506 return QLA_FUNCTION_FAILED;
@@ -2489,12 +2517,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2489 last_image = 1; 2517 last_image = 1;
2490 do { 2518 do {
2491 /* Verify PCI expansion ROM header. */ 2519 /* Verify PCI expansion ROM header. */
2492 qla24xx_read_flash_data(ha, dcode, pcihdr >> 2, 0x20); 2520 qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20);
2493 bcode = mbuf + (pcihdr % 4); 2521 bcode = mbuf + (pcihdr % 4);
2494 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { 2522 if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
2495 /* No signature */ 2523 /* No signature */
2496 DEBUG2(printk("scsi(%ld): No matching ROM " 2524 DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
2497 "signature.\n", ha->host_no)); 2525 "signature.\n"));
2498 ret = QLA_FUNCTION_FAILED; 2526 ret = QLA_FUNCTION_FAILED;
2499 break; 2527 break;
2500 } 2528 }
@@ -2502,15 +2530,15 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2502 /* Locate PCI data structure. */ 2530 /* Locate PCI data structure. */
2503 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); 2531 pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
2504 2532
2505 qla24xx_read_flash_data(ha, dcode, pcids >> 2, 0x20); 2533 qla24xx_read_flash_data(vha, dcode, pcids >> 2, 0x20);
2506 bcode = mbuf + (pcihdr % 4); 2534 bcode = mbuf + (pcihdr % 4);
2507 2535
2508 /* Validate signature of PCI data structure. */ 2536 /* Validate signature of PCI data structure. */
2509 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || 2537 if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
2510 bcode[0x2] != 'I' || bcode[0x3] != 'R') { 2538 bcode[0x2] != 'I' || bcode[0x3] != 'R') {
2511 /* Incorrect header. */ 2539 /* Incorrect header. */
2512 DEBUG2(printk("%s(): PCI data struct not found " 2540 DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
2513 "pcir_adr=%x.\n", __func__, pcids)); 2541 "found pcir_adr=%x.\n", pcids));
2514 ret = QLA_FUNCTION_FAILED; 2542 ret = QLA_FUNCTION_FAILED;
2515 break; 2543 break;
2516 } 2544 }
@@ -2522,26 +2550,26 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2522 /* Intel x86, PC-AT compatible. */ 2550 /* Intel x86, PC-AT compatible. */
2523 ha->bios_revision[0] = bcode[0x12]; 2551 ha->bios_revision[0] = bcode[0x12];
2524 ha->bios_revision[1] = bcode[0x13]; 2552 ha->bios_revision[1] = bcode[0x13];
2525 DEBUG3(printk("%s(): read BIOS %d.%d.\n", __func__, 2553 DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
2526 ha->bios_revision[1], ha->bios_revision[0])); 2554 ha->bios_revision[1], ha->bios_revision[0]));
2527 break; 2555 break;
2528 case ROM_CODE_TYPE_FCODE: 2556 case ROM_CODE_TYPE_FCODE:
2529 /* Open Firmware standard for PCI (FCode). */ 2557 /* Open Firmware standard for PCI (FCode). */
2530 ha->fcode_revision[0] = bcode[0x12]; 2558 ha->fcode_revision[0] = bcode[0x12];
2531 ha->fcode_revision[1] = bcode[0x13]; 2559 ha->fcode_revision[1] = bcode[0x13];
2532 DEBUG3(printk("%s(): read FCODE %d.%d.\n", __func__, 2560 DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
2533 ha->fcode_revision[1], ha->fcode_revision[0])); 2561 ha->fcode_revision[1], ha->fcode_revision[0]));
2534 break; 2562 break;
2535 case ROM_CODE_TYPE_EFI: 2563 case ROM_CODE_TYPE_EFI:
2536 /* Extensible Firmware Interface (EFI). */ 2564 /* Extensible Firmware Interface (EFI). */
2537 ha->efi_revision[0] = bcode[0x12]; 2565 ha->efi_revision[0] = bcode[0x12];
2538 ha->efi_revision[1] = bcode[0x13]; 2566 ha->efi_revision[1] = bcode[0x13];
2539 DEBUG3(printk("%s(): read EFI %d.%d.\n", __func__, 2567 DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
2540 ha->efi_revision[1], ha->efi_revision[0])); 2568 ha->efi_revision[1], ha->efi_revision[0]));
2541 break; 2569 break;
2542 default: 2570 default:
2543 DEBUG2(printk("%s(): Unrecognized code type %x at " 2571 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
2544 "pcids %x.\n", __func__, code_type, pcids)); 2572 "type %x at pcids %x.\n", code_type, pcids));
2545 break; 2573 break;
2546 } 2574 }
2547 2575
@@ -2555,7 +2583,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2555 memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); 2583 memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
2556 dcode = mbuf; 2584 dcode = mbuf;
2557 2585
2558 qla24xx_read_flash_data(ha, dcode, ha->flt_region_fw + 4, 4); 2586 qla24xx_read_flash_data(vha, dcode, ha->flt_region_fw + 4, 4);
2559 for (i = 0; i < 4; i++) 2587 for (i = 0; i < 4; i++)
2560 dcode[i] = be32_to_cpu(dcode[i]); 2588 dcode[i] = be32_to_cpu(dcode[i]);
2561 2589
@@ -2563,8 +2591,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2563 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || 2591 dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
2564 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && 2592 (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
2565 dcode[3] == 0)) { 2593 dcode[3] == 0)) {
2566 DEBUG2(printk("%s(): Unrecognized fw version at %x.\n", 2594 DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
2567 __func__, ha->flt_region_fw)); 2595 "revision at %x.\n", ha->flt_region_fw * 4));
2568 } else { 2596 } else {
2569 ha->fw_revision[0] = dcode[0]; 2597 ha->fw_revision[0] = dcode[0];
2570 ha->fw_revision[1] = dcode[1]; 2598 ha->fw_revision[1] = dcode[1];
@@ -2593,8 +2621,9 @@ qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
2593} 2621}
2594 2622
2595int 2623int
2596qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size) 2624qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
2597{ 2625{
2626 struct qla_hw_data *ha = vha->hw;
2598 uint8_t *pos = ha->vpd; 2627 uint8_t *pos = ha->vpd;
2599 uint8_t *end = pos + ha->vpd_size; 2628 uint8_t *end = pos + ha->vpd_size;
2600 int len = 0; 2629 int len = 0;
@@ -2621,9 +2650,10 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
2621} 2650}
2622 2651
2623static int 2652static int
2624qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) 2653qla2xxx_hw_event_store(scsi_qla_host_t *vha, uint32_t *fdata)
2625{ 2654{
2626 uint32_t d[2], faddr; 2655 uint32_t d[2], faddr;
2656 struct qla_hw_data *ha = vha->hw;
2627 2657
2628 /* Locate first empty entry. */ 2658 /* Locate first empty entry. */
2629 for (;;) { 2659 for (;;) {
@@ -2634,7 +2664,7 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2634 return QLA_MEMORY_ALLOC_FAILED; 2664 return QLA_MEMORY_ALLOC_FAILED;
2635 } 2665 }
2636 2666
2637 qla24xx_read_flash_data(ha, d, ha->hw_event_ptr, 2); 2667 qla24xx_read_flash_data(vha, d, ha->hw_event_ptr, 2);
2638 faddr = flash_data_to_access_addr(ha->hw_event_ptr); 2668 faddr = flash_data_to_access_addr(ha->hw_event_ptr);
2639 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE; 2669 ha->hw_event_ptr += FA_HW_EVENT_ENTRY_SIZE;
2640 if (d[0] == __constant_cpu_to_le32(0xffffffff) && 2670 if (d[0] == __constant_cpu_to_le32(0xffffffff) &&
@@ -2655,12 +2685,12 @@ qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2655} 2685}
2656 2686
2657int 2687int
2658qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1, 2688qla2xxx_hw_event_log(scsi_qla_host_t *vha, uint16_t code, uint16_t d1,
2659 uint16_t d2, uint16_t d3) 2689 uint16_t d2, uint16_t d3)
2660{ 2690{
2661#define QMARK(a, b, c, d) \ 2691#define QMARK(a, b, c, d) \
2662 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d)) 2692 cpu_to_le32(LSB(a) << 24 | LSB(b) << 16 | LSB(c) << 8 | LSB(d))
2663 2693 struct qla_hw_data *ha = vha->hw;
2664 int rval; 2694 int rval;
2665 uint32_t marker[2], fdata[4]; 2695 uint32_t marker[2], fdata[4];
2666 2696
@@ -2681,7 +2711,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2681 /* Locate marker. */ 2711 /* Locate marker. */
2682 ha->hw_event_ptr = ha->flt_region_hw_event; 2712 ha->hw_event_ptr = ha->flt_region_hw_event;
2683 for (;;) { 2713 for (;;) {
2684 qla24xx_read_flash_data(ha, fdata, ha->hw_event_ptr, 2714 qla24xx_read_flash_data(vha, fdata, ha->hw_event_ptr,
2685 4); 2715 4);
2686 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) && 2716 if (fdata[0] == __constant_cpu_to_le32(0xffffffff) &&
2687 fdata[1] == __constant_cpu_to_le32(0xffffffff)) 2717 fdata[1] == __constant_cpu_to_le32(0xffffffff))
@@ -2700,7 +2730,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2700 } 2730 }
2701 /* No marker, write it. */ 2731 /* No marker, write it. */
2702 if (!ha->flags.hw_event_marker_found) { 2732 if (!ha->flags.hw_event_marker_found) {
2703 rval = qla2xxx_hw_event_store(ha, marker); 2733 rval = qla2xxx_hw_event_store(vha, marker);
2704 if (rval != QLA_SUCCESS) { 2734 if (rval != QLA_SUCCESS) {
2705 DEBUG2(qla_printk(KERN_WARNING, ha, 2735 DEBUG2(qla_printk(KERN_WARNING, ha,
2706 "HW event -- Failed marker write=%x.!\n", 2736 "HW event -- Failed marker write=%x.!\n",
@@ -2714,7 +2744,7 @@ qla2xxx_hw_event_log(scsi_qla_host_t *ha, uint16_t code, uint16_t d1,
2714 /* Store error. */ 2744 /* Store error. */
2715 fdata[0] = cpu_to_le32(code << 16 | d1); 2745 fdata[0] = cpu_to_le32(code << 16 | d1);
2716 fdata[1] = cpu_to_le32(d2 << 16 | d3); 2746 fdata[1] = cpu_to_le32(d2 << 16 | d3);
2717 rval = qla2xxx_hw_event_store(ha, fdata); 2747 rval = qla2xxx_hw_event_store(vha, fdata);
2718 if (rval != QLA_SUCCESS) { 2748 if (rval != QLA_SUCCESS) {
2719 DEBUG2(qla_printk(KERN_WARNING, ha, 2749 DEBUG2(qla_printk(KERN_WARNING, ha,
2720 "HW event -- Failed error write=%x.!\n", 2750 "HW event -- Failed error write=%x.!\n",
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index eea6720adf16..be22f3a09f8d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k9" 10#define QLA2XXX_VERSION "8.02.03-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
14#define QLA_DRIVER_PATCH_VER 1 14#define QLA_DRIVER_PATCH_VER 3
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
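
For reference, the optrom helpers in the qla_sup.c portion of this diff bracket raw flash access the same way before and after the conversion: block SCSI requests and mark flash activity, perform the access, then clear the mark, schedule an ISP reset when data was written, and unblock requests. A rough sketch of that bracket with simplified stand-in fields (the real driver uses scsi_block_requests(), the MBX_UPDATE_FLASH_ACTIVE bit, ISP_ABORT_NEEDED and qla2xxx_wake_dpc()):

	#include <stdbool.h>

	/* Simplified stand-ins for the per-host flags named above. */
	struct flash_host_sketch {
		bool requests_blocked;
		bool flash_active;
		bool isp_abort_needed;
	};

	static void optrom_access_sketch(struct flash_host_sketch *vha, bool is_write)
	{
		/* Suspend: stop new SCSI commands and mark flash activity. */
		vha->requests_blocked = true;
		vha->flash_active = true;

		/* ... raw flash read or write would go here ... */

		/* Resume: clear the mark; a write additionally needs an ISP
		 * reset before normal operation continues. */
		vha->flash_active = false;
		if (is_write)
			vha->isp_abort_needed = true;
		vha->requests_blocked = false;
	}
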