author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-15 19:51:54 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-15 19:51:54 -0400
commit    bc06cffdec85d487c77109dffcd2f285bdc502d3 (patch)
tree      adc6e6398243da87e66c56102840597a329183a0 /drivers/scsi
parent    d3502d7f25b22cfc9762bf1781faa9db1bb3be2e (diff)
parent    9413d7b8aa777dd1fc7db9563ce5e80d769fe7b5 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (166 commits)
  [SCSI] ibmvscsi: convert to use the data buffer accessors
  [SCSI] dc395x: convert to use the data buffer accessors
  [SCSI] ncr53c8xx: convert to use the data buffer accessors
  [SCSI] sym53c8xx: convert to use the data buffer accessors
  [SCSI] ppa: coding police and printk levels
  [SCSI] aic7xxx_old: remove redundant GFP_ATOMIC from kmalloc
  [SCSI] i2o: remove redundant GFP_ATOMIC from kmalloc from device.c
  [SCSI] remove the dead CYBERSTORMIII_SCSI option
  [SCSI] don't build scsi_dma_{map,unmap} for !HAS_DMA
  [SCSI] Clean up scsi_add_lun a bit
  [SCSI] 53c700: Remove printk, which triggers because of low scsi clock on SNI RMs
  [SCSI] sni_53c710: Cleanup
  [SCSI] qla4xxx: Fix underrun/overrun conditions
  [SCSI] megaraid_mbox: use mutex instead of semaphore
  [SCSI] aacraid: add 51245, 51645 and 52245 adapters to documentation.
  [SCSI] qla2xxx: update version to 8.02.00-k1.
  [SCSI] qla2xxx: add support for NPIV
  [SCSI] stex: use resid for xfer len information
  [SCSI] Add Brownie 1200U3P to blacklist
  [SCSI] scsi.c: convert to use the data buffer accessors
  ...
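The dominant pattern in this batch is the conversion of drivers from the old struct scsi_cmnd fields (use_sg, request_buffer, request_bufflen, resid) to the data buffer accessors from <scsi/scsi_cmnd.h>. A minimal sketch of the shape of that conversion; example_sgl_entry and the two function names are hypothetical stand-ins for a driver's real hardware SGL format, not code from any driver below:

#include <scsi/scsi_cmnd.h>

/* Hypothetical hardware SGL entry, standing in for a driver's real one. */
struct example_sgl_entry {
	u64 address;
	u32 length;
};

static int example_map_command(struct scsi_cmnd *cmd, struct example_sgl_entry *sgl)
{
	struct scatterlist *sg;
	int sg_count, i;

	/* scsi_dma_map() replaces the old pci_map_sg()/pci_map_single()
	 * split: 0 means no data to transfer, < 0 means mapping failed. */
	sg_count = scsi_dma_map(cmd);
	if (sg_count <= 0)
		return sg_count;

	/* scsi_for_each_sg() replaces open-coded indexing into cmd->request_buffer. */
	scsi_for_each_sg(cmd, sg, sg_count, i) {
		sgl[i].address = sg_dma_address(sg);
		sgl[i].length = sg_dma_len(sg);
	}
	return sg_count;
}

static void example_complete_command(struct scsi_cmnd *cmd, u32 transferred)
{
	/* Residual reporting via accessor instead of writing cmd->resid. */
	if (transferred < scsi_bufflen(cmd))
		scsi_set_resid(cmd, scsi_bufflen(cmd) - transferred);
	scsi_dma_unmap(cmd);	/* replaces pci_unmap_sg()/pci_unmap_single() */
}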
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 135
-rw-r--r--  drivers/scsi/3w-xxxx.c | 104
-rw-r--r--  drivers/scsi/53c700.c | 77
-rw-r--r--  drivers/scsi/53c700.h | 5
-rw-r--r--  drivers/scsi/53c7xx.c | 6102
-rw-r--r--  drivers/scsi/53c7xx.h | 1608
-rw-r--r--  drivers/scsi/53c7xx.scr | 1591
-rw-r--r--  drivers/scsi/53c7xx_d.h_shipped | 2874
-rw-r--r--  drivers/scsi/53c7xx_u.h_shipped | 102
-rw-r--r--  drivers/scsi/BusLogic.c | 51
-rw-r--r--  drivers/scsi/Kconfig | 56
-rw-r--r--  drivers/scsi/Makefile | 23
-rw-r--r--  drivers/scsi/NCR5380.c | 14
-rw-r--r--  drivers/scsi/NCR5380.h | 6
-rw-r--r--  drivers/scsi/NCR53c406a.c | 45
-rw-r--r--  drivers/scsi/a100u2w.c | 1239
-rw-r--r--  drivers/scsi/a100u2w.h | 297
-rw-r--r--  drivers/scsi/a4000t.c | 143
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 322
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 40
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 210
-rw-r--r--  drivers/scsi/aacraid/linit.c | 104
-rw-r--r--  drivers/scsi/aacraid/rx.c | 33
-rw-r--r--  drivers/scsi/advansys.c | 101
-rw-r--r--  drivers/scsi/advansys.h | 36
-rw-r--r--  drivers/scsi/aha152x.c | 50
-rw-r--r--  drivers/scsi/aha1740.c | 48
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 51
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 59
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 4
-rw-r--r--  drivers/scsi/aic7xxx_old.c | 57
-rw-r--r--  drivers/scsi/amiga7xx.c | 138
-rw-r--r--  drivers/scsi/amiga7xx.h | 23
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 559
-rw-r--r--  drivers/scsi/bvme6000.c | 76
-rw-r--r--  drivers/scsi/bvme6000.h | 24
-rw-r--r--  drivers/scsi/bvme6000_scsi.c | 135
-rw-r--r--  drivers/scsi/dc395x.c | 163
-rw-r--r--  drivers/scsi/dpt_i2o.c | 33
-rw-r--r--  drivers/scsi/eata.c | 48
-rw-r--r--  drivers/scsi/esp_scsi.c | 30
-rw-r--r--  drivers/scsi/esp_scsi.h | 2
-rw-r--r--  drivers/scsi/fdomain.c | 70
-rw-r--r--  drivers/scsi/gdth.c | 4
-rw-r--r--  drivers/scsi/hptiop.c | 76
-rw-r--r--  drivers/scsi/ibmmca.c | 1267
-rw-r--r--  drivers/scsi/ibmmca.h | 21
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 463
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.h | 2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c | 20
-rw-r--r--  drivers/scsi/initio.c | 3819
-rw-r--r--  drivers/scsi/initio.h | 313
-rw-r--r--  drivers/scsi/ipr.c | 144
-rw-r--r--  drivers/scsi/ips.c | 401
-rw-r--r--  drivers/scsi/ips.h | 44
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 606
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 9
-rw-r--r--  drivers/scsi/jazz_esp.c | 4
-rw-r--r--  drivers/scsi/libiscsi.c | 650
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 10
-rw-r--r--  drivers/scsi/lpfc/Makefile | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 358
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 760
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 182
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 971
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 508
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 50
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 3377
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 2262
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 558
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 948
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 306
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 101
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 1325
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 557
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 2047
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 47
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 523
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 113
-rw-r--r--  drivers/scsi/mac53c94.c | 62
-rw-r--r--  drivers/scsi/megaraid.c | 141
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 171
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 112
-rw-r--r--  drivers/scsi/mesh.c | 46
-rw-r--r--  drivers/scsi/mvme16x.c | 78
-rw-r--r--  drivers/scsi/mvme16x.h | 24
-rw-r--r--  drivers/scsi/mvme16x_scsi.c | 158
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 70
-rw-r--r--  drivers/scsi/nsp32.c | 194
-rw-r--r--  drivers/scsi/pcmcia/sym53c500_cs.c | 42
-rw-r--r--  drivers/scsi/ppa.c | 57
-rw-r--r--  drivers/scsi/qla2xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 164
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 83
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 91
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 39
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 166
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 242
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 85
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 396
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 497
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 237
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_dbg.c | 174
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 78
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 426
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 7
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 105
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 101
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 114
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 274
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 96
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 3
-rw-r--r--  drivers/scsi/qlogicfas408.c | 30
-rw-r--r--  drivers/scsi/scsi.c | 48
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_error.c | 26
-rw-r--r--  drivers/scsi/scsi_lib_dma.c | 50
-rw-r--r--  drivers/scsi/scsi_scan.c | 67
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 25
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 831
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 138
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sg.c | 2
-rw-r--r--  drivers/scsi/sni_53c710.c | 10
-rw-r--r--  drivers/scsi/sr.c | 6
-rw-r--r--  drivers/scsi/stex.c | 111
-rw-r--r--  drivers/scsi/sun_esp.c | 2
-rw-r--r--  drivers/scsi/sym53c416.c | 44
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 83
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.h | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 85
-rw-r--r--  drivers/scsi/tmscsim.h | 10
-rw-r--r--  drivers/scsi/u14-34f.c | 60
-rw-r--r--  drivers/scsi/ultrastor.c | 19
-rw-r--r--  drivers/scsi/wd7000.c | 20
-rw-r--r--  drivers/scsi/zorro7xx.c | 180
150 files changed, 20562 insertions, 25626 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index eb766c3af1c8..76c09097175f 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1306,22 +1306,26 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
 				wake_up(&tw_dev->ioctl_wqueue);
 			}
 		} else {
+			struct scsi_cmnd *cmd;
+
+			cmd = tw_dev->srb[request_id];
+
 			twa_scsiop_execute_scsi_complete(tw_dev, request_id);
 			/* If no error command was a success */
 			if (error == 0) {
-				tw_dev->srb[request_id]->result = (DID_OK << 16);
+				cmd->result = (DID_OK << 16);
 			}
 
 			/* If error, command failed */
 			if (error == 1) {
 				/* Ask for a host reset */
-				tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+				cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
 			}
 
 			/* Report residual bytes for single sgl */
-			if ((tw_dev->srb[request_id]->use_sg <= 1) && (full_command_packet->command.newcommand.status == 0)) {
-				if (full_command_packet->command.newcommand.sg_list[0].length < tw_dev->srb[request_id]->request_bufflen)
-					tw_dev->srb[request_id]->resid = tw_dev->srb[request_id]->request_bufflen - full_command_packet->command.newcommand.sg_list[0].length;
+			if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
+				if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
+					scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
 			}
 
 			/* Now complete the io */
@@ -1384,52 +1388,20 @@ static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
 {
 	int use_sg;
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-	struct pci_dev *pdev = tw_dev->tw_pci_dev;
-	int retval = 0;
-
-	if (cmd->use_sg == 0)
-		goto out;
-
-	use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
 
-	if (use_sg == 0) {
+	use_sg = scsi_dma_map(cmd);
+	if (!use_sg)
+		return 0;
+	else if (use_sg < 0) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
-		goto out;
+		return 0;
 	}
 
 	cmd->SCp.phase = TW_PHASE_SGLIST;
 	cmd->SCp.have_data_in = use_sg;
-	retval = use_sg;
-out:
-	return retval;
-} /* End twa_map_scsi_sg_data() */
-
-/* This function will perform a pci-dma map for a single buffer */
-static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id)
-{
-	dma_addr_t mapping;
-	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-	struct pci_dev *pdev = tw_dev->tw_pci_dev;
-	dma_addr_t retval = 0;
-
-	if (cmd->request_bufflen == 0) {
-		retval = 0;
-		goto out;
-	}
-
-	mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL);
-
-	if (mapping == 0) {
-		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page");
-		goto out;
-	}
 
-	cmd->SCp.phase = TW_PHASE_SINGLE;
-	cmd->SCp.have_data_in = mapping;
-	retval = mapping;
-out:
-	return retval;
-} /* End twa_map_scsi_single_data() */
+	return use_sg;
+} /* End twa_map_scsi_sg_data() */
 
 /* This function will poll for a response interrupt of a request */
 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
@@ -1815,15 +1787,13 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 	u32 num_sectors = 0x0;
 	int i, sg_count;
 	struct scsi_cmnd *srb = NULL;
-	struct scatterlist *sglist = NULL;
-	dma_addr_t buffaddr = 0x0;
+	struct scatterlist *sglist = NULL, *sg;
 	int retval = 1;
 
 	if (tw_dev->srb[request_id]) {
-		if (tw_dev->srb[request_id]->request_buffer) {
-			sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
-		}
 		srb = tw_dev->srb[request_id];
+		if (scsi_sglist(srb))
+			sglist = scsi_sglist(srb);
 	}
 
 	/* Initialize command packet */
@@ -1856,32 +1826,12 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 
 	if (!sglistarg) {
 		/* Map sglist from scsi layer to cmd packet */
-		if (tw_dev->srb[request_id]->use_sg == 0) {
-			if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
-				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
-				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
-				if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
-					memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
-			} else {
-				buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
-				if (buffaddr == 0)
-					goto out;
-
-				command_packet->sg_list[0].address = TW_CPU_TO_SGL(buffaddr);
-				command_packet->sg_list[0].length = cpu_to_le32(tw_dev->srb[request_id]->request_bufflen);
-			}
-			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), 1));
 
-			if (command_packet->sg_list[0].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
-				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi");
-				goto out;
-			}
-		}
-
-		if (tw_dev->srb[request_id]->use_sg > 0) {
-			if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) {
-				if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) {
-					struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
+		if (scsi_sg_count(srb)) {
+			if ((scsi_sg_count(srb) == 1) &&
+			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+				if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
+					struct scatterlist *sg = scsi_sglist(srb);
 					char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
 					memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
 					kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1893,16 +1843,16 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 			if (sg_count == 0)
 				goto out;
 
-			for (i = 0; i < sg_count; i++) {
-				command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(&sglist[i]));
-				command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(&sglist[i]));
+			scsi_for_each_sg(srb, sg, sg_count, i) {
+				command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
+				command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
 				if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
 					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
 					goto out;
 				}
 			}
 		}
-		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), tw_dev->srb[request_id]->use_sg));
+		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
 		}
 	} else {
 		/* Internal cdb post */
@@ -1932,7 +1882,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 
 	/* Update SG statistics */
 	if (srb) {
-		tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
+		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
 		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
 			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
 	}
@@ -1951,16 +1901,13 @@ out:
 /* This function completes an execute scsi operation */
 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
 {
-	if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH &&
-	    (tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE ||
-	     tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) {
-		if (tw_dev->srb[request_id]->use_sg == 0) {
-			memcpy(tw_dev->srb[request_id]->request_buffer,
-			       tw_dev->generic_buffer_virt[request_id],
-			       tw_dev->srb[request_id]->request_bufflen);
-		}
-		if (tw_dev->srb[request_id]->use_sg == 1) {
-			struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
+	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+		if (scsi_sg_count(cmd) == 1) {
+			struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]);
 			char *buf;
 			unsigned long flags = 0;
 			local_irq_save(flags);
@@ -2017,16 +1964,8 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-	struct pci_dev *pdev = tw_dev->tw_pci_dev;
 
-	switch(cmd->SCp.phase) {
-	case TW_PHASE_SINGLE:
-		pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
-		break;
-	case TW_PHASE_SGLIST:
-		pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
-		break;
-	}
+	scsi_dma_unmap(cmd);
 } /* End twa_unmap_scsi_data() */
 
 /* scsi_host_template initializer */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 656bdb1352d8..c7995fc216e8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1273,57 +1273,24 @@ static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
 	int use_sg;
 
 	dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
-
-	if (cmd->use_sg == 0)
-		return 0;
 
-	use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
-
-	if (use_sg == 0) {
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0) {
 		printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
 		return 0;
 	}
 
 	cmd->SCp.phase = TW_PHASE_SGLIST;
 	cmd->SCp.have_data_in = use_sg;
 
 	return use_sg;
 } /* End tw_map_scsi_sg_data() */
 
-static u32 tw_map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
-	dma_addr_t mapping;
-
-	dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data()\n");
-
-	if (cmd->request_bufflen == 0)
-		return 0;
-
-	mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, DMA_BIDIRECTIONAL);
-
-	if (mapping == 0) {
-		printk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data(): pci_map_page() failed.\n");
-		return 0;
-	}
-
-	cmd->SCp.phase = TW_PHASE_SINGLE;
-	cmd->SCp.have_data_in = mapping;
-
-	return mapping;
-} /* End tw_map_scsi_single_data() */
-
 static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
 {
 	dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
 
-	switch(cmd->SCp.phase) {
-	case TW_PHASE_SINGLE:
-		pci_unmap_page(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
-		break;
-	case TW_PHASE_SGLIST:
-		pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
-		break;
-	}
+	scsi_dma_unmap(cmd);
 } /* End tw_unmap_scsi_data() */
 
 /* This function will reset a device extension */
@@ -1499,27 +1466,16 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
 	void *buf;
 	unsigned int transfer_len;
 	unsigned long flags = 0;
+	struct scatterlist *sg = scsi_sglist(cmd);
 
-	if (cmd->use_sg) {
-		struct scatterlist *sg =
-			(struct scatterlist *)cmd->request_buffer;
-		local_irq_save(flags);
-		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
-		transfer_len = min(sg->length, len);
-	} else {
-		buf = cmd->request_buffer;
-		transfer_len = min(cmd->request_bufflen, len);
-	}
+	local_irq_save(flags);
+	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+	transfer_len = min(sg->length, len);
 
 	memcpy(buf, data, transfer_len);
-
-	if (cmd->use_sg) {
-		struct scatterlist *sg;
 
-		sg = (struct scatterlist *)cmd->request_buffer;
-		kunmap_atomic(buf - sg->offset, KM_IRQ0);
-		local_irq_restore(flags);
-	}
+	kunmap_atomic(buf - sg->offset, KM_IRQ0);
+	local_irq_restore(flags);
 }
 
 /* This function is called by the isr to complete an inquiry command */
@@ -1764,19 +1720,20 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
 {
 	TW_Command *command_packet;
 	unsigned long command_que_value;
-	u32 lba = 0x0, num_sectors = 0x0, buffaddr = 0x0;
+	u32 lba = 0x0, num_sectors = 0x0;
 	int i, use_sg;
 	struct scsi_cmnd *srb;
-	struct scatterlist *sglist;
+	struct scatterlist *sglist, *sg;
 
 	dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n");
 
-	if (tw_dev->srb[request_id]->request_buffer == NULL) {
+	srb = tw_dev->srb[request_id];
+
+	sglist = scsi_sglist(srb);
+	if (!sglist) {
 		printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n");
 		return 1;
 	}
-	sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
-	srb = tw_dev->srb[request_id];
 
 	/* Initialize command packet */
 	command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
@@ -1819,33 +1776,18 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
 	command_packet->byte8.io.lba = lba;
 	command_packet->byte6.block_count = num_sectors;
 
-	/* Do this if there are no sg list entries */
-	if (tw_dev->srb[request_id]->use_sg == 0) {
-		dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): SG = 0\n");
-		buffaddr = tw_map_scsi_single_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
-		if (buffaddr == 0)
-			return 1;
+	use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
+	if (!use_sg)
+		return 1;
 
-		command_packet->byte8.io.sgl[0].address = buffaddr;
-		command_packet->byte8.io.sgl[0].length = tw_dev->srb[request_id]->request_bufflen;
+	scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
+		command_packet->byte8.io.sgl[i].address = sg_dma_address(sg);
+		command_packet->byte8.io.sgl[i].length = sg_dma_len(sg);
 		command_packet->size+=2;
 	}
 
-	/* Do this if we have multiple sg list entries */
-	if (tw_dev->srb[request_id]->use_sg > 0) {
-		use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
-		if (use_sg == 0)
-			return 1;
-
-		for (i=0;i<use_sg; i++) {
-			command_packet->byte8.io.sgl[i].address = sg_dma_address(&sglist[i]);
-			command_packet->byte8.io.sgl[i].length = sg_dma_len(&sglist[i]);
-			command_packet->size+=2;
-		}
-	}
-
 	/* Update SG statistics */
-	tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg;
+	tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
 	if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
 		tw_dev->max_sgl_entries = tw_dev->sgl_entries;
 
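Several hunks above (twa_scsiop_execute_scsi, tw_transfer_internal) rely on the same highmem-safe copy into a command's first scatterlist segment; once the accessors guarantee internal commands carry a scatterlist, the old use_sg == 0 branch disappears. A condensed sketch of that pattern, assuming the 2.6.22-era kmap_atomic()/KM_IRQ0 interface and sg->page field used throughout this merge (the function name is hypothetical):

static void example_copy_to_first_segment(struct scsi_cmnd *cmd,
					  const void *data, unsigned int len)
{
	struct scatterlist *sg = scsi_sglist(cmd);
	unsigned int transfer_len = min(sg->length, len);
	unsigned long flags;
	void *buf;

	/* Disable interrupts so the KM_IRQ0 kmap slot cannot be reused
	 * by an interrupt handler on this CPU while we hold it. */
	local_irq_save(flags);
	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	memcpy(buf, data, transfer_len);
	kunmap_atomic(buf - sg->offset, KM_IRQ0);
	local_irq_restore(flags);
}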
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index cb02656eb54c..71ff3fbfce12 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -267,8 +267,6 @@ NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
 		offset = max_offset;
 	}
 	if(XFERP < min_xferp) {
-		printk(KERN_WARNING "53c700: XFERP %d is less than minium, setting to %d\n",
-		       XFERP, min_xferp);
 		XFERP = min_xferp;
 	}
 	return (offset & 0x0f) | (XFERP & 0x07)<<4;
@@ -585,16 +583,8 @@ NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
 	       struct NCR_700_command_slot *slot)
 {
 	if(SCp->sc_data_direction != DMA_NONE &&
-	   SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
-		if(SCp->use_sg) {
-			dma_unmap_sg(hostdata->dev, SCp->request_buffer,
-				     SCp->use_sg, SCp->sc_data_direction);
-		} else {
-			dma_unmap_single(hostdata->dev, slot->dma_handle,
-					 SCp->request_bufflen,
-					 SCp->sc_data_direction);
-		}
-	}
+	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
+		scsi_dma_unmap(SCp);
 }
 
 STATIC inline void
@@ -661,7 +651,6 @@ NCR_700_chip_setup(struct Scsi_Host *host)
 {
 	struct NCR_700_Host_Parameters *hostdata =
 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
-	__u32 dcntl_extra = 0;
 	__u8 min_period;
 	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
 
@@ -686,13 +675,14 @@ NCR_700_chip_setup(struct Scsi_Host *host)
 			burst_disable = BURST_DISABLE;
 			break;
 		}
-	dcntl_extra = COMPAT_700_MODE;
+	hostdata->dcntl_extra |= COMPAT_700_MODE;
 
-	NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
+	NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
 	NCR_700_writeb(burst_length | hostdata->dmode_extra,
 		       host, DMODE_710_REG);
-	NCR_700_writeb(burst_disable | (hostdata->differential ?
-					DIFF : 0), host, CTEST7_REG);
+	NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
+		       (hostdata->differential ? DIFF : 0),
+		       host, CTEST7_REG);
 	NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
 	NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
 		       | AUTO_ATN, host, SCNTL0_REG);
@@ -727,13 +717,13 @@ NCR_700_chip_setup(struct Scsi_Host *host)
 	 * of spec: sync divider 2, async divider 3 */
 		DEBUG(("53c700: sync 2 async 3\n"));
 		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
-		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
+		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 		hostdata->sync_clock = hostdata->clock/2;
 	} else if(hostdata->clock > 50 && hostdata->clock <= 75) {
 		/* sync divider 1.5, async divider 3 */
 		DEBUG(("53c700: sync 1.5 async 3\n"));
 		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
-		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
+		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 		hostdata->sync_clock = hostdata->clock*2;
 		hostdata->sync_clock /= 3;
 
@@ -741,18 +731,18 @@ NCR_700_chip_setup(struct Scsi_Host *host)
 		/* sync divider 1, async divider 2 */
 		DEBUG(("53c700: sync 1 async 2\n"));
 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
-		NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
+		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 		hostdata->sync_clock = hostdata->clock;
 	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
 		/* sync divider 1, async divider 1.5 */
 		DEBUG(("53c700: sync 1 async 1.5\n"));
 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
-		NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
+		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
 		hostdata->sync_clock = hostdata->clock;
 	} else {
 		DEBUG(("53c700: sync 1 async 1\n"));
 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
-		NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
+		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
 		/* sync divider 1, async divider 1 */
 		hostdata->sync_clock = hostdata->clock;
 	}
@@ -1263,14 +1253,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			       host->host_no, pun, lun, NCR_700_condition[i],
 			       NCR_700_phase[j], dsp - hostdata->pScript);
 			if(SCp != NULL) {
-				scsi_print_command(SCp);
+				struct scatterlist *sg;
 
-				if(SCp->use_sg) {
-					for(i = 0; i < SCp->use_sg + 1; i++) {
-						printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
-					}
+				scsi_print_command(SCp);
+				scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
+					printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
 				}
 			}
 			NCR_700_internal_bus_reset(host);
 		} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
 			printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
@@ -1844,8 +1833,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
 	}
 	/* sanity check: some of the commands generated by the mid-layer
 	 * have an eccentric idea of their sc_data_direction */
-	if(!SCp->use_sg && !SCp->request_bufflen
-	   && SCp->sc_data_direction != DMA_NONE) {
+	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
+	   SCp->sc_data_direction != DMA_NONE) {
 #ifdef NCR_700_DEBUG
 		printk("53c700: Command");
 		scsi_print_command(SCp);
@@ -1887,31 +1876,15 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
 		int i;
 		int sg_count;
 		dma_addr_t vPtr = 0;
+		struct scatterlist *sg;
 		__u32 count = 0;
 
-		if(SCp->use_sg) {
-			sg_count = dma_map_sg(hostdata->dev,
-					      SCp->request_buffer, SCp->use_sg,
-					      direction);
-		} else {
-			vPtr = dma_map_single(hostdata->dev,
-					      SCp->request_buffer,
-					      SCp->request_bufflen,
-					      direction);
-			count = SCp->request_bufflen;
-			slot->dma_handle = vPtr;
-			sg_count = 1;
-		}
-
+		sg_count = scsi_dma_map(SCp);
+		BUG_ON(sg_count < 0);
 
-		for(i = 0; i < sg_count; i++) {
-
-			if(SCp->use_sg) {
-				struct scatterlist *sg = SCp->request_buffer;
-
-				vPtr = sg_dma_address(&sg[i]);
-				count = sg_dma_len(&sg[i]);
-			}
+		scsi_for_each_sg(SCp, sg, sg_count, i) {
+			vPtr = sg_dma_address(sg);
+			count = sg_dma_len(sg);
 
 			slot->SG[i].ins = bS_to_host(move_ins | count);
 			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 841e1bb27d57..e06bdfeab420 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -177,6 +177,7 @@ struct NCR_700_command_slot {
 	__u8	state;
 	#define NCR_700_FLAG_AUTOSENSE	0x01
 	__u8	flags;
+	__u8	pad1[2];	/* Needed for m68k where min alignment is 2 bytes */
 	int	tag;
 	__u32	resume_offset;
 	struct	scsi_cmnd *cmnd;
@@ -196,6 +197,8 @@ struct NCR_700_Host_Parameters {
 	void __iomem	*base;		/* the base for the port (copied to host) */
 	struct device	*dev;
 	__u32	dmode_extra;	/* adjustable bus settings */
+	__u32	dcntl_extra;	/* adjustable bus settings */
+	__u32	ctest7_extra;	/* adjustable bus settings */
 	__u32	differential:1;	/* if we are differential */
 #ifdef CONFIG_53C700_LE_ON_BE
 	/* This option is for HP only.  Set it if your chip is wired for
@@ -352,6 +355,7 @@ struct NCR_700_Host_Parameters {
 #define		SEL_TIMEOUT_DISABLE	0x10 /* 710 only */
 #define		DFP			0x08
 #define		EVP			0x04
+#define		CTEST7_TT1		0x02
 #define		DIFF			0x01
 #define	CTEST6_REG			0x1A
 #define	TEMP_REG			0x1C
@@ -385,6 +389,7 @@ struct NCR_700_Host_Parameters {
 #define		SOFTWARE_RESET		0x01
 #define		COMPAT_700_MODE		0x01
 #define		SCRPTS_16BITS		0x20
+#define		EA_710			0x20
 #define		ASYNC_DIV_2_0		0x00
 #define		ASYNC_DIV_1_5		0x40
 #define		ASYNC_DIV_1_0		0x80
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
deleted file mode 100644
index 93b41f45638a..000000000000
--- a/drivers/scsi/53c7xx.c
+++ /dev/null
@@ -1,6102 +0,0 @@
1/*
2 * 53c710 driver. Modified from Drew Eckhardts driver
3 * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
4 * Check out PERM_OPTIONS and EXPECTED_CLOCK, which may be defined in the
5 * relevant machine specific file (eg. mvme16x.[ch], amiga7xx.[ch]).
6 * There are also currently some defines at the top of 53c7xx.scr.
7 * The chip type is #defined in script_asm.pl, as well as the Makefile.
8 * Host scsi ID expected to be 7 - see NCR53c7x0_init().
9 *
10 * I have removed the PCI code and some of the 53c8xx specific code -
11 * simply to make this file smaller and easier to manage.
12 *
13 * MVME16x issues:
14 * Problems trying to read any chip registers in NCR53c7x0_init(), as they
15 * may never have been set by 16xBug (eg. If kernel has come in over tftp).
16 */
17
18/*
19 * Adapted for Linux/m68k Amiga platforms for the A4000T/A4091 and
20 * WarpEngine SCSI controllers.
21 * By Alan Hourihane <alanh@fairlite.demon.co.uk>
22 * Thanks to Richard Hirst for making it possible with the MVME additions
23 */
24
25/*
26 * 53c710 rev 0 doesn't support add with carry. Rev 1 and 2 does. To
27 * overcome this problem you can define FORCE_DSA_ALIGNMENT, which ensures
28 * that the DSA address is always xxxxxx00. If disconnection is not allowed,
29 * then the script only ever tries to add small (< 256) positive offsets to
30 * DSA, so lack of carry isn't a problem. FORCE_DSA_ALIGNMENT can, of course,
31 * be defined for all chip revisions at a small cost in memory usage.
32 */
33
34#define FORCE_DSA_ALIGNMENT
35
36/*
37 * Selection timer does not always work on the 53c710, depending on the
38 * timing at the last disconnect, if this is a problem for you, try
39 * using validids as detailed below.
40 *
41 * Options for the NCR7xx driver
42 *
43 * noasync:0 - disables sync and asynchronous negotiation
44 * nosync:0 - disables synchronous negotiation (does async)
45 * nodisconnect:0 - disables disconnection
46 * validids:0x?? - Bitmask field that disallows certain ID's.
47 * - e.g. 0x03 allows ID 0,1
48 * - 0x1F allows ID 0,1,2,3,4
49 * opthi:n - replace top word of options with 'n'
50 * optlo:n - replace bottom word of options with 'n'
51 * - ALWAYS SPECIFY opthi THEN optlo <<<<<<<<<<
52 */
53
54/*
55 * PERM_OPTIONS are driver options which will be enabled for all NCR boards
56 * in the system at driver initialization time.
57 *
58 * Don't THINK about touching these in PERM_OPTIONS :
59 * OPTION_MEMORY_MAPPED
60 * 680x0 doesn't have an IO map!
61 *
62 * OPTION_DEBUG_TEST1
63 * Test 1 does bus mastering and interrupt tests, which will help weed
64 * out brain damaged main boards.
65 *
66 * Other PERM_OPTIONS settings are listed below. Note the actual options
67 * required are set in the relevant file (mvme16x.c, amiga7xx.c, etc):
68 *
69 * OPTION_NO_ASYNC
70 * Don't negotiate for asynchronous transfers on the first command
71 * when OPTION_ALWAYS_SYNCHRONOUS is set. Useful for dain bramaged
72 * devices which do something bad rather than sending a MESSAGE
73 * REJECT back to us like they should if they can't cope.
74 *
75 * OPTION_SYNCHRONOUS
76 * Enable support for synchronous transfers. Target negotiated
77 * synchronous transfers will be responded to. To initiate
78 * a synchronous transfer request, call
79 *
80 * request_synchronous (hostno, target)
81 *
82 * from within KGDB.
83 *
84 * OPTION_ALWAYS_SYNCHRONOUS
85 * Negotiate for synchronous transfers with every target after
86 * driver initialization or a SCSI bus reset. This is a bit dangerous,
87 * since there are some dain bramaged SCSI devices which will accept
88 * SDTR messages but keep talking asynchronously.
89 *
90 * OPTION_DISCONNECT
91 * Enable support for disconnect/reconnect. To change the
92 * default setting on a given host adapter, call
93 *
94 * request_disconnect (hostno, allow)
95 *
96 * where allow is non-zero to allow, 0 to disallow.
97 *
98 * If you really want to run 10MHz FAST SCSI-II transfers, you should
99 * know that the NCR driver currently ignores parity information. Most
100 * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
101 * than 8MHz. To play it safe, we only request 5MHz transfers.
102 *
103 * If you'd rather get 10MHz transfers, edit sdtr_message and change
104 * the fourth byte from 50 to 25.
105 */
106
107/*
108 * Sponsored by
109 * iX Multiuser Multitasking Magazine
110 * Hannover, Germany
111 * hm@ix.de
112 *
113 * Copyright 1993, 1994, 1995 Drew Eckhardt
114 * Visionary Computing
115 * (Unix and Linux consulting and custom programming)
116 * drew@PoohSticks.ORG
117 * +1 (303) 786-7975
118 *
119 * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
120 *
121 * For more information, please consult
122 *
123 * NCR53C810
124 * SCSI I/O Processor
125 * Programmer's Guide
126 *
127 * NCR 53C810
128 * PCI-SCSI I/O Processor
129 * Data Manual
130 *
131 * NCR 53C810/53C820
132 * PCI-SCSI I/O Processor Design In Guide
133 *
134 * For literature on Symbios Logic Inc. formerly NCR, SCSI,
135 * and Communication products please call (800) 334-5454 or
136 * (719) 536-3300.
137 *
138 * PCI BIOS Specification Revision
139 * PCI Local Bus Specification
140 * PCI System Design Guide
141 *
142 * PCI Special Interest Group
143 * M/S HF3-15A
144 * 5200 N.E. Elam Young Parkway
145 * Hillsboro, Oregon 97124-6497
146 * +1 (503) 696-2000
147 * +1 (800) 433-5177
148 */
149
150/*
151 * Design issues :
152 * The cumulative latency needed to propagate a read/write request
153 * through the file system, buffer cache, driver stacks, SCSI host, and
154 * SCSI device is ultimately the limiting factor in throughput once we
155 * have a sufficiently fast host adapter.
156 *
157 * So, to maximize performance we want to keep the ratio of latency to data
158 * transfer time to a minimum by
159 * 1. Minimizing the total number of commands sent (typical command latency
160 * including drive and bus mastering host overhead is as high as 4.5ms)
161 * to transfer a given amount of data.
162 *
163 * This is accomplished by placing no arbitrary limit on the number
164 * of scatter/gather buffers supported, since we can transfer 1K
165 * per scatter/gather buffer without Eric's cluster patches,
166 * 4K with.
167 *
168 * 2. Minimizing the number of fatal interrupts serviced, since
169 * fatal interrupts halt the SCSI I/O processor. Basically,
170 * this means offloading the practical maximum amount of processing
171 * to the SCSI chip.
172 *
173 * On the NCR53c810/820/720, this is accomplished by using
174 * interrupt-on-the-fly signals when commands complete,
175 * and only handling fatal errors and SDTR / WDTR messages
176 * in the host code.
177 *
178 * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
179 * only the lack of a interrupt-on-the-fly facility complicates
180 * things. Also, SCSI ID registers and commands are
181 * bit fielded rather than binary encoded.
182 *
183 * On the NCR53c700 and NCR53c700-66, operations that are done via
184 * indirect, table mode on the more advanced chips must be
185 * replaced by calls through a jump table which
186 * acts as a surrogate for the DSA. Unfortunately, this
187 * will mean that we must service an interrupt for each
188 * disconnect/reconnect.
189 *
190 * 3. Eliminating latency by pipelining operations at the different levels.
191 *
192 * This driver allows a configurable number of commands to be enqueued
193 * for each target/lun combination (experimentally, I have discovered
194 * that two seems to work best) and will ultimately allow for
195 * SCSI-II tagged queuing.
196 *
197 *
198 * Architecture :
199 * This driver is built around a Linux queue of commands waiting to
200 * be executed, and a shared Linux/NCR array of commands to start. Commands
201 * are transferred to the array by the run_process_issue_queue() function
202 * which is called whenever a command completes.
203 *
204 * As commands are completed, the interrupt routine is triggered,
205 * looks for commands in the linked list of completed commands with
206 * valid status, removes these commands from a list of running commands,
207 * calls the done routine, and flags their target/luns as not busy.
208 *
209 * Due to limitations in the intelligence of the NCR chips, certain
210 * concessions are made. In many cases, it is easier to dynamically
211 * generate/fix-up code rather than calculate on the NCR at run time.
212 * So, code is generated or fixed up for
213 *
214 * - Handling data transfers, using a variable number of MOVE instructions
215 * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
216 *
217 * The DATAIN and DATAOUT routines are separate, so that an incorrect
218 * direction can be trapped, and space isn't wasted.
219 *
220 * It may turn out that we're better off using some sort
221 * of table indirect instruction in a loop with a variable
222 * sized table on the NCR53c710 and newer chips.
223 *
224 * - Checking for reselection (NCR53c710 and better)
225 *
226 * - Handling the details of SCSI context switches (NCR53c710 and better),
227 * such as reprogramming appropriate synchronous parameters,
228 * removing the dsa structure from the NCR's queue of outstanding
229 * commands, etc.
230 *
231 */
232
233#include <linux/module.h>
234
235
236#include <linux/types.h>
237#include <asm/setup.h>
238#include <asm/dma.h>
239#include <asm/io.h>
240#include <asm/system.h>
241#include <linux/delay.h>
242#include <linux/signal.h>
243#include <linux/sched.h>
244#include <linux/errno.h>
245#include <linux/string.h>
246#include <linux/slab.h>
247#include <linux/vmalloc.h>
248#include <linux/mm.h>
249#include <linux/ioport.h>
250#include <linux/time.h>
251#include <linux/blkdev.h>
252#include <linux/spinlock.h>
253#include <linux/interrupt.h>
254#include <asm/pgtable.h>
255
256#ifdef CONFIG_AMIGA
257#include <asm/amigahw.h>
258#include <asm/amigaints.h>
259#include <asm/irq.h>
260
261#define BIG_ENDIAN
262#define NO_IO_SPACE
263#endif
264
265#ifdef CONFIG_MVME16x
266#include <asm/mvme16xhw.h>
267
268#define BIG_ENDIAN
269#define NO_IO_SPACE
270#define VALID_IDS
271#endif
272
273#ifdef CONFIG_BVME6000
274#include <asm/bvme6000hw.h>
275
276#define BIG_ENDIAN
277#define NO_IO_SPACE
278#define VALID_IDS
279#endif
280
281#include "scsi.h"
282#include <scsi/scsi_dbg.h>
283#include <scsi/scsi_host.h>
284#include <scsi/scsi_transport_spi.h>
285#include "53c7xx.h"
286#include <linux/stat.h>
287#include <linux/stddef.h>
288
289#ifdef NO_IO_SPACE
290/*
291 * The following make the definitions in 53c7xx.h (write8, etc) smaller,
292 * we don't have separate i/o space anyway.
293 */
294#undef inb
295#undef outb
296#undef inw
297#undef outw
298#undef inl
299#undef outl
300#define inb(x) 1
301#define inw(x) 1
302#define inl(x) 1
303#define outb(x,y) 1
304#define outw(x,y) 1
305#define outl(x,y) 1
306#endif
307
308static int check_address (unsigned long addr, int size);
309static void dump_events (struct Scsi_Host *host, int count);
310static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
311 int free, int issue);
312static void hard_reset (struct Scsi_Host *host);
313static void ncr_scsi_reset (struct Scsi_Host *host);
314static void print_lots (struct Scsi_Host *host);
315static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
316 int scntl3, int now_connected);
317static int datapath_residual (struct Scsi_Host *host);
318static const char * sbcl_to_phase (int sbcl);
319static void print_progress (Scsi_Cmnd *cmd);
320static void print_queues (struct Scsi_Host *host);
321static void process_issue_queue (unsigned long flags);
322static int shutdown (struct Scsi_Host *host);
323static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
324static int disable (struct Scsi_Host *host);
325static int NCR53c7xx_run_tests (struct Scsi_Host *host);
326static irqreturn_t NCR53c7x0_intr(int irq, void *dev_id);
327static void NCR53c7x0_intfly (struct Scsi_Host *host);
328static int ncr_halt (struct Scsi_Host *host);
329static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
330 *cmd);
331static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
332static void print_dsa (struct Scsi_Host *host, u32 *dsa,
333 const char *prefix);
334static int print_insn (struct Scsi_Host *host, const u32 *insn,
335 const char *prefix, int kernel);
336
337static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
338static void NCR53c7x0_init_fixup (struct Scsi_Host *host);
339static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
340 NCR53c7x0_cmd *cmd);
341static void NCR53c7x0_soft_reset (struct Scsi_Host *host);
342
343/* Size of event list (per host adapter) */
344static int track_events = 0;
345static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
346static struct scsi_host_template *the_template = NULL;
347
348/* NCR53c710 script handling code */
349
350#include "53c7xx_d.h"
351#ifdef A_int_debug_sync
352#define DEBUG_SYNC_INTR A_int_debug_sync
353#endif
354int NCR53c7xx_script_len = sizeof (SCRIPT);
355int NCR53c7xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
356#ifdef FORCE_DSA_ALIGNMENT
357int CmdPageStart = (0 - Ent_dsa_zero - sizeof(struct NCR53c7x0_cmd)) & 0xff;
358#endif
359
360static char *setup_strings[] =
361 {"","","","","","","",""};
362
363#define MAX_SETUP_STRINGS ARRAY_SIZE(setup_strings)
364#define SETUP_BUFFER_SIZE 200
365static char setup_buffer[SETUP_BUFFER_SIZE];
366static char setup_used[MAX_SETUP_STRINGS];
367
368void ncr53c7xx_setup (char *str, int *ints)
369{
370 int i;
371 char *p1, *p2;
372
373 p1 = setup_buffer;
374 *p1 = '\0';
375 if (str)
376 strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
377 setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
378 p1 = setup_buffer;
379 i = 0;
380 while (*p1 && (i < MAX_SETUP_STRINGS)) {
381 p2 = strchr(p1, ',');
382 if (p2) {
383 *p2 = '\0';
384 if (p1 != p2)
385 setup_strings[i] = p1;
386 p1 = p2 + 1;
387 i++;
388 }
389 else {
390 setup_strings[i] = p1;
391 break;
392 }
393 }
394 for (i=0; i<MAX_SETUP_STRINGS; i++)
395 setup_used[i] = 0;
396}
397
398
399/* check_setup_strings() returns index if key found, 0 if not
400 */
401
402static int check_setup_strings(char *key, int *flags, int *val, char *buf)
403{
404int x;
405char *cp;
406
407 for (x=0; x<MAX_SETUP_STRINGS; x++) {
408 if (setup_used[x])
409 continue;
410 if (!strncmp(setup_strings[x], key, strlen(key)))
411 break;
412 if (!strncmp(setup_strings[x], "next", strlen("next")))
413 return 0;
414 }
415 if (x == MAX_SETUP_STRINGS)
416 return 0;
417 setup_used[x] = 1;
418 cp = setup_strings[x] + strlen(key);
419 *val = -1;
420 if (*cp != ':')
421 return ++x;
422 cp++;
423 if ((*cp >= '0') && (*cp <= '9')) {
424 *val = simple_strtoul(cp,NULL,0);
425 }
426 return ++x;
427}
428
429
430
431/*
432 * KNOWN BUGS :
433 * - There is some sort of conflict when the PPP driver is compiled with
434 * support for 16 channels?
435 *
436 * - On systems which predate the 1.3.x initialization order change,
437 * the NCR driver will cause Cannot get free page messages to appear.
438 * These are harmless, but I don't know of an easy way to avoid them.
439 *
440 * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
441 * we get a PHASE MISMATCH with DSA set to zero (suggests that we
442 * are occurring somewhere in the reselection code) where
443 * DSP=some value DCMD|DBC=same value.
444 *
445 * Closer inspection suggests that we may be trying to execute
446 * some portion of the DSA?
447 * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
448 * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
449 * scsi0 : no current command : unexpected phase MSGIN.
450 * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
451 * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
452 * scsi0 : DSP->
453 * 001c46cc : 0x001c46cc 0x00000000
454 * 001c46d4 : 0x001c5ea0 0x000011f8
455 *
456 * Changed the print code in the phase_mismatch handler so
457 * that we call print_lots to try to diagnose this.
458 *
459 */
460
461/*
462 * Possible future direction of architecture for max performance :
463 *
464 * We're using a single start array for the NCR chip. This is
465 * sub-optimal, because we cannot add a command which would conflict with
466 * an executing command to this start queue, and therefore must insert the
467 * next command for a given I/T/L combination after the first has completed;
468 * incurring our interrupt latency between SCSI commands.
469 *
470 * To allow further pipelining of the NCR and host CPU operation, we want
471 * to set things up so that immediately on termination of a command destined
472 * for a given LUN, we get that LUN busy again.
473 *
474 * To do this, we need to add a 32 bit pointer to which is jumped to
475 * on completion of a command. If no new command is available, this
476 * would point to the usual DSA issue queue select routine.
477 *
478 * If one were, it would point to a per-NCR53c7x0_cmd select routine
479 * which starts execution immediately, inserting the command at the head
480 * of the start queue if the NCR chip is selected or reselected.
481 *
482 * We would change so that we keep a list of outstanding commands
483 * for each unit, rather than a single running_list. We'd insert
484 * a new command into the right running list; if the NCR didn't
485 * have something running for that yet, we'd put it in the
486 * start queue as well. Some magic needs to happen to handle the
487 * race condition between the first command terminating before the
488 * new one is written.
489 *
490 * Potential for profiling :
491 * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
492 */
493
494
495/*
496 * TODO :
497 * 1. To support WIDE transfers, not much needs to happen. We
498 * should do CHMOVE instructions instead of MOVEs when
499 * we have scatter/gather segments of uneven length. When
500 * we do this, we need to handle the case where we disconnect
501 * between segments.
502 *
503 * 2. Currently, when Icky things happen we do a FATAL(). Instead,
504 * we want to do an integrity check on the parts of the NCR hostdata
505 * structure which were initialized at boot time; FATAL() if that
506 * fails, and otherwise try to recover. Keep track of how many
507 * times this has happened within a single SCSI command; if it
508 * gets excessive, then FATAL().
509 *
510 * 3. Parity checking is currently disabled, and a few things should
511 * happen here now that we support synchronous SCSI transfers :
512 * 1. On soft-reset, we should set the EPC (Enable Parity Checking)
513 * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
514 *
515 * 2. We should enable the parity interrupt in the SIEN0 register.
516 *
517 * 3. intr_phase_mismatch() needs to believe that message out is
518 * always an "acceptable" phase to have a mismatch in. If
519 * the old phase was MSG_IN, we should send a MESSAGE PARITY
520 * error. If the old phase was something else, we should send
521 * an INITIATOR_DETECTED_ERROR message. Note that this could
522 * cause a RESTORE POINTERS message; so we should handle that
523 * correctly first. Instead, we should probably do an
524 * initiator_abort.
525 *
526 * 4. MPEE bit of CTEST4 should be set so we get interrupted if
527 * we detect an error.
528 *
529 *
530 * 5. The initial code has been tested on the NCR53c810. I don't
531 * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
532 * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
533 * finish development on those platforms.
534 *
535 * NCR53c820/825/720 - need to add wide transfer support, including WDTR
536 * negotiation, programming of wide transfer capabilities
537 * on reselection and table indirect selection.
538 *
539 * NCR53c710 - need to add fatal interrupt or GEN code for
540 * command completion signaling. Need to modify all
541 * SDID, SCID, etc. registers, and table indirect select code
542 * since these use bit fielded (ie 1<<target) instead of
543 * binary encoded target ids. Need to accommodate
544 * different register mappings, probably scan through
545 * the SCRIPT code and change the non SFBR register operand
546 * of all MOVE instructions.
547 *
548 * It is rather worse than this actually, the 710 corrupts
549 * both TEMP and DSA when you do a MOVE MEMORY. This
550 * screws you up all over the place. MOVE MEMORY 4 with a
551 * destination of DSA seems to work OK, which helps some.
552 * Richard Hirst richard@sleepie.demon.co.uk
553 *
554 * NCR53c700/700-66 - need to add code to refix addresses on
555 * every nexus change, eliminate all table indirect code,
556 * very messy.
557 *
558 * 6. The NCR53c7x0 series is very popular on other platforms that
559 * could be running Linux - ie, some high performance AMIGA SCSI
560 * boards use it.
561 *
562 * So, I should include #ifdef'd code so that it is
563 * compatible with these systems.
564 *
565 * Specifically, the little Endian assumptions I made in my
566 * bit fields need to change, and if the NCR doesn't see memory
567 * the right way, we need to provide options to reverse words
568 * when the scripts are relocated.
569 *
570 * 7. Use vremap() (now ioremap()) to access memory mapped boards.
571 */
572
573/*
574 * Allow for simultaneous existence of multiple SCSI scripts so we
575 * can have a single driver binary for all of the family.
576 *
577 * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
578 * - one for rest (only the NCR53c810, 815, 820, and 825 are currently
579 * supported)
580 *
581 * So that we only need two SCSI scripts, we need to modify things so
582 * that we fixup register accesses in READ/WRITE instructions, and
583 * we'll also have to accommodate the bit vs. binary encoding of IDs
584 * with the 7xx chips.
585 */
586
587#define ROUNDUP(adr,type) \
588 ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
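/*
 * For example (illustrative only) : with a four byte type,
 * ROUNDUP(0x1005, u32) yields 0x1008; an already aligned address
 * is returned unchanged.
 */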
589
590
591/*
592 * Function: issue_to_cmd
593 *
594 * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
595 * structure pointer.
596 *
597 * Inputs : issue - pointer to start of NOP or JUMP instruction
598 *	in issue array.
599 *
600 * Returns : pointer to command on success; NULL if the instruction is a NOP.
601 */
602
603static inline struct NCR53c7x0_cmd *
604issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
605 u32 *issue)
606{
607 return (issue[0] != hostdata->NOP_insn) ?
608 /*
609 * If the IF TRUE bit is set, it's a JUMP instruction. The
610 * operand is a bus pointer to the dsa_begin routine for this DSA. The
611 * dsa field of the NCR53c7x0_cmd structure starts with the
612 * DSA code template. By converting to a virtual address,
613 * subtracting the code template size, and offset of the
614 * dsa field, we end up with a pointer to the start of the
615 * structure (alternatively, we could use the
616 * dsa_cmnd field, an anachronism from when we weren't
617 * sure what the relationship between the NCR structures
618 *	and host structures was going to be).
619 */
620 (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
621 (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
622 offsetof(struct NCR53c7x0_cmd, dsa))
623 /* If the IF TRUE bit is not set, it's a NOP */
624 : NULL;
625}
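/*
 * Worked example (all numbers illustrative only) : if the JUMP operand
 * issue[1] is 0x001c5ea0, E_dsa_code_begin - E_dsa_code_template is 0x20,
 * and offsetof(struct NCR53c7x0_cmd, dsa) is 0x60, the returned pointer
 * is bus_to_virt(0x001c5ea0) - 0x20 - 0x60.
 */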
626
627
628/*
629 * FIXME: we should junk these, in favor of synchronous_want and
630 * wide_want in the NCR53c7x0_hostdata structure.
631 */
632
633/* Template for "preferred" synchronous transfer parameters. */
634
635static const unsigned char sdtr_message[] = {
636#ifdef CONFIG_SCSI_NCR53C7xx_FAST
637 EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 25 /* *4ns */, 8 /* off */
638#else
639 EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
640#endif
641};
642
643/* Template to request asynchronous transfers */
644
645static const unsigned char async_message[] = {
646 EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
647};
648
649/* Template for "preferred" WIDE transfer parameters */
650
651static const unsigned char wdtr_message[] = {
652 EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
653};
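/*
 * Encoding note : the SDTR period byte is in units of 4ns, so 25 means a
 * 100ns period (10MHz, FAST) and 50 means 200ns (5MHz); the offset byte
 * is the maximum REQ/ACK offset.  The WDTR exponent of 1 requests
 * 2^1 = 2 byte (16 bit) wide transfers.
 */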
654
655#if 0
656/*
657 * Function : struct Scsi_Host *find_host (int host)
658 *
659 * Purpose : KGDB support function which translates a host number
660 * to a host structure.
661 *
662 * Inputs : host - number of SCSI host
663 *
664 * Returns : NULL on failure, pointer to host structure on success.
665 */
666
667static struct Scsi_Host *
668find_host (int host) {
669 struct Scsi_Host *h;
670 for (h = first_host; h && h->host_no != host; h = h->next);
671 if (!h) {
672 printk (KERN_ALERT "scsi%d not found\n", host);
673 return NULL;
674 } else if (h->hostt != the_template) {
675 printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
676 return NULL;
677 }
678 return h;
679}
680
681#if 0
682/*
683 * Function : request_synchronous (int host, int target)
684 *
685 * Purpose : KGDB interface which will allow us to negotiate for
686 *	synchronous transfers. This will be replaced with a more
687 * integrated function; perhaps a new entry in the scsi_host
688 * structure, accessible via an ioctl() or perhaps /proc/scsi.
689 *
690 * Inputs : host - number of SCSI host; target - number of target.
691 *
692 * Returns : 0 when negotiation has been set up for the next SCSI command,
693 * -1 on failure.
694 */
695
696static int
697request_synchronous (int host, int target) {
698 struct Scsi_Host *h;
699 struct NCR53c7x0_hostdata *hostdata;
700 unsigned long flags;
701 if (target < 0) {
702 printk (KERN_ALERT "target %d is bogus\n", target);
703 return -1;
704 }
705 if (!(h = find_host (host)))
706 return -1;
707 else if (h->this_id == target) {
708 printk (KERN_ALERT "target %d is host ID\n", target);
709 return -1;
710 }
711 else if (target >= h->max_id) {
712 printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
713 h->max_id);
714 return -1;
715 }
716 hostdata = (struct NCR53c7x0_hostdata *)h->hostdata[0];
717
718 local_irq_save(flags);
719 if (hostdata->initiate_sdtr & (1 << target)) {
720 local_irq_restore(flags);
721 printk (KERN_ALERT "target %d already doing SDTR\n", target);
722 return -1;
723 }
724 hostdata->initiate_sdtr |= (1 << target);
725 local_irq_restore(flags);
726 return 0;
727}
728#endif
729
730/*
731 * Function : request_disconnect (int host, int on_or_off)
732 *
733 * Purpose : KGDB support function, tells us to allow or disallow
734 * disconnections.
735 *
736 * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
737 * zero to disallow.
738 *
739 * Returns : 0 on success, -1 on failure.
740 */
741
742static int
743request_disconnect (int host, int on_or_off) {
744 struct Scsi_Host *h;
745 struct NCR53c7x0_hostdata *hostdata;
746 if (!(h = find_host (host)))
747 return -1;
748 hostdata = (struct NCR53c7x0_hostdata *) h->hostdata[0];
749 if (on_or_off)
750 hostdata->options |= OPTION_DISCONNECT;
751 else
752 hostdata->options &= ~OPTION_DISCONNECT;
753 return 0;
754}
755#endif
756
757/*
758 * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
759 *
760 * Purpose : Initialize internal structures, as required on startup, or
761 * after a SCSI bus reset.
762 *
763 * Inputs : host - pointer to this host adapter's structure
764 */
765
766static void
767NCR53c7x0_driver_init (struct Scsi_Host *host) {
768 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
769 host->hostdata[0];
770 int i, j;
771 u32 *ncrcurrent;
772
773 for (i = 0; i < 16; ++i) {
774 hostdata->request_sense[i] = 0;
775 for (j = 0; j < 8; ++j)
776 hostdata->busy[i][j] = 0;
777 set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
778 }
779 hostdata->issue_queue = NULL;
780 hostdata->running_list = hostdata->finished_queue =
781 hostdata->ncrcurrent = NULL;
782 for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
783 i < host->can_queue; ++i, ncrcurrent += 2) {
784 ncrcurrent[0] = hostdata->NOP_insn;
785 ncrcurrent[1] = 0xdeadbeef;
786 }
787 ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
788 ncrcurrent[1] = (u32) virt_to_bus (hostdata->script) +
789 hostdata->E_wait_reselect;
790 hostdata->reconnect_dsa_head = 0;
791 hostdata->addr_reconnect_dsa_head = (u32)
792 virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
793 hostdata->expecting_iid = 0;
794 hostdata->expecting_sto = 0;
795 if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
796 hostdata->initiate_sdtr = 0xffff;
797 else
798 hostdata->initiate_sdtr = 0;
799 hostdata->talked_to = 0;
800 hostdata->idle = 1;
801}
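/*
 * Illustrative layout of the schedule array after the code above, for
 * can_queue = 2 :
 *
 *	slot 0 : NOP	0xdeadbeef
 *	slot 1 : NOP	0xdeadbeef
 *	slot 2 : JUMP	wait_reselect	(terminates the issue array)
 */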
802
803/*
804 * Function : static int clock_to_ccf_710 (int clock)
805 *
806 * Purpose : Return the clock conversion factor for a given SCSI clock.
807 *
808 * Inputs : clock - SCSI clock expressed in Hz.
809 *
810 * Returns : ccf on success, -1 on failure.
811 */
812
813static int
814clock_to_ccf_710 (int clock) {
815 if (clock <= 16666666)
816 return -1;
817 if (clock <= 25000000)
818 return 2; /* Divide by 1.0 */
819 else if (clock <= 37500000)
820 return 1; /* Divide by 1.5 */
821 else if (clock <= 50000000)
822 return 0; /* Divide by 2.0 */
823 else if (clock <= 66000000)
824 return 3; /* Divide by 3.0 */
825 else
826 return -1;
827}
828
829/*
830 * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
831 *
832 * Purpose : initialize the internal structures for a given SCSI host
833 *
834 * Inputs : host - pointer to this host adapter's structure
835 *
836 * Preconditions : when this function is called, the chip_type
837 * field of the hostdata structure MUST have been set.
838 *
839 * Returns : 0 on success, -1 on failure.
840 */
841
842int
843NCR53c7x0_init (struct Scsi_Host *host) {
844 NCR53c7x0_local_declare();
845 int i, ccf;
846 unsigned char revision;
847 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
848 host->hostdata[0];
849 /*
850 * There are some things which we need to know about in order to provide
851 * a semblance of support. Print 'em if they aren't what we expect,
852 * otherwise don't add to the noise.
853 *
854 * -1 means we don't know what to expect.
855 */
856 int val, flags;
857 char buf[32];
858 int expected_id = -1;
859 int expected_clock = -1;
860 int uninitialized = 0;
861#ifdef NO_IO_SPACE
862 int expected_mapping = OPTION_MEMORY_MAPPED;
863#else
864 int expected_mapping = OPTION_IO_MAPPED;
865#endif
866 for (i=0;i<7;i++)
867 hostdata->valid_ids[i] = 1; /* Default all ID's to scan */
868
869 /* Parse commandline flags */
870 if (check_setup_strings("noasync",&flags,&val,buf))
871 {
872 hostdata->options |= OPTION_NO_ASYNC;
873 hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
874 }
875
876 if (check_setup_strings("nosync",&flags,&val,buf))
877 {
878 hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
879 }
880
881 if (check_setup_strings("nodisconnect",&flags,&val,buf))
882 hostdata->options &= ~OPTION_DISCONNECT;
883
884 if (check_setup_strings("validids",&flags,&val,buf))
885 {
886 for (i=0;i<7;i++)
887 hostdata->valid_ids[i] = val & (1<<i);
888 }
889
890 if ((i = check_setup_strings("next",&flags,&val,buf)))
891 {
892 while (i)
893 setup_used[--i] = 1;
894 }
895
896 if (check_setup_strings("opthi",&flags,&val,buf))
897 hostdata->options = (long long)val << 32;
898 if (check_setup_strings("optlo",&flags,&val,buf))
899 hostdata->options |= val;
900
901 NCR53c7x0_local_setup(host);
902 switch (hostdata->chip) {
903 case 710:
904 case 770:
905 hostdata->dstat_sir_intr = NCR53c7x0_dstat_sir_intr;
906 hostdata->init_save_regs = NULL;
907 hostdata->dsa_fixup = NCR53c7xx_dsa_fixup;
908 hostdata->init_fixup = NCR53c7x0_init_fixup;
909 hostdata->soft_reset = NCR53c7x0_soft_reset;
910 hostdata->run_tests = NCR53c7xx_run_tests;
911 expected_clock = hostdata->scsi_clock;
912 expected_id = 7;
913 break;
914 default:
915 printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
916 host->host_no, hostdata->chip);
917 scsi_unregister (host);
918 return -1;
919 }
920
921 /* Assign constants accessed by NCR */
922 hostdata->NCR53c7xx_zero = 0;
923 hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
924 hostdata->NCR53c7xx_msg_abort = ABORT;
925 hostdata->NCR53c7xx_msg_nop = NOP;
926 hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
927 if (expected_mapping == -1 ||
928 (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
929 (expected_mapping & OPTION_MEMORY_MAPPED))
930 printk ("scsi%d : using %s mapped access\n", host->host_no,
931 (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
932 "io");
933
934 hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
935 DMODE_REG_00 : DMODE_REG_10;
936 hostdata->istat = ((hostdata->chip / 100) == 8) ?
937 ISTAT_REG_800 : ISTAT_REG_700;
938
939/* We have to assume that this may be the first access to the chip, so
940 * we must set EA in DCNTL. */
941
942 NCR53c7x0_write8 (DCNTL_REG, DCNTL_10_EA|DCNTL_10_COM);
943
944
945/* Only the ISTAT register is readable when the NCR is running, so make
946 sure it's halted. */
947 ncr_halt(host);
948
949/*
950 * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
951 *	as does the 710, with one bit per SCSI ID. Conversely, the NCR
952 *	53c8xx series uses a normal, 3 bit binary representation of these values.
953 *
954 * Get the rest of the NCR documentation, and FIND OUT where the change
955 * was.
956 */
957
958#if 0
959	/* May not be able to do this - chip may not have been set up yet */
960 tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
961 for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
962#else
963 host->this_id = 7;
964#endif
965
966/*
967 * Note : we should never encounter a board setup for ID0. So,
968 * if we see ID0, assume that it was uninitialized and set it
969 * to the industry standard 7.
970 */
971 if (!host->this_id) {
972 printk("scsi%d : initiator ID was %d, changing to 7\n",
973 host->host_no, host->this_id);
974 host->this_id = 7;
975 hostdata->this_id_mask = 1 << 7;
976 uninitialized = 1;
977    }
978
979 if (expected_id == -1 || host->this_id != expected_id)
980 printk("scsi%d : using initiator ID %d\n", host->host_no,
981 host->this_id);
982
983 /*
984 * Save important registers to allow a soft reset.
985 */
986
987 /*
988 * CTEST7 controls cache snooping, burst mode, and support for
989 * external differential drivers. This isn't currently used - the
990 * default value may not be optimal anyway.
991 * Even worse, it may never have been set up since reset.
992 */
993 hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
994 revision = (NCR53c7x0_read8(CTEST8_REG) & 0xF0) >> 4;
995 switch (revision) {
996 case 1: revision = 0; break;
997 case 2: revision = 1; break;
998 case 4: revision = 2; break;
999 case 8: revision = 3; break;
1000 default: revision = 255; break;
1001 }
1002 printk("scsi%d: Revision 0x%x\n",host->host_no,revision);
1003
1004 if ((revision == 0 || revision == 255) && (hostdata->options & (OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS)))
1005 {
1006	printk ("scsi%d: Disabling sync transfers and disconnect/reselect\n",
1007 host->host_no);
1008 hostdata->options &= ~(OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS);
1009 }
1010
1011 /*
1012 * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
1013 * on 800 series chips, it allows for a totem-pole IRQ driver.
1014 * NOTE saved_dcntl currently overwritten in init function.
1015 * The value read here may be garbage anyway, MVME16x board at least
1016 * does not initialise chip if kernel arrived via tftp.
1017 */
1018
1019 hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
1020
1021 /*
1022 * DMODE controls DMA burst length, and on 700 series chips,
1023 * 286 mode and bus width
1024 * NOTE: On MVME16x, chip may have been reset, so this could be a
1025 * power-on/reset default value.
1026 */
1027 hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
1028
1029 /*
1030 * Now that burst length and enabled/disabled status is known,
1031 * clue the user in on it.
1032 */
1033
1034 ccf = clock_to_ccf_710 (expected_clock);
1035
1036 for (i = 0; i < 16; ++i)
1037 hostdata->cmd_allocated[i] = 0;
1038
1039 if (hostdata->init_save_regs)
1040 hostdata->init_save_regs (host);
1041 if (hostdata->init_fixup)
1042 hostdata->init_fixup (host);
1043
1044 if (!the_template) {
1045 the_template = host->hostt;
1046 first_host = host;
1047 }
1048
1049 /*
1050 * Linux SCSI drivers have always been plagued with initialization
1051 * problems - some didn't work with the BIOS disabled since they expected
1052 * initialization from it, some didn't work when the networking code
1053 * was enabled and registers got scrambled, etc.
1054 *
1055 * To avoid problems like this, in the future, we will do a soft
1056 * reset on the SCSI chip, taking it back to a sane state.
1057 */
1058
1059 hostdata->soft_reset (host);
1060
1061#if 1
1062 hostdata->debug_count_limit = -1;
1063#else
1064 hostdata->debug_count_limit = 1;
1065#endif
1066 hostdata->intrs = -1;
1067 hostdata->resets = -1;
1068 memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
1069 sizeof (hostdata->synchronous_want));
1070
1071 NCR53c7x0_driver_init (host);
1072
1073 if (request_irq(host->irq, NCR53c7x0_intr, IRQF_SHARED, "53c7xx", host))
1074 {
1075 printk("scsi%d : IRQ%d not free, detaching\n",
1076 host->host_no, host->irq);
1077 goto err_unregister;
1078 }
1079
1080 if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
1081 (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
1082 /* XXX Should disable interrupts, etc. here */
1083 goto err_free_irq;
1084 } else {
1085 if (host->io_port) {
1086 host->n_io_port = 128;
1087 if (!request_region (host->io_port, host->n_io_port, "ncr53c7xx"))
1088 goto err_free_irq;
1089 }
1090 }
1091
1092 if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
1093 printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
1094 hard_reset (host);
1095 }
1096 return 0;
1097
1098 err_free_irq:
1099    free_irq(host->irq, host);	/* dev_id must match the one passed to request_irq() */
1100 err_unregister:
1101 scsi_unregister(host);
1102 return -1;
1103}
1104
1105/*
1106 * Function : int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
1107 * unsigned long base, int io_port, int irq, int dma, long long options,
1108 * int clock);
1109 *
1110 * Purpose : initializes a NCR53c7,8x0 based on base addresses,
1111 * IRQ, and DMA channel.
1112 *
1113 * Inputs : tpnt - Template for this SCSI adapter, board - board level
1114 *	product, chip - chip type (710 or 770); the remaining inputs are as named.
1115 *
1116 * Returns : 0 on success, -1 on failure.
1117 *
1118 */
1119
1120int
1121ncr53c7xx_init (struct scsi_host_template *tpnt, int board, int chip,
1122 unsigned long base, int io_port, int irq, int dma,
1123 long long options, int clock)
1124{
1125 struct Scsi_Host *instance;
1126 struct NCR53c7x0_hostdata *hostdata;
1127 char chip_str[80];
1128 int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
1129 schedule_size = 0, ok = 0;
1130 void *tmp;
1131 unsigned long page;
1132
1133 switch (chip) {
1134 case 710:
1135 case 770:
1136 schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
1137 script_len = NCR53c7xx_script_len;
1138 dsa_len = NCR53c7xx_dsa_len;
1139 options |= OPTION_INTFLY;
1140 sprintf (chip_str, "NCR53c%d", chip);
1141 break;
1142 default:
1143 printk("scsi-ncr53c7xx : unsupported SCSI chip %d\n", chip);
1144 return -1;
1145 }
1146
1147 printk("scsi-ncr53c7xx : %s at memory 0x%lx, io 0x%x, irq %d",
1148 chip_str, base, io_port, irq);
1149 if (dma == DMA_NONE)
1150 printk("\n");
1151 else
1152 printk(", dma %d\n", dma);
1153
1154 if (options & OPTION_DEBUG_PROBE_ONLY) {
1155 printk ("scsi-ncr53c7xx : probe only enabled, aborting initialization\n");
1156 return -1;
1157 }
1158
1159 max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
1160 /* Size of dynamic part of command structure : */
1161 2 * /* Worst case : we don't know if we need DATA IN or DATA out */
1162 ( 2 * /* Current instructions per scatter/gather segment */
1163 tpnt->sg_tablesize +
1164 3 /* Current startup / termination required per phase */
1165 ) *
1166 8 /* Each instruction is eight bytes */;
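/*
 * Worked example (illustrative, assuming sg_tablesize = 32) : the dynamic
 * part above comes to 2 * (2 * 32 + 3) * 8 = 1072 bytes of SCRIPTS
 * instructions on top of the fixed structure and dsa_len.
 */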
1167
1168 /* Allocate fixed part of hostdata, dynamic part to hold appropriate
1169 SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
1170
1171 We need a NCR53c7x0_cmd structure for scan_scsis() when we are
1172 not loaded as a module, and when we're loaded as a module, we
1173 can't use a non-dynamically allocated structure because modules
1174 are vmalloc()'d, which can allow structures to cross page
1175 boundaries and breaks our physical/virtual address assumptions
1176 for DMA.
1177
1178 So, we stick it past the end of our hostdata structure.
1179
1180 ASSUMPTION :
1181 Regardless of how many simultaneous SCSI commands we allow,
1182 the probe code only executes a _single_ instruction at a time,
1183 so we only need one here, and don't need to allocate NCR53c7x0_cmd
1184 structures for each target until we are no longer in scan_scsis
1185 and kmalloc() has become functional (memory_init() happens
1186 after all device driver initialization).
1187 */
1188
1189 size = sizeof(struct NCR53c7x0_hostdata) + script_len +
1190 /* Note that alignment will be guaranteed, since we put the command
1191 allocated at probe time after the fixed-up SCSI script, which
1192 consists of 32 bit words, aligned on a 32 bit boundary. But
1193 on a 64bit machine we need 8 byte alignment for hostdata->free, so
1194 we add in another 4 bytes to take care of potential misalignment
1195 */
1196 (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
1197
1198 page = __get_free_pages(GFP_ATOMIC,1);
1199 if(page==0)
1200 {
1201 printk(KERN_ERR "53c7xx: out of memory.\n");
1202 return -ENOMEM;
1203 }
1204#ifdef FORCE_DSA_ALIGNMENT
1205 /*
1206 * 53c710 rev.0 doesn't have an add-with-carry instruction.
1207 * Ensure we allocate enough memory to force DSA alignment.
1208 */
1209 size += 256;
1210#endif
1211 /* Size should be < 8K, so we can fit it in two pages. */
1212    if (size > 8192) {
1213	printk(KERN_ERR "53c7xx: hostdata > 8K\n");
	free_pages(page, 1);	/* don't leak the order-1 allocation on this error path */
1214	return -1;
1215    }
1216
1217 instance = scsi_register (tpnt, 4);
1218 if (!instance)
1219 {
1220	free_pages(page, 1);	/* the allocation above was order 1, not a single page */
1221 return -1;
1222 }
1223 instance->hostdata[0] = page;
1224 memset((void *)instance->hostdata[0], 0, 8192);
1225 cache_push(virt_to_phys((void *)(instance->hostdata[0])), 8192);
1226 cache_clear(virt_to_phys((void *)(instance->hostdata[0])), 8192);
1227 kernel_set_cachemode((void *)instance->hostdata[0], 8192, IOMAP_NOCACHE_SER);
1228
1229 /* FIXME : if we ever support an ISA NCR53c7xx based board, we
1230 need to check if the chip is running in a 16 bit mode, and if so
1231 unregister it if it is past the 16M (0x1000000) mark */
1232
1233 hostdata = (struct NCR53c7x0_hostdata *)instance->hostdata[0];
1234 hostdata->size = size;
1235 hostdata->script_count = script_len / sizeof(u32);
1236 hostdata->board = board;
1237 hostdata->chip = chip;
1238
1239 /*
1240 * Being memory mapped is more desirable, since
1241 *
1242 * - Memory accesses may be faster.
1243 *
1244 * - The destination and source address spaces are the same for
1245 * all instructions, meaning we don't have to twiddle dmode or
1246 * any other registers.
1247 *
1248 * So, we try for memory mapped, and if we don't get it,
1249 * we go for port mapped, and that failing we tell the user
1250 * it can't work.
1251 */
1252
1253 if (base) {
1254 instance->base = base;
1255 /* Check for forced I/O mapping */
1256 if (!(options & OPTION_IO_MAPPED)) {
1257 options |= OPTION_MEMORY_MAPPED;
1258 ok = 1;
1259 }
1260 } else {
1261 options &= ~OPTION_MEMORY_MAPPED;
1262 }
1263
1264 if (io_port) {
1265 instance->io_port = io_port;
1266 options |= OPTION_IO_MAPPED;
1267 ok = 1;
1268 } else {
1269 options &= ~OPTION_IO_MAPPED;
1270 }
1271
1272 if (!ok) {
1273	printk ("scsi%d : not initializing, no I/O or memory mapping known\n",
1274 instance->host_no);
1275 scsi_unregister (instance);
1276 return -1;
1277 }
1278 instance->irq = irq;
1279 instance->dma_channel = dma;
1280
1281 hostdata->options = options;
1282 hostdata->dsa_len = dsa_len;
1283 hostdata->max_cmd_size = max_cmd_size;
1284 hostdata->num_cmds = 1;
1285 hostdata->scsi_clock = clock;
1286 /* Initialize single command */
1287 tmp = (hostdata->script + hostdata->script_count);
1288#ifdef FORCE_DSA_ALIGNMENT
1289 {
1290 void *t = ROUNDUP(tmp, void *);
1291 if (((u32)t & 0xff) > CmdPageStart)
1292 t = (void *)((u32)t + 255);
1293 t = (void *)(((u32)t & ~0xff) + CmdPageStart);
1294 hostdata->free = t;
1295#if 0
1296 printk ("scsi: Registered size increased by 256 to %d\n", size);
1297 printk ("scsi: CmdPageStart = 0x%02x\n", CmdPageStart);
1298 printk ("scsi: tmp = 0x%08x, hostdata->free set to 0x%08x\n",
1299 (u32)tmp, (u32)t);
1300#endif
1301 }
1302#else
1303 hostdata->free = ROUNDUP(tmp, void *);
1304#endif
1305 hostdata->free->real = tmp;
1306 hostdata->free->size = max_cmd_size;
1307 hostdata->free->free = NULL;
1308 hostdata->free->next = NULL;
1309 hostdata->extra_allocate = 0;
1310
1311 /* Allocate command start code space */
1312 hostdata->schedule = (chip == 700 || chip == 70066) ?
1313 NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
1314
1315/*
1316 * For diagnostic purposes, we don't really care how fast things blaze.
1317 * For profiling, we want to access the 800ns resolution system clock,
1318 * using a 'C' call on the host processor.
1319 *
1320 * Therefore, there's no need for the NCR chip to directly manipulate
1321 * this data, and we should put it wherever is most convenient for
1322 * Linux.
1323 */
1324    if (track_events)
1325	hostdata->events = (struct NCR53c7x0_event *)
1326	    vmalloc (sizeof (struct NCR53c7x0_event) * track_events);
1327    else
1328	hostdata->events = NULL;
1329
1330 if (hostdata->events) {
1331 memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
1332 track_events);
1333 hostdata->event_size = track_events;
1334 hostdata->event_index = 0;
1335 } else
1336 hostdata->event_size = 0;
1337
1338 return NCR53c7x0_init(instance);
1339}
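/*
 * Illustrative call only (the values are hypothetical, not any
 * particular board's) : a board level probe routine might register a
 * memory mapped 710 along these lines.
 */
#if 0
    ncr53c7xx_init (tpnt, 0 /* board */, 710 /* chip */,
		    (unsigned long) base_addr, 0 /* no I/O port */,
		    irq, DMA_NONE, OPTION_MEMORY_MAPPED /* options */,
		    50000000 /* 50MHz SCSI clock */);
#endif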
1340
1341
1342/*
1343 * Function : static void NCR53c7x0_init_fixup (struct Scsi_Host *host)
1344 *
1345 * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
1346 *
1347 * Inputs : host - pointer to this host adapter's structure
1348 *
1349 */
1350
1351static void
1352NCR53c7x0_init_fixup (struct Scsi_Host *host) {
1353 NCR53c7x0_local_declare();
1354 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
1355 host->hostdata[0];
1356 unsigned char tmp;
1357 int i, ncr_to_memory, memory_to_ncr;
1358 u32 base;
1359 NCR53c7x0_local_setup(host);
1360
1361
1362 /* XXX - NOTE : this code MUST be made endian aware */
1363 /* Copy code into buffer that was allocated at detection time. */
1364 memcpy ((void *) hostdata->script, (void *) SCRIPT,
1365 sizeof(SCRIPT));
1366 /* Fixup labels */
1367 for (i = 0; i < PATCHES; ++i)
1368 hostdata->script[LABELPATCHES[i]] +=
1369 virt_to_bus(hostdata->script);
1370 /* Fixup addresses of constants that used to be EXTERNAL */
1371
1372 patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
1373 virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
1374 patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
1375 virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
1376 patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
1377 virt_to_bus(&(hostdata->NCR53c7xx_zero)));
1378 patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
1379 virt_to_bus(&(hostdata->NCR53c7xx_sink)));
1380 patch_abs_32 (hostdata->script, 0, NOP_insn,
1381 virt_to_bus(&(hostdata->NOP_insn)));
1382 patch_abs_32 (hostdata->script, 0, schedule,
1383 virt_to_bus((void *) hostdata->schedule));
1384
1385 /* Fixup references to external variables: */
1386 for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
1387 hostdata->script[EXTERNAL_PATCHES[i].offset] +=
1388 virt_to_bus(EXTERNAL_PATCHES[i].address);
1389
1390 /*
1391 * Fixup absolutes set at boot-time.
1392 *
1393 * All non-code absolute variables suffixed with "dsa_" and "int_"
1394 * are constants, and need no fixup provided the assembler has done
1395 * it for us (I don't know what the "real" NCR assembler does in
1396 * this case, my assembler does the right magic).
1397 */
1398
1399 patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
1400 Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
1401 patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
1402 Ent_dsa_code_restore_pointers - Ent_dsa_zero);
1403 patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
1404 Ent_dsa_code_check_reselect - Ent_dsa_zero);
1405
1406 /*
1407 * Just for the hell of it, preserve the settings of
1408 * Burst Length and Enable Read Line bits from the DMODE
1409 * register. Make sure SCRIPTS start automagically.
1410 */
1411
1412#if defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000)
1413 /* We know better what we want than 16xBug does! */
1414 tmp = DMODE_10_BL_8 | DMODE_10_FC2;
1415#else
1416 tmp = NCR53c7x0_read8(DMODE_REG_10);
1417 tmp &= (DMODE_BL_MASK | DMODE_10_FC2 | DMODE_10_FC1 | DMODE_710_PD |
1418 DMODE_710_UO);
1419#endif
1420
1421 if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
1422 base = (u32) host->io_port;
1423 memory_to_ncr = tmp|DMODE_800_DIOM;
1424 ncr_to_memory = tmp|DMODE_800_SIOM;
1425 } else {
1426 base = virt_to_bus((void *)host->base);
1427 memory_to_ncr = ncr_to_memory = tmp;
1428 }
1429
1430 /* SCRATCHB_REG_10 == SCRATCHA_REG_800, as it happens */
1431 patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
1432 patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
1433 patch_abs_32 (hostdata->script, 0, addr_dsa, base + DSA_REG);
1434
1435 /*
1436 * I needed some variables in the script to be accessible to
1437 * both the NCR chip and the host processor. For these variables,
1438 * I made the arbitrary decision to store them directly in the
1439 * hostdata structure rather than in the RELATIVE area of the
1440 * SCRIPTS.
1441 */
1442
1443
1444 patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
1445 patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
1446 patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);
1447
1448 patch_abs_32 (hostdata->script, 0, msg_buf,
1449 virt_to_bus((void *)&(hostdata->msg_buf)));
1450 patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
1451 virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
1452 patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
1453 virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
1454 patch_abs_32 (hostdata->script, 0, reselected_identify,
1455 virt_to_bus((void *)&(hostdata->reselected_identify)));
1456/* reselected_tag is currently unused */
1457#if 0
1458 patch_abs_32 (hostdata->script, 0, reselected_tag,
1459 virt_to_bus((void *)&(hostdata->reselected_tag)));
1460#endif
1461
1462 patch_abs_32 (hostdata->script, 0, test_dest,
1463 virt_to_bus((void*)&hostdata->test_dest));
1464 patch_abs_32 (hostdata->script, 0, test_src,
1465 virt_to_bus(&hostdata->test_source));
1466 patch_abs_32 (hostdata->script, 0, saved_dsa,
1467 virt_to_bus((void *)&hostdata->saved2_dsa));
1468 patch_abs_32 (hostdata->script, 0, emulfly,
1469 virt_to_bus((void *)&hostdata->emulated_intfly));
1470
1471 patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
1472 (unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));
1473
1474/* These are for event logging; the ncr_event enum contains the
1475 actual interrupt numbers. */
1476#ifdef A_int_EVENT_SELECT
1477 patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
1478#endif
1479#ifdef A_int_EVENT_DISCONNECT
1480 patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
1481#endif
1482#ifdef A_int_EVENT_RESELECT
1483 patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
1484#endif
1485#ifdef A_int_EVENT_COMPLETE
1486 patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
1487#endif
1488#ifdef A_int_EVENT_IDLE
1489 patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
1490#endif
1491#ifdef A_int_EVENT_SELECT_FAILED
1492 patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
1493 (u32) EVENT_SELECT_FAILED);
1494#endif
1495#ifdef A_int_EVENT_BEFORE_SELECT
1496 patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
1497 (u32) EVENT_BEFORE_SELECT);
1498#endif
1499#ifdef A_int_EVENT_RESELECT_FAILED
1500 patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
1501 (u32) EVENT_RESELECT_FAILED);
1502#endif
1503
1504 /*
1505 * Make sure the NCR and Linux code agree on the location of
1506 * certain fields.
1507 */
1508
1509 hostdata->E_accept_message = Ent_accept_message;
1510 hostdata->E_command_complete = Ent_command_complete;
1511 hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
1512 hostdata->E_data_transfer = Ent_data_transfer;
1513 hostdata->E_debug_break = Ent_debug_break;
1514 hostdata->E_dsa_code_template = Ent_dsa_code_template;
1515 hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
1516 hostdata->E_end_data_transfer = Ent_end_data_transfer;
1517 hostdata->E_initiator_abort = Ent_initiator_abort;
1518 hostdata->E_msg_in = Ent_msg_in;
1519 hostdata->E_other_transfer = Ent_other_transfer;
1520 hostdata->E_other_in = Ent_other_in;
1521 hostdata->E_other_out = Ent_other_out;
1522 hostdata->E_reject_message = Ent_reject_message;
1523 hostdata->E_respond_message = Ent_respond_message;
1524 hostdata->E_select = Ent_select;
1525 hostdata->E_select_msgout = Ent_select_msgout;
1526 hostdata->E_target_abort = Ent_target_abort;
1527#ifdef Ent_test_0
1528 hostdata->E_test_0 = Ent_test_0;
1529#endif
1530 hostdata->E_test_1 = Ent_test_1;
1531 hostdata->E_test_2 = Ent_test_2;
1532#ifdef Ent_test_3
1533 hostdata->E_test_3 = Ent_test_3;
1534#endif
1535 hostdata->E_wait_reselect = Ent_wait_reselect;
1536 hostdata->E_dsa_code_begin = Ent_dsa_code_begin;
1537
1538 hostdata->dsa_cmdout = A_dsa_cmdout;
1539 hostdata->dsa_cmnd = A_dsa_cmnd;
1540 hostdata->dsa_datain = A_dsa_datain;
1541 hostdata->dsa_dataout = A_dsa_dataout;
1542 hostdata->dsa_end = A_dsa_end;
1543 hostdata->dsa_msgin = A_dsa_msgin;
1544 hostdata->dsa_msgout = A_dsa_msgout;
1545 hostdata->dsa_msgout_other = A_dsa_msgout_other;
1546 hostdata->dsa_next = A_dsa_next;
1547 hostdata->dsa_select = A_dsa_select;
1548 hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
1549 hostdata->dsa_status = A_dsa_status;
1550 hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
1551 8 /* destination operand */;
1552
1553 /* sanity check */
1554 if (A_dsa_fields_start != Ent_dsa_code_template_end -
1555 Ent_dsa_zero)
1556 printk("scsi%d : NCR dsa_fields start is %d not %d\n",
1557 host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
1558 Ent_dsa_zero);
1559
1560 printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
1561 virt_to_bus(hostdata->script), hostdata->script);
1562}
1563
1564/*
1565 * Function : static int NCR53c7xx_run_tests (struct Scsi_Host *host)
1566 *
1567 * Purpose : run various verification tests on the NCR chip,
1568 * including interrupt generation, and proper bus mastering
1569 * operation.
1570 *
1571 * Inputs : host - a properly initialized Scsi_Host structure
1572 *
1573 * Preconditions : the NCR chip must be in a halted state.
1574 *
1575 * Returns : 0 if all tests were successful, -1 on error.
1576 *
1577 */
1578
1579static int
1580NCR53c7xx_run_tests (struct Scsi_Host *host) {
1581 NCR53c7x0_local_declare();
1582 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
1583 host->hostdata[0];
1584 unsigned long timeout;
1585 u32 start;
1586 int failed, i;
1587 unsigned long flags;
1588 NCR53c7x0_local_setup(host);
1589
1590 /* The NCR chip _must_ be idle to run the test scripts */
1591
1592 local_irq_save(flags);
1593 if (!hostdata->idle) {
1594 printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
1595 local_irq_restore(flags);
1596 return -1;
1597 }
1598
1599 /*
1600 * Check for functional interrupts, this could work as an
1601 * autoprobe routine.
1602 */
1603
1604 if ((hostdata->options & OPTION_DEBUG_TEST1) &&
1605 hostdata->state != STATE_DISABLED) {
1606 hostdata->idle = 0;
1607 hostdata->test_running = 1;
1608 hostdata->test_completed = -1;
1609 hostdata->test_dest = 0;
1610 hostdata->test_source = 0xdeadbeef;
1611 start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
1612 hostdata->state = STATE_RUNNING;
1613 printk ("scsi%d : test 1", host->host_no);
1614 NCR53c7x0_write32 (DSP_REG, start);
1615 if (hostdata->options & OPTION_DEBUG_TRACE)
1616 NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM |
1617 DCNTL_STD);
1618 printk (" started\n");
1619 local_irq_restore(flags);
1620
1621 /*
1622 * This is currently a .5 second timeout, since (in theory) no slow
1623     * board will take that long. In practice, we've seen one
1624     * Pentium system which occasionally fails with this, but works
1625     * with a timeout ten times as long.
1626 */
1627
1628 timeout = jiffies + 5 * HZ / 10;
1629 while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
1630 barrier();
1631
1632 failed = 1;
1633 if (hostdata->test_completed == -1)
1634 printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
1635 (hostdata->test_dest == 0xdeadbeef) ?
1636 " due to lost interrupt.\n"
1637 " Please verify that the correct IRQ is being used for your board,\n"
1638 : "");
1639 else if (hostdata->test_completed != 1)
1640 printk ("scsi%d : test 1 bad interrupt value (%d)\n",
1641 host->host_no, hostdata->test_completed);
1642 else
1643 failed = (hostdata->test_dest != 0xdeadbeef);
1644
1645 if (hostdata->test_dest != 0xdeadbeef) {
1646 printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
1647 " probable cache invalidation problem. Please configure caching\n"
1648 " as write-through or disabled\n",
1649 host->host_no, hostdata->test_dest);
1650 }
1651
1652 if (failed) {
1653 printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
1654 host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
1655 hostdata->script, start);
1656 printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
1657 NCR53c7x0_read32(DSPS_REG));
1658 local_irq_restore(flags);
1659 return -1;
1660 }
1661 hostdata->test_running = 0;
1662 }
1663
1664 if ((hostdata->options & OPTION_DEBUG_TEST2) &&
1665 hostdata->state != STATE_DISABLED) {
1666 u32 dsa[48];
1667 unsigned char identify = IDENTIFY(0, 0);
1668 unsigned char cmd[6];
1669 unsigned char data[36];
1670 unsigned char status = 0xff;
1671 unsigned char msg = 0xff;
1672
1673 cmd[0] = INQUIRY;
1674 cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
1675 cmd[4] = sizeof(data);
1676
1677 dsa[2] = 1;
1678 dsa[3] = virt_to_bus(&identify);
1679 dsa[4] = 6;
1680 dsa[5] = virt_to_bus(&cmd);
1681 dsa[6] = sizeof(data);
1682 dsa[7] = virt_to_bus(&data);
1683 dsa[8] = 1;
1684 dsa[9] = virt_to_bus(&status);
1685 dsa[10] = 1;
1686 dsa[11] = virt_to_bus(&msg);
1687
1688 for (i = 0; i < 6; ++i) {
1689#ifdef VALID_IDS
1690 if (!hostdata->valid_ids[i])
1691 continue;
1692#endif
1693 local_irq_disable();
1694 if (!hostdata->idle) {
1695 printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
1696 local_irq_restore(flags);
1697 return -1;
1698 }
1699
1700 /* 710: bit mapped scsi ID, async */
1701 dsa[0] = (1 << i) << 16;
1702 hostdata->idle = 0;
1703 hostdata->test_running = 2;
1704 hostdata->test_completed = -1;
1705 start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
1706 hostdata->state = STATE_RUNNING;
1707 NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
1708 NCR53c7x0_write32 (DSP_REG, start);
1709 if (hostdata->options & OPTION_DEBUG_TRACE)
1710 NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
1711 DCNTL_SSM | DCNTL_STD);
1712 local_irq_restore(flags);
1713
1714 timeout = jiffies + 5 * HZ; /* arbitrary */
1715 while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
1716 barrier();
1717
1718 NCR53c7x0_write32 (DSA_REG, 0);
1719
1720 if (hostdata->test_completed == 2) {
1721 data[35] = 0;
1722 printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
1723 host->host_no, i, data + 8);
1724 printk ("scsi%d : status ", host->host_no);
1725 scsi_print_status (status);
1726 printk ("\nscsi%d : message ", host->host_no);
1727 spi_print_msg(&msg);
1728 printk ("\n");
1729 } else if (hostdata->test_completed == 3) {
1730 printk("scsi%d : test 2 no connection with target %d\n",
1731 host->host_no, i);
1732 if (!hostdata->idle) {
1733 printk("scsi%d : not idle\n", host->host_no);
1734 local_irq_restore(flags);
1735 return -1;
1736 }
1737 } else if (hostdata->test_completed == -1) {
1738 printk ("scsi%d : test 2 timed out\n", host->host_no);
1739 local_irq_restore(flags);
1740 return -1;
1741 }
1742 hostdata->test_running = 0;
1743 }
1744 }
1745
1746 local_irq_restore(flags);
1747 return 0;
1748}
1749
1750/*
1751 * Function : static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
1752 *
1753 * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
1754 * performing all necessary relocation.
1755 *
1756 * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
1757 * enough to hold the NCR53c8xx dsa.
1758 */
1759
1760static void
1761NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
1762 Scsi_Cmnd *c = cmd->cmd;
1763 struct Scsi_Host *host = c->device->host;
1764 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
1765 host->hostdata[0];
1766 int i;
1767
1768 memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
1769 hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);
1770
1771 /*
1772 * Note : within the NCR 'C' code, dsa points to the _start_
1773 * of the DSA structure, and _not_ the offset of dsa_zero within
1774 * that structure used to facilitate shorter signed offsets
1775 * for the 8 bit ALU.
1776 *
1777 * The implications of this are that
1778 *
1779 * - 32 bit A_dsa_* absolute values require an additional
1780 * dsa_zero added to their value to be correct, since they are
1781 * relative to dsa_zero which is in essentially a separate
1782 * space from the code symbols.
1783 *
1784 * - All other symbols require no special treatment.
1785 */
1786
1787 patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1788 dsa_temp_lun, c->device->lun);
1789 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1790 dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
1791 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1792 dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
1793 Ent_dsa_code_template + A_dsa_next);
1794 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1795 dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->device->id].script));
1796 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1797 dsa_sscf_710, virt_to_bus((void *)&hostdata->sync[c->device->id].sscf_710));
1798 patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1799 dsa_temp_target, 1 << c->device->id);
1800 /* XXX - new pointer stuff */
1801 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1802 dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
1803 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1804 dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
1805 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1806 dsa_temp_addr_residual, virt_to_bus(&cmd->residual));
1807
1808 /* XXX - new start stuff */
1809
1810 patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
1811 dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
1812}
1813
1814/*
1815 * Function : run_process_issue_queue (void)
1816 *
1817 * Purpose : ensure that the coroutine is running and will process our
1818 * request. process_issue_queue_running is checked/set here (in an
1819 * inline function) rather than in process_issue_queue itself to reduce
1820 * the chances of stack overflow.
1821 *
1822 */
1823
1824static volatile int process_issue_queue_running = 0;
1825
1826static __inline__ void
1827run_process_issue_queue(void) {
1828 unsigned long flags;
1829 local_irq_save(flags);
1830 if (!process_issue_queue_running) {
1831 process_issue_queue_running = 1;
1832 process_issue_queue(flags);
1833 /*
1834 * process_issue_queue_running is cleared in process_issue_queue
1835 * once it can't do more work, and process_issue_queue exits with
1836 * interrupts disabled.
1837 */
1838 }
1839 local_irq_restore(flags);
1840}
1841
1842/*
1843 * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
1844 * result)
1845 *
1846 * Purpose : mark a SCSI command as finished, storing the entire
1847 *	result word (including the host portion) in the result field of the
1848 *	corresponding Scsi_Cmnd structure, and removing it from the internal queues.
1849 *
1850 * Inputs : cmd - command, result - entire result field
1851 *
1852 * Preconditions : the NCR chip should be in a halted state when
1853 * abnormal_finished is run, since it modifies structures which
1854 * the NCR expects to have exclusive access to.
1855 */
1856
1857static void
1858abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
1859 Scsi_Cmnd *c = cmd->cmd;
1860 struct Scsi_Host *host = c->device->host;
1861 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
1862 host->hostdata[0];
1863 unsigned long flags;
1864 int left, found;
1865 volatile struct NCR53c7x0_cmd * linux_search;
1866 volatile struct NCR53c7x0_cmd * volatile *linux_prev;
1867 volatile u32 *ncr_prev, *ncrcurrent, ncr_search;
1868
1869#if 0
1870 printk ("scsi%d: abnormal finished\n", host->host_no);
1871#endif
1872
1873 local_irq_save(flags);
1874 found = 0;
1875 /*
1876 * Traverse the NCR issue array until we find a match or run out
1877 * of instructions. Instructions in the NCR issue array are
1878 * either JUMP or NOP instructions, which are 2 words in length.
1879 */
1880
1881
1882 for (found = 0, left = host->can_queue, ncrcurrent = hostdata->schedule;
1883 left > 0; --left, ncrcurrent += 2)
1884 {
1885 if (issue_to_cmd (host, hostdata, (u32 *) ncrcurrent) == cmd)
1886 {
1887 ncrcurrent[0] = hostdata->NOP_insn;
1888 ncrcurrent[1] = 0xdeadbeef;
1889 ++found;
1890 break;
1891 }
1892 }
1893
1894 /*
1895 * Traverse the NCR reconnect list of DSA structures until we find
1896 * a pointer to this dsa or have found too many command structures.
1897 * We let prev point at the next field of the previous element or
1898 * head of the list, so we don't do anything different for removing
1899 * the head element.
1900 */
1901
1902 for (left = host->can_queue,
1903 ncr_search = hostdata->reconnect_dsa_head,
1904 ncr_prev = &hostdata->reconnect_dsa_head;
1905 left >= 0 && ncr_search &&
1906 ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
1907 != (char *) cmd->dsa;
1908 ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
1909 hostdata->dsa_next), ncr_search = *ncr_prev, --left);
1910
1911 if (left < 0)
1912	printk("scsi%d: loop detected in ncr reconnect list\n",
1913 host->host_no);
1914 else if (ncr_search) {
1915 if (found)
1916	    printk("scsi%d: scsi pid %ld in both ncr issue array and reconnect list\n",
1917 host->host_no, c->pid);
1918 else {
1919 volatile u32 * next = (u32 *)
1920 ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
1921 *ncr_prev = *next;
1922/* If we're at the tail end of the issue queue, update that pointer too. */
1923 found = 1;
1924 }
1925 }
1926
1927 /*
1928 * Traverse the host running list until we find this command or discover
1929 * we have too many elements, pointing linux_prev at the next field of the
1930 * previous element (or at the head of the list), with linux_search at the current element.
1931 */
1932
1933 for (left = host->can_queue, linux_search = hostdata->running_list,
1934 linux_prev = &hostdata->running_list;
1935 left >= 0 && linux_search && linux_search != cmd;
1936 linux_prev = &(linux_search->next),
1937 linux_search = linux_search->next, --left);
1938
1939 if (left < 0)
1940 printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
1941 host->host_no, c->pid);
1942 else if (linux_search) {
1943 *linux_prev = linux_search->next;
1944 --hostdata->busy[c->device->id][c->device->lun];
1945 }
1946
1947 /* Return the NCR command structure to the free list */
1948 cmd->next = hostdata->free;
1949 hostdata->free = cmd;
1950 c->host_scribble = NULL;
1951
1952 /* And return */
1953 c->result = result;
1954 c->scsi_done(c);
1955
1956 local_irq_restore(flags);
1957 run_process_issue_queue();
1958}
1959
1960/*
1961 * Function : static void intr_break (struct Scsi_Host *host,
1962 * struct NCR53c7x0_cmd *cmd)
1963 *
1964 * Purpose : Handler for breakpoint interrupts from a SCSI script
1965 *
1966 * Inputs : host - pointer to this host adapter's structure,
1967 * cmd - pointer to the command (if any) dsa was pointing
1968 * to.
1969 *
1970 */
1971
1972static void
1973intr_break (struct Scsi_Host *host, struct
1974 NCR53c7x0_cmd *cmd) {
1975 NCR53c7x0_local_declare();
1976 struct NCR53c7x0_break *bp;
1977#if 0
1978 Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
1979#endif
1980 u32 *dsp;
1981 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
1982 host->hostdata[0];
1983 unsigned long flags;
1984 NCR53c7x0_local_setup(host);
1985
1986 /*
1987 * Find the break point corresponding to this address, and
1988 * dump the appropriate debugging information to standard
1989 * output.
1990 */
1991 local_irq_save(flags);
1992 dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
1993 for (bp = hostdata->breakpoints; bp && bp->address != dsp;
1994 bp = bp->next);
1995 if (!bp)
1996 panic("scsi%d : break point interrupt from %p with no breakpoint!",
1997 host->host_no, dsp);
1998
1999 /*
2000 * Configure the NCR chip for manual start mode, so that we can
2001 * point the DSP register at the instruction that follows the
2002 * INT int_debug_break instruction.
2003 */
2004
2005 NCR53c7x0_write8 (hostdata->dmode,
2006 NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);
2007
2008 /*
2009 * And update the DSP register, using the size of the old
2010 * instruction in bytes.
2011 */
2012
2013 local_irq_restore(flags);
2014}
2015/*
2016 * Function : static void print_synchronous (const char *prefix,
2017 * const unsigned char *msg)
2018 *
2019 * Purpose : print a pretty, user and machine parsable representation
2020 *	of an SDTR message, including the "real" parameters and data
2021 *	clock, so we can tell the transfer rate at a glance.
2022 *
2023 * Inputs : prefix - text to prepend, msg - SDTR message (5 bytes)
2024 */
2025
2026static void
2027print_synchronous (const char *prefix, const unsigned char *msg) {
2028 if (msg[4]) {
2029 int Hz = 1000000000 / (msg[3] * 4);
2030 int integer = Hz / 1000000;
2031 int fraction = (Hz - (integer * 1000000)) / 10000;
2032 printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
2033 prefix, (int) msg[3] * 4, (int) msg[4], integer, fraction,
2034 (((msg[3] * 4) < 200) ? "FAST" : "synchronous"),
2035 (((msg[3] * 4) < 200) ? "-II" : ""));
2036 } else
2037 printk ("%sasynchronous SCSI\n", prefix);
2038}
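/*
 * Example (illustrative) : msg[3] = 25, msg[4] = 8 prints
 * "...period 100ns offset 8 10.00MHz FAST SCSI-II", since
 * 1000000000 / (25 * 4) = 10000000 Hz.
 */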
2039
2040/*
2041 * Function : static void set_synchronous (struct Scsi_Host *host,
2042 * int target, int sxfer, int scntl3, int now_connected)
2043 *
2044 * Purpose : reprogram transfers between the selected SCSI initiator and
2045 * target with the given register values; in the indirect
2046 * select operand, reselection script, and chip registers.
2047 *
2048 * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
2049 * sxfer and scntl3 - NCR registers. now_connected - if non-zero,
2050 * we should reprogram the registers now too.
2051 *
2052 * NOTE: For 53c710, scntl3 is actually used for SCF bits from
2053 * SBCL, as we don't have a SCNTL3.
2054 */
2055
2056static void
2057set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
2058 int now_connected) {
2059 NCR53c7x0_local_declare();
2060 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2061 host->hostdata[0];
2062 u32 *script;
2063 NCR53c7x0_local_setup(host);
2064
2065 /* These are eight bit registers */
2066 sxfer &= 0xff;
2067 scntl3 &= 0xff;
2068
2069 hostdata->sync[target].sxfer_sanity = sxfer;
2070 hostdata->sync[target].scntl3_sanity = scntl3;
2071
2072/*
2073 * HARD CODED : synchronous script is EIGHT words long. This
2074 * must agree with 53c7xx.h
2075 */
2076
2077 if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
2078 hostdata->sync[target].select_indirect = (1 << target) << 16 |
2079 (sxfer << 8);
2080 hostdata->sync[target].sscf_710 = scntl3;
2081
2082 script = (u32 *) hostdata->sync[target].script;
2083
2084 /* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
2085 script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
2086 DCMD_RWRI_OP_MOVE) << 24) |
2087 (SBCL_REG << 16) | (scntl3 << 8);
2088 script[1] = 0;
2089 script += 2;
2090
2091 script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
2092 DCMD_RWRI_OP_MOVE) << 24) |
2093 (SXFER_REG << 16) | (sxfer << 8);
2094 script[1] = 0;
2095 script += 2;
2096
2097#ifdef DEBUG_SYNC_INTR
2098 if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
2099 script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
2100 script[1] = DEBUG_SYNC_INTR;
2101 script += 2;
2102 }
2103#endif
2104
2105 script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
2106 script[1] = 0;
2107 script += 2;
2108 }
2109
2110 if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
2111 printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
2112 host->host_no, target, sxfer, scntl3);
2113
2114 if (now_connected) {
2115 NCR53c7x0_write8(SBCL_REG, scntl3);
2116 NCR53c7x0_write8(SXFER_REG, sxfer);
2117 }
2118}
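/*
 * The per-target script built above is, schematically :
 *
 *	MOVE scntl3 TO SBCL	; reprogram the clock conversion (SSCF) bits
 *	MOVE sxfer TO SXFER	; reprogram offset and transfer period
 *	INT DEBUG_SYNC_INTR	; only when DEBUG_SYNC_INTR is defined
 *	RETURN
 *
 * at two words per instruction, within the eight word budget noted above.
 */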
2119
2120
2121/*
2122 * Function : static void asynchronous (struct Scsi_Host *host, int target)
2123 *
2124 * Purpose : reprogram between the selected SCSI Host adapter and target
2125 * (assumed to be currently connected) for asynchronous transfers.
2126 *
2127 * Inputs : host - SCSI host structure, target - numeric target ID.
2128 *
2129 * Preconditions : the NCR chip should be in one of the halted states
2130 */
2131
2132static void
2133asynchronous (struct Scsi_Host *host, int target) {
2134 NCR53c7x0_local_declare();
2135 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2136 host->hostdata[0];
2137 NCR53c7x0_local_setup(host);
2138 set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
2139 1);
2140 printk ("scsi%d : setting target %d to asynchronous SCSI\n",
2141 host->host_no, target);
2142}
2143
2144/*
2145 * XXX - do we want to go out of our way (ie, add extra code to selection
2146 * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
2147 * conversion bits, or can we be content with just setting the
2148 * sxfer bits? I chose to do so [richard@sleepie.demon.co.uk]
2149 */
2150
2151/* Table for NCR53c8xx synchronous values */
2152
2153/* This table is also correct for 710, allowing that scf=4 is equivalent
2154 * of SSCF=0 (ie use DCNTL, divide by 3) for a 50.01-66.00MHz clock.
2155 * For any other clock values, we cannot use entries with SCF values of
2156 * 4. I guess that for a 66MHz clock, the slowest it will set is 2MHz,
2157 * and for a 50MHz clock, the slowest will be 2.27Mhz. Should check
2158 * that a device doesn't try and negotiate sync below these limits!
2159 */
2160
2161static const struct {
2162 int div; /* Total clock divisor * 10 */
2163 unsigned char scf; /* Synchronous clock conversion factor (SCF) */
2164 unsigned char tp; /* 4 + tp = xferp divisor */
2165} syncs[] = {
2166/* div scf tp div scf tp div scf tp */
2167 { 40, 1, 0}, { 50, 1, 1}, { 60, 1, 2},
2168 { 70, 1, 3}, { 75, 2, 1}, { 80, 1, 4},
2169 { 90, 1, 5}, { 100, 1, 6}, { 105, 2, 3},
2170 { 110, 1, 7}, { 120, 2, 4}, { 135, 2, 5},
2171 { 140, 3, 3}, { 150, 2, 6}, { 160, 3, 4},
2172 { 165, 2, 7}, { 180, 3, 5}, { 200, 3, 6},
2173 { 210, 4, 3}, { 220, 3, 7}, { 240, 4, 4},
2174 { 270, 4, 5}, { 300, 4, 6}, { 330, 4, 7}
2175};
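
/*
 * Example reading of the table above (a sketch, with a 50MHz SCSI clock
 * assumed purely for illustration) : the entry { 120, 2, 4} means a total
 * clock divisor of 12.0, so a 50MHz clock gives a 240ns synchronous
 * period, which an SDTR message would carry as 240 / 4 = 60.
 */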
2176
2177/*
2178 * Function : static void synchronous (struct Scsi_Host *host, int target,
2179 * char *msg)
2180 *
2181 * Purpose : reprogram transfers between the selected SCSI initiator and
2182 * target for synchronous SCSI transfers such that the synchronous
2183 * offset is no greater than that requested and the period at least as long
2184 * as that requested. Also modify *msg such that it contains
2185 * an appropriate response.
2186 *
2187 * Inputs : host - NCR53c7,8xx SCSI host, target - numeric SCSI target id,
2188 * msg - synchronous transfer request.
2189 */
2190
2191
2192static void
2193synchronous (struct Scsi_Host *host, int target, char *msg) {
2194 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2195 host->hostdata[0];
2196 int desire, divisor, i, limit;
2197 unsigned char scntl3, sxfer;
2198/* The diagnostic message fits on one line, even with max. width integers */
2199 char buf[80];
2200
2201/* Desired transfer clock in Hz */
2202 desire = 1000000000L / (msg[3] * 4);
2203/* Scale the available SCSI clock by 10 so we get tenths */
2204 divisor = (hostdata->scsi_clock * 10) / desire;
2205
2206/* NCR chips can handle at most an offset of 8 */
2207 if (msg[4] > 8)
2208 msg[4] = 8;
2209
2210 if (hostdata->options & OPTION_DEBUG_SDTR)
2211 printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
2212 host->host_no, divisor / 10, divisor % 10);
2213
2214 limit = ARRAY_SIZE(syncs) - 1;
2215 for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);
2216
2217 if (hostdata->options & OPTION_DEBUG_SDTR)
2218 printk("scsi%d : selected synchronous divisor of %d.%01d\n",
2219 host->host_no, syncs[i].div / 10, syncs[i].div % 10);
2220
2221 msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);
2222
2223 if (hostdata->options & OPTION_DEBUG_SDTR)
2224 printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
2225 msg[3] * 4);
2226
2227 scntl3 = syncs[i].scf;
2228 sxfer = (msg[4] << SXFER_MO_SHIFT) | (syncs[i].tp << 4);
2229 if (hostdata->options & OPTION_DEBUG_SDTR)
2230 printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
2231 host->host_no, (int) sxfer, (int) scntl3);
2232 set_synchronous (host, target, sxfer, scntl3, 1);
2233 sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
2234 print_synchronous (buf, msg);
2235}
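
/*
 * Worked example of the divisor arithmetic in synchronous() (sketch
 * only; the 50MHz clock and the function name are assumptions made for
 * illustration).  An SDTR period factor of 50 requests a 200ns period,
 * i.e. a 5MHz transfer clock :
 */
#if 0
static void
example_divisor (void)
{
    int scsi_clock = 50000000;			/* assumed 50MHz clock */
    int period_factor = 50;			/* SDTR msg[3], units of 4ns */
    int desire = 1000000000L / (period_factor * 4);	/* 5000000 Hz */
    int divisor = (scsi_clock * 10) / desire;		/* 100, ie 10.0 */
    /* The table search above would stop at { 100, 1, 6}, an exact match */
    printk ("example : divisor = %d.%01d\n", divisor / 10, divisor % 10);
}
#endif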
2236
2237/*
2238 * Function : static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host,
2239 * struct NCR53c7x0_cmd *cmd)
2240 *
2241 * Purpose : Handler for INT generated instructions for the
2242 * NCR53c810/820 SCSI SCRIPT
2243 *
2244 * Inputs : host - pointer to this host adapter's structure,
2245 * cmd - pointer to the command (if any) dsa was pointing
2246 * to.
2247 *
2248 */
2249
2250static int
2251NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
2252 NCR53c7x0_cmd *cmd) {
2253 NCR53c7x0_local_declare();
2254 int print;
2255 Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
2256 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2257 host->hostdata[0];
2258 u32 dsps,*dsp; /* Argument of the INT instruction */
2259
2260 NCR53c7x0_local_setup(host);
2261 dsps = NCR53c7x0_read32(DSPS_REG);
2262 dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
2263
2264 /* RGH 150597: Frig. Commands which fail with Check Condition are
2265 * flagged as successful - hack dsps to indicate check condition */
2266#if 0
2267 /* RGH 200597: Need to disable for BVME6000, as it gets Check Conditions
2268 * and then dies. Seems to handle Check Condition at startup, but
2269 * not mid kernel build. */
2270 if (dsps == A_int_norm_emulateintfly && cmd && cmd->result == 2)
2271 dsps = A_int_err_check_condition;
2272#endif
2273
2274 if (hostdata->options & OPTION_DEBUG_INTR)
2275 printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
2276
2277 switch (dsps) {
2278 case A_int_msg_1:
2279 print = 1;
2280 switch (hostdata->msg_buf[0]) {
2281 /*
2282 * Unless we've initiated synchronous negotiation, I don't
2283 * think that this should happen.
2284 */
2285 case MESSAGE_REJECT:
2286 hostdata->dsp = hostdata->script + hostdata->E_accept_message /
2287 sizeof(u32);
2288 hostdata->dsp_changed = 1;
2289 if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
2290 printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
2291 c->device->id);
2292 cmd->flags &= ~CMD_FLAG_SDTR;
2293 asynchronous (host, c->device->id);
2294 print = 0;
2295 }
2296 break;
2297 case INITIATE_RECOVERY:
2298 printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
2299 host->host_no);
2300 /* Reject the message, just as the default case does */
2301 hostdata->dsp = hostdata->script + hostdata->E_reject_message /
2302 sizeof(u32);
2303 hostdata->dsp_changed = 1;
2304 break;
2305 default:
2306 printk ("scsi%d : unsupported message, rejecting\n",
2307 host->host_no);
2308 hostdata->dsp = hostdata->script + hostdata->E_reject_message /
2309 sizeof(u32);
2310 hostdata->dsp_changed = 1;
2311 }
2312 if (print) {
2313 printk ("scsi%d : received message", host->host_no);
2314 if (c)
2315 printk (" from target %d lun %d ", c->device->id, c->device->lun);
2316 spi_print_msg((unsigned char *) hostdata->msg_buf);
2317 printk("\n");
2318 }
2319
2320 return SPECIFIC_INT_NOTHING;
2321
2322
2323 case A_int_msg_sdtr:
2324/*
2325 * At this point, hostdata->msg_buf contains
2326 * 0 EXTENDED MESSAGE
2327 * 1 length
2328 * 2 SDTR
2329 * 3 period * 4ns
2330 * 4 offset
2331 */
2332
2333 if (cmd) {
2334 char buf[80];
2335 sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->device->id,
2336 (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
2337 print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
2338
2339 /*
2340 * Initiator initiated, won't happen unless synchronous
2341 * transfers are enabled. If we get a SDTR message in
2342 * response to our SDTR, we should program our parameters
2343 * such that
2344 * offset <= requested offset
2345 * period >= requested period
2346 */
2347 if (cmd->flags & CMD_FLAG_SDTR) {
2348 cmd->flags &= ~CMD_FLAG_SDTR;
2349 if (hostdata->msg_buf[4])
2350 synchronous (host, c->device->id, (unsigned char *)
2351 hostdata->msg_buf);
2352 else
2353 asynchronous (host, c->device->id);
2354 hostdata->dsp = hostdata->script + hostdata->E_accept_message /
2355 sizeof(u32);
2356 hostdata->dsp_changed = 1;
2357 return SPECIFIC_INT_NOTHING;
2358 } else {
2359 if (hostdata->options & OPTION_SYNCHRONOUS) {
2360 cmd->flags |= CMD_FLAG_DID_SDTR;
2361 synchronous (host, c->device->id, (unsigned char *)
2362 hostdata->msg_buf);
2363 } else {
2364 hostdata->msg_buf[4] = 0; /* 0 offset = async */
2365 asynchronous (host, c->device->id);
2366 }
2367 patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
2368 patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
2369 virt_to_bus ((void *)&hostdata->msg_buf));
2370 hostdata->dsp = hostdata->script +
2371 hostdata->E_respond_message / sizeof(u32);
2372 hostdata->dsp_changed = 1;
2373 }
2374 return SPECIFIC_INT_NOTHING;
2375 }
2376 /* Fall through to abort if we couldn't find a cmd, and
2377 therefore a dsa structure to twiddle */
2378 case A_int_msg_wdtr:
2379 hostdata->dsp = hostdata->script + hostdata->E_reject_message /
2380 sizeof(u32);
2381 hostdata->dsp_changed = 1;
2382 return SPECIFIC_INT_NOTHING;
2383 case A_int_err_unexpected_phase:
2384 if (hostdata->options & OPTION_DEBUG_INTR)
2385 printk ("scsi%d : unexpected phase\n", host->host_no);
2386 return SPECIFIC_INT_ABORT;
2387 case A_int_err_selected:
2388 if ((hostdata->chip / 100) == 8)
2389 printk ("scsi%d : selected by target %d\n", host->host_no,
2390 (int) NCR53c7x0_read8(SDID_REG_800) &7);
2391 else
2392 printk ("scsi%d : selected by target LCRC=0x%02x\n", host->host_no,
2393 (int) NCR53c7x0_read8(LCRC_REG_10));
2394 hostdata->dsp = hostdata->script + hostdata->E_target_abort /
2395 sizeof(u32);
2396 hostdata->dsp_changed = 1;
2397 return SPECIFIC_INT_NOTHING;
2398 case A_int_err_unexpected_reselect:
2399 if ((hostdata->chip / 100) == 8)
2400 printk ("scsi%d : unexpected reselect by target %d lun %d\n",
2401 host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
2402 hostdata->reselected_identify & 7);
2403 else
2404 printk ("scsi%d : unexpected reselect LCRC=0x%02x\n", host->host_no,
2405 (int) NCR53c7x0_read8(LCRC_REG_10));
2406 hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
2407 sizeof(u32);
2408 hostdata->dsp_changed = 1;
2409 return SPECIFIC_INT_NOTHING;
2410/*
2411 * Since contingent allegiance conditions are cleared by the next
2412 * command issued to a target, we must issue a REQUEST SENSE
2413 * command after receiving a CHECK CONDITION status, before
2414 * another command is issued.
2415 *
2416 * Since this NCR53c7x0_cmd will be freed after use, we don't
2417 * care if we step on the various fields, so modify a few things.
2418 */
2419 case A_int_err_check_condition:
2420#if 0
2421 if (hostdata->options & OPTION_DEBUG_INTR)
2422#endif
2423 printk ("scsi%d : CHECK CONDITION\n", host->host_no);
2424 if (!c) {
2425 printk("scsi%d : CHECK CONDITION with no SCSI command\n",
2426 host->host_no);
2427 return SPECIFIC_INT_PANIC;
2428 }
2429
2430 /*
2431 * FIXME : this uses the normal one-byte selection message.
2432 * We may want to renegotiate for synchronous & WIDE transfers
2433 * since these could be the crux of our problem.
2434 *
2435 * FIXME : once SCSI-II tagged queuing is implemented, we'll
2436 * have to set this up so that the rest of the DSA
2437 * agrees with this being an untagged queue'd command.
2438 */
2439
2440 patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
2441
2442 /*
2443 * Modify the table indirect for COMMAND OUT phase, since
2444 * Request Sense is a six byte command.
2445 */
2446
2447 patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
2448
2449 /*
2450 * The CDB is now mirrored in our local non-cached
2451 * structure, but keep the old structure up to date as well,
2452 * just in case anyone looks at it.
2453 */
2454
2455 /*
2456 * XXX Need to worry about data buffer alignment/cache state
2457 * XXX here, but currently never get A_int_err_check_condition,
2458 * XXX so ignore problem for now.
2459 */
2460 cmd->cmnd[0] = c->cmnd[0] = REQUEST_SENSE;
2461 cmd->cmnd[1] = c->cmnd[1] &= 0xe0; /* Zero all but LUN */
2462 cmd->cmnd[2] = c->cmnd[2] = 0;
2463 cmd->cmnd[3] = c->cmnd[3] = 0;
2464 cmd->cmnd[4] = c->cmnd[4] = sizeof(c->sense_buffer);
2465 cmd->cmnd[5] = c->cmnd[5] = 0;
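    /*
     * The rebuilt CDB is now a standard six byte REQUEST SENSE; for
     * LUN 0 that is 03 00 00 00 <sizeof(sense_buffer)> 00.
     */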
2466
2467 /*
2468 * Disable dataout phase, and program datain to transfer to the
2469 * sense buffer, and add a jump to other_transfer after the
2470 * command so overflow/underrun conditions are detected.
2471 */
2472
2473 patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
2474 virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
2475 patch_dsa_32 (cmd->dsa, dsa_datain, 0,
2476 virt_to_bus(cmd->data_transfer_start));
2477 cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
2478 DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
2479 cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
2480
2481 cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
2482 << 24) | DBC_TCI_TRUE;
2483 cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
2484 hostdata->E_other_transfer;
2485
2486 /*
2487 * Currently, this command is flagged as completed, ie
2488 * it has valid status and message data. Reflag it as
2489 * incomplete. Q - need to do something so that original
2490 * status, etc are used.
2491 */
2492
2493 cmd->result = cmd->cmd->result = 0xffff;
2494
2495 /*
2496 * Restart command as a REQUEST SENSE.
2497 */
2498 hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
2499 sizeof(u32);
2500 hostdata->dsp_changed = 1;
2501 return SPECIFIC_INT_NOTHING;
2502 case A_int_debug_break:
2503 return SPECIFIC_INT_BREAK;
2504 case A_int_norm_aborted:
2505 hostdata->dsp = (u32 *) hostdata->schedule;
2506 hostdata->dsp_changed = 1;
2507 if (cmd)
2508 abnormal_finished (cmd, DID_ERROR << 16);
2509 return SPECIFIC_INT_NOTHING;
2510 case A_int_norm_emulateintfly:
2511 NCR53c7x0_intfly(host);
2512 return SPECIFIC_INT_NOTHING;
2513 case A_int_test_1:
2514 case A_int_test_2:
2515 hostdata->idle = 1;
2516 hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
2517 if (hostdata->options & OPTION_DEBUG_INTR)
2518 printk("scsi%d : test%d complete\n", host->host_no,
2519 hostdata->test_completed);
2520 return SPECIFIC_INT_NOTHING;
2521#ifdef A_int_debug_reselected_ok
2522 case A_int_debug_reselected_ok:
2523 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2524 OPTION_DEBUG_DISCONNECT)) {
2525 /*
2526 * Note - this dsa is not based on location relative to
2527 * the command structure, but on location relative to the
2528 * DSA register
2529 */
2530 u32 *dsa;
2531 dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
2532
2533 printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p))\n",
2534 host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
2535 printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
2536 host->host_no, cmd->saved_data_pointer,
2537 bus_to_virt(cmd->saved_data_pointer));
2538 print_insn (host, hostdata->script + Ent_reselected_ok /
2539 sizeof(u32), "", 1);
2540 if ((hostdata->chip / 100) == 8)
2541 printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
2542 host->host_no, NCR53c7x0_read8(SXFER_REG),
2543 NCR53c7x0_read8(SCNTL3_REG_800));
2544 else
2545 printk ("scsi%d : sxfer=0x%x, cannot read SBCL\n",
2546 host->host_no, NCR53c7x0_read8(SXFER_REG));
2547 if (c) {
2548 print_insn (host, (u32 *)
2549 hostdata->sync[c->device->id].script, "", 1);
2550 print_insn (host, (u32 *)
2551 hostdata->sync[c->device->id].script + 2, "", 1);
2552 }
2553 }
2554 return SPECIFIC_INT_RESTART;
2555#endif
2556#ifdef A_int_debug_reselect_check
2557 case A_int_debug_reselect_check:
2558 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2559 u32 *dsa;
2560#if 0
2561 u32 *code;
2562#endif
2563 /*
2564 * Note - this dsa is not based on location relative to
2565 * the command structure, but on location relative to the
2566 * DSA register
2567 */
2568 dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
2569 printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
2570 host->host_no, virt_to_bus(dsa), dsa);
2571 if (dsa) {
2572 printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
2573 host->host_no, cmd->saved_data_pointer,
2574 bus_to_virt (cmd->saved_data_pointer));
2575#if 0
2576 printk("scsi%d : template code :\n", host->host_no);
2577 for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
2578 / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
2579 code += print_insn (host, code, "", 1));
2580#endif
2581 }
2582 print_insn (host, hostdata->script + Ent_reselected_ok /
2583 sizeof(u32), "", 1);
2584 }
2585 return SPECIFIC_INT_RESTART;
2586#endif
2587#ifdef A_int_debug_dsa_schedule
2588 case A_int_debug_dsa_schedule:
2589 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2590 u32 *dsa;
2591 /*
2592 * Note - this dsa is not based on location relative to
2593 * the command structure, but on location relative to the
2594 * DSA register
2595 */
2596 dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
2597 printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
2598 host->host_no, virt_to_bus(dsa), dsa);
2599 if (dsa)
2600 printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
2601 " (temp was 0x%x (virt 0x%p))\n",
2602 host->host_no, cmd->saved_data_pointer,
2603 bus_to_virt (cmd->saved_data_pointer),
2604 NCR53c7x0_read32 (TEMP_REG),
2605 bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
2606 }
2607 return SPECIFIC_INT_RESTART;
2608#endif
2609#ifdef A_int_debug_scheduled
2610 case A_int_debug_scheduled:
2611 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2612 printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
2613 host->host_no, NCR53c7x0_read32(DSA_REG),
2614 bus_to_virt(NCR53c7x0_read32(DSA_REG)));
2615 }
2616 return SPECIFIC_INT_RESTART;
2617#endif
2618#ifdef A_int_debug_idle
2619 case A_int_debug_idle:
2620 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2621 printk("scsi%d : idle\n", host->host_no);
2622 }
2623 return SPECIFIC_INT_RESTART;
2624#endif
2625#ifdef A_int_debug_cmd
2626 case A_int_debug_cmd:
2627 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2628 printk("scsi%d : command sent\n", host->host_no);
2629 }
2630 return SPECIFIC_INT_RESTART;
2631#endif
2632#ifdef A_int_debug_dsa_loaded
2633 case A_int_debug_dsa_loaded:
2634 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2635 printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
2636 NCR53c7x0_read32(DSA_REG),
2637 bus_to_virt(NCR53c7x0_read32(DSA_REG)));
2638 }
2639 return SPECIFIC_INT_RESTART;
2640#endif
2641#ifdef A_int_debug_reselected
2642 case A_int_debug_reselected:
2643 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2644 OPTION_DEBUG_DISCONNECT)) {
2645 if ((hostdata->chip / 100) == 8)
2646 printk("scsi%d : reselected by target %d lun %d\n",
2647 host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
2648 (int) hostdata->reselected_identify & 7);
2649 else
2650 printk("scsi%d : reselected by LCRC=0x%02x lun %d\n",
2651 host->host_no, (int) NCR53c7x0_read8(LCRC_REG_10),
2652 (int) hostdata->reselected_identify & 7);
2653 print_queues(host);
2654 }
2655 return SPECIFIC_INT_RESTART;
2656#endif
2657#ifdef A_int_debug_disconnect_msg
2658 case A_int_debug_disconnect_msg:
2659 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2660 if (c)
2661 printk("scsi%d : target %d lun %d disconnecting\n",
2662 host->host_no, c->device->id, c->device->lun);
2663 else
2664 printk("scsi%d : unknown target disconnecting\n",
2665 host->host_no);
2666 }
2667 return SPECIFIC_INT_RESTART;
2668#endif
2669#ifdef A_int_debug_disconnected
2670 case A_int_debug_disconnected:
2671 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2672 OPTION_DEBUG_DISCONNECT)) {
2673 printk ("scsi%d : disconnected, new queues are\n",
2674 host->host_no);
2675 print_queues(host);
2676#if 0
2677 /* Not valid on ncr53c710! */
2678 printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
2679 host->host_no, NCR53c7x0_read8(SXFER_REG),
2680 NCR53c7x0_read8(SCNTL3_REG_800));
2681#endif
2682 if (c) {
2683 print_insn (host, (u32 *)
2684 hostdata->sync[c->device->id].script, "", 1);
2685 print_insn (host, (u32 *)
2686 hostdata->sync[c->device->id].script + 2, "", 1);
2687 }
2688 }
2689 return SPECIFIC_INT_RESTART;
2690#endif
2691#ifdef A_int_debug_panic
2692 case A_int_debug_panic:
2693 printk("scsi%d : int_debug_panic received\n", host->host_no);
2694 print_lots (host);
2695 return SPECIFIC_INT_PANIC;
2696#endif
2697#ifdef A_int_debug_saved
2698 case A_int_debug_saved:
2699 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2700 OPTION_DEBUG_DISCONNECT)) {
2701 printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
2702 host->host_no, cmd->saved_data_pointer,
2703 bus_to_virt (cmd->saved_data_pointer));
2704 print_progress (c);
2705 }
2706 return SPECIFIC_INT_RESTART;
2707#endif
2708#ifdef A_int_debug_restored
2709 case A_int_debug_restored:
2710 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2711 OPTION_DEBUG_DISCONNECT)) {
2712 if (cmd) {
2713 int size;
2714 printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
2715 host->host_no, cmd->saved_data_pointer, bus_to_virt (
2716 cmd->saved_data_pointer));
2717 size = print_insn (host, (u32 *)
2718 bus_to_virt(cmd->saved_data_pointer), "", 1);
2719 size = print_insn (host, (u32 *)
2720 bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
2721 print_progress (c);
2722 }
2723#if 0
2724 printk ("scsi%d : datapath residual %d\n",
2725 host->host_no, datapath_residual (host)) ;
2726#endif
2727 }
2728 return SPECIFIC_INT_RESTART;
2729#endif
2730#ifdef A_int_debug_sync
2731 case A_int_debug_sync:
2732 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2733 OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
2734 unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG), scntl3;
2735 if ((hostdata->chip / 100) == 8) {
2736 scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
2737 if (c) {
2738 if (sxfer != hostdata->sync[c->device->id].sxfer_sanity ||
2739 scntl3 != hostdata->sync[c->device->id].scntl3_sanity) {
2740 printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x\n",
2741 host->host_no, sxfer, scntl3);
2742 NCR53c7x0_write8 (SXFER_REG, sxfer);
2743 NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
2744 }
2745 } else
2746 printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
2747 host->host_no, (int) sxfer, (int) scntl3);
2748 } else {
2749 if (c) {
2750 if (sxfer != hostdata->sync[c->device->id].sxfer_sanity) {
2751 printk ("scsi%d : sync sanity check failed sxfer=0x%x\n",
2752 host->host_no, sxfer);
2753 NCR53c7x0_write8 (SXFER_REG, sxfer);
2754 NCR53c7x0_write8 (SBCL_REG,
2755 hostdata->sync[c->device->id].sscf_710);
2756 }
2757 } else
2758 printk ("scsi%d : unknown command sxfer=0x%x\n",
2759 host->host_no, (int) sxfer);
2760 }
2761 }
2762 return SPECIFIC_INT_RESTART;
2763#endif
2764#ifdef A_int_debug_datain
2765 case A_int_debug_datain:
2766 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2767 OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
2768 int size;
2769 if ((hostdata->chip / 100) == 8)
2770 printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
2771 " datapath residual=%d\n",
2772 host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
2773 (int) NCR53c7x0_read8(SXFER_REG),
2774 (int) NCR53c7x0_read8(SCNTL3_REG_800),
2775 datapath_residual (host)) ;
2776 else
2777 printk ("scsi%d : In do_datain (%s) sxfer=0x%x\n"
2778 " datapath residual=%d\n",
2779 host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
2780 (int) NCR53c7x0_read8(SXFER_REG),
2781 datapath_residual (host)) ;
2782 print_insn (host, dsp, "", 1);
2783 size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
2784 print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
2785 }
2786 return SPECIFIC_INT_RESTART;
2787#endif
2788#ifdef A_int_debug_check_dsa
2789 case A_int_debug_check_dsa:
2790 if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
2791 int sdid;
2792 int tmp;
2793 char *where;
2794 if (hostdata->chip / 100 == 8)
2795 sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
2796 else {
2797 tmp = NCR53c7x0_read8 (SDID_REG_700);
2798 if (!tmp)
2799 panic ("SDID_REG_700 = 0");
2800 tmp >>= 1;
2801 sdid = 0;
2802 while (tmp) {
2803 tmp >>= 1;
2804 sdid++;
2805 }
2806 }
2807 where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
2808 (DCMD_REG)) == hostdata->script +
2809 Ent_select_check_dsa / sizeof(u32) ?
2810 "selection" : "reselection";
2811 if (c && sdid != c->device->id) {
2812 printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
2813 host->host_no, sdid, c->device->id, where);
2814 print_lots(host);
2815 dump_events (host, 20);
2816 return SPECIFIC_INT_PANIC;
2817 }
2818 }
2819 return SPECIFIC_INT_RESTART;
2820#endif
2821 default:
2822 if ((dsps & 0xff000000) == 0x03000000) {
2823 printk ("scsi%d : misc debug interrupt 0x%x\n",
2824 host->host_no, dsps);
2825 return SPECIFIC_INT_RESTART;
2826 } else if ((dsps & 0xff000000) == 0x05000000) {
2827 if (hostdata->events) {
2828 struct NCR53c7x0_event *event;
2829 ++hostdata->event_index;
2830 if (hostdata->event_index >= hostdata->event_size)
2831 hostdata->event_index = 0;
2832 event = (struct NCR53c7x0_event *) hostdata->events +
2833 hostdata->event_index;
2834 event->event = (enum ncr_event) dsps;
2835 event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
2836 if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
2837 if (hostdata->chip / 100 == 8)
2838 event->target = NCR53c7x0_read8(SSID_REG_800);
2839 else {
2840 unsigned char tmp, sdid;
2841 tmp = NCR53c7x0_read8 (SDID_REG_700);
2842 if (!tmp)
2843 panic ("SDID_REG_700 = 0");
2844 tmp >>= 1;
2845 sdid = 0;
2846 while (tmp) {
2847 tmp >>= 1;
2848 sdid++;
2849 }
2850 event->target = sdid;
2851 }
2852 }
2853 else
2854 event->target = 255;
2855
2856 if (event->event == EVENT_RESELECT)
2857 event->lun = hostdata->reselected_identify & 0xf;
2858 else if (c)
2859 event->lun = c->device->lun;
2860 else
2861 event->lun = 255;
2862 do_gettimeofday(&(event->time));
2863 if (c) {
2864 event->pid = c->pid;
2865 memcpy ((void *) event->cmnd, (void *) c->cmnd,
2866 sizeof (event->cmnd));
2867 } else {
2868 event->pid = -1;
2869 }
2870 }
2871 return SPECIFIC_INT_RESTART;
2872 }
2873
2874 printk ("scsi%d : unknown user interrupt 0x%x\n",
2875 host->host_no, (unsigned) dsps);
2876 return SPECIFIC_INT_PANIC;
2877 }
2878}
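
/*
 * Sketch, not driver code : the shape of the ring-buffer bookkeeping the
 * default case above uses for 0x05xxxxxx event interrupts, shown in
 * isolation (the function name is hypothetical).
 */
#if 0
static void
example_log_event (struct NCR53c7x0_hostdata *hostdata, u32 dsps)
{
    struct NCR53c7x0_event *event;

    if (++hostdata->event_index >= hostdata->event_size)
	hostdata->event_index = 0;		/* wrap around the ring */
    event = (struct NCR53c7x0_event *) hostdata->events +
	hostdata->event_index;
    event->event = (enum ncr_event) dsps;	/* the DSPS value is the tag */
}
#endif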
2879
2880/*
2881 * XXX - the stock NCR assembler won't output the scriptu.h file,
2882 * which undefines all #define'd CPP symbols from the script.h
2883 * file, which will create problems if you use multiple scripts
2884 * with the same symbol names.
2885 *
2886 * If you insist on using NCR's assembler, you could generate
2887 * scriptu.h from script.h using something like
2888 *
2889 * grep '#define' script.h | \
2890 * sed 's/#define[ ][ ]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
2891 * > scriptu.h
2892 */
2893
2894#include "53c7xx_u.h"
2895
2896/* XXX - add alternate script handling code here */
2897
2898
2899/*
2900 * Function : static void NCR53c7x0_soft_reset (struct Scsi_Host *host)
2901 *
2902 * Purpose : perform a soft reset of the NCR53c7xx chip
2903 *
2904 * Inputs : host - pointer to this host adapter's structure
2905 *
2906 * Preconditions : NCR53c7x0_init must have been called for this
2907 * host.
2908 *
2909 */
2910
2911static void
2912NCR53c7x0_soft_reset (struct Scsi_Host *host) {
2913 NCR53c7x0_local_declare();
2914 unsigned long flags;
2915 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2916 host->hostdata[0];
2917 NCR53c7x0_local_setup(host);
2918
2919 local_irq_save(flags);
2920
2921 /* Disable scsi chip and s/w level 7 ints */
2922
2923#ifdef CONFIG_MVME16x
2924 if (MACH_IS_MVME16x)
2925 {
2926 volatile unsigned long v;
2927
2928 v = *(volatile unsigned long *)0xfff4006c;
2929 v &= ~0x8000;
2930 *(volatile unsigned long *)0xfff4006c = v;
2931 v = *(volatile unsigned long *)0xfff4202c;
2932 v &= ~0x10;
2933 *(volatile unsigned long *)0xfff4202c = v;
2934 }
2935#endif
2936 /* Anything specific for your hardware? */
2937
2938 /*
2939 * Do a soft reset of the chip so that everything is
2940 * reinitialized to the power-on state.
2941 *
2942 * Basically follow the procedure outlined in the NCR53c700
2943 * data manual under Chapter Six, How to Use, Steps Necessary to
2944 * Start SCRIPTS, with the exception of actually starting the
2945 * script and setting up the synchronous transfer gunk.
2946 */
2947
2948 /* Should we reset the scsi bus here??????????????????? */
2949
2950 NCR53c7x0_write8(ISTAT_REG_700, ISTAT_10_SRST);
2951 NCR53c7x0_write8(ISTAT_REG_700, 0);
2952
2953 /*
2954 * saved_dcntl is set up in NCR53c7x0_init() before it is overwritten
2955 * here. We should have some better way of working out the CF bit
2956 * setting.
2957 */
2958
2959 hostdata->saved_dcntl = DCNTL_10_EA|DCNTL_10_COM;
2960 if (hostdata->scsi_clock > 50000000)
2961 hostdata->saved_dcntl |= DCNTL_700_CF_3;
2962 else
2963 if (hostdata->scsi_clock > 37500000)
2964 hostdata->saved_dcntl |= DCNTL_700_CF_2;
2965#if 0
2966 else
2967 /* Any clocks less than 37.5MHz? */
2968#endif
2969
2970 if (hostdata->options & OPTION_DEBUG_TRACE)
2971 NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM);
2972 else
2973 NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
2974 /* The following disables snooping - snooping is not required, as non-
2975 * cached pages are used for shared data, and appropriate use is
2976 * made of cache_push/cache_clear. Indeed, for 68060
2977 * enabling snooping causes disk corruption of ext2fs free block
2978 * bitmaps and the like. If you have a 68060 with snooping hardwired
2979 * on, then you need to enable CONFIG_060_WRITETHROUGH.
2980 */
2981 NCR53c7x0_write8(CTEST7_REG, CTEST7_10_TT1|CTEST7_STD);
2982 /* Actually burst of eight, according to my 53c710 databook */
2983 NCR53c7x0_write8(hostdata->dmode, DMODE_10_BL_8 | DMODE_10_FC2);
2984 NCR53c7x0_write8(SCID_REG, 1 << host->this_id);
2985 NCR53c7x0_write8(SBCL_REG, 0);
2986 NCR53c7x0_write8(SCNTL1_REG, SCNTL1_ESR_700);
2987 NCR53c7x0_write8(SCNTL0_REG, ((hostdata->options & OPTION_PARITY) ?
2988 SCNTL0_EPC : 0) | SCNTL0_EPG_700 | SCNTL0_ARB1 | SCNTL0_ARB2);
2989
2990 /*
2991 * Enable all interrupts, except parity which we only want when
2992 * the user requests it.
2993 */
2994
2995 NCR53c7x0_write8(DIEN_REG, DIEN_700_BF |
2996 DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_700_OPC);
2997
2998 NCR53c7x0_write8(SIEN_REG_700, ((hostdata->options & OPTION_PARITY) ?
2999 SIEN_PAR : 0) | SIEN_700_STO | SIEN_RST | SIEN_UDC |
3000 SIEN_SGE | SIEN_MA);
3001
3002#ifdef CONFIG_MVME16x
3003 if (MACH_IS_MVME16x)
3004 {
3005 volatile unsigned long v;
3006
3007 /* Enable scsi chip and s/w level 7 ints */
3008 v = *(volatile unsigned long *)0xfff40080;
3009 v = (v & ~(0xf << 28)) | (4 << 28);
3010 *(volatile unsigned long *)0xfff40080 = v;
3011 v = *(volatile unsigned long *)0xfff4006c;
3012 v |= 0x8000;
3013 *(volatile unsigned long *)0xfff4006c = v;
3014 v = *(volatile unsigned long *)0xfff4202c;
3015 v = (v & ~0xff) | 0x10 | 4;
3016 *(volatile unsigned long *)0xfff4202c = v;
3017 }
3018#endif
3019 /* Anything needed for your hardware? */
3020 local_irq_restore(flags);
3021}
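
/*
 * Sketch of the DCNTL clock-factor choice made in the soft reset above
 * (illustration only; the helper name is hypothetical) : CF_3 above
 * 50MHz, CF_2 above 37.5MHz, no CF bits otherwise.
 */
#if 0
static int
example_cf_bits (unsigned long scsi_clock)
{
    if (scsi_clock > 50000000)
	return DCNTL_700_CF_3;
    if (scsi_clock > 37500000)
	return DCNTL_700_CF_2;
    return 0;			/* slower clocks : no CF bits needed here */
}
#endif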
3022
3023
3024/*
3025 * Function : static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
3026 *
3027 * Purpose : Return the first free NCR53c7x0_cmd structure (which are
3028 * reused in a LIFO manner to minimize cache thrashing).
3029 *
3030 * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
3031 * structures for this device, do so. Attempt to complete all scheduled
3032 * allocations using get_zeroed_page(), putting NCR53c7x0_cmd structures on
3033 * the free list. Teach programmers not to drink and hack.
3034 *
3035 * Inputs : cmd - SCSI command
3036 *
3037 * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
3038 * NULL on failure.
3039 */
3040
3041static void
3042my_free_page (void *addr, int dummy)
3043{
3044 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
3045 * XXX may be invalid (CONFIG_060_WRITETHROUGH)
3046 */
3047 kernel_set_cachemode((void *)addr, 4096, IOMAP_FULL_CACHING);
3048 free_page ((u32)addr);
3049}
3050
3051static struct NCR53c7x0_cmd *
3052allocate_cmd (Scsi_Cmnd *cmd) {
3053 struct Scsi_Host *host = cmd->device->host;
3054 struct NCR53c7x0_hostdata *hostdata =
3055 (struct NCR53c7x0_hostdata *) host->hostdata[0];
3056 u32 real; /* Real address */
3057 int size; /* Size of *tmp */
3058 struct NCR53c7x0_cmd *tmp;
3059 unsigned long flags;
3060
3061 if (hostdata->options & OPTION_DEBUG_ALLOCATION)
3062 printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
3063 " target = %d, lun = %d, %s\n",
3064 host->host_no, hostdata->num_cmds, host->can_queue,
3065 cmd->device->id, cmd->device->lun, (hostdata->cmd_allocated[cmd->device->id] &
3066 (1 << cmd->device->lun)) ? "already allocated" : "not allocated");
3067
3068/*
3069 * If we have not yet reserved commands for this I_T_L nexus, and
3070 * the device exists (as indicated by permanent Scsi_Cmnd structures
3071 * being allocated under 1.3.x, or being outside of scan_scsis in
3072 * 1.2.x), do so now.
3073 */
3074 if (!(hostdata->cmd_allocated[cmd->device->id] & (1 << cmd->device->lun)) &&
3075 cmd->device && cmd->device->has_cmdblocks) {
3076 if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
3077 hostdata->extra_allocate += host->cmd_per_lun;
3078 hostdata->cmd_allocated[cmd->device->id] |= (1 << cmd->device->lun);
3079 }
3080
3081 for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
3082 ++hostdata->num_cmds) {
3083 /* historically, kmalloc has returned unaligned addresses; pad so we
3084 have enough room to ROUNDUP */
3085 size = hostdata->max_cmd_size + sizeof (void *);
3086#ifdef FORCE_DSA_ALIGNMENT
3087 /*
3088 * 53c710 rev.0 doesn't have an add-with-carry instruction.
3089 * Ensure we allocate enough memory to force alignment.
3090 */
3091 size += 256;
3092#endif
3093/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */
3094
3095 if (size > 4096) {
3096 printk (KERN_ERR "53c7xx: allocate_cmd size > 4K\n");
3097 return NULL;
3098 }
3099 real = get_zeroed_page(GFP_ATOMIC);
3100 if (real == 0)
3101 return NULL;
3102 cache_push(virt_to_phys((void *)real), 4096);
3103 cache_clear(virt_to_phys((void *)real), 4096);
3104 kernel_set_cachemode((void *)real, 4096, IOMAP_NOCACHE_SER);
3105 tmp = ROUNDUP(real, void *);
3106#ifdef FORCE_DSA_ALIGNMENT
3107 {
3108 if (((u32)tmp & 0xff) > CmdPageStart)
3109 tmp = (struct NCR53c7x0_cmd *)((u32)tmp + 255);
3110 tmp = (struct NCR53c7x0_cmd *)(((u32)tmp & ~0xff) + CmdPageStart);
3111#if 0
3112 printk ("scsi: size = %d, real = 0x%08x, tmp set to 0x%08x\n",
3113 size, real, (u32)tmp);
3114#endif
3115 }
3116#endif
3117 tmp->real = (void *)real;
3118 tmp->size = size;
3119 tmp->free = ((void (*)(void *, int)) my_free_page);
3120 local_irq_save(flags);
3121 tmp->next = hostdata->free;
3122 hostdata->free = tmp;
3123 local_irq_restore(flags);
3124 }
3125 local_irq_save(flags);
3126 tmp = (struct NCR53c7x0_cmd *) hostdata->free;
3127 if (tmp) {
3128 hostdata->free = tmp->next;
3129 }
3130 local_irq_restore(flags);
3131 if (!tmp)
3132 printk ("scsi%d : can't allocate command for target %d lun %d\n",
3133 host->host_no, cmd->device->id, cmd->device->lun);
3134 return tmp;
3135}
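
/*
 * Sketch of the FORCE_DSA_ALIGNMENT fixup in allocate_cmd() (for
 * illustration only; the helper name is hypothetical).  The goal is an
 * address whose low byte equals CmdPageStart, rounded up within the
 * over-allocated page.
 */
#if 0
static u32
example_align_cmd (u32 addr)
{
    if ((addr & 0xff) > CmdPageStart)	/* past the slot in this block */
	addr += 255;			/* bump into the next 256 bytes */
    return (addr & ~0xff) + CmdPageStart;
}
#endif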
3136
3137/*
3138 * Function : static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
3139 *
3140 *
3141 * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
3142 * Scsi_Cmnd structure passed in cmd, including dsa and Linux field
3143 * initialization, and dsa code relocation.
3144 *
3145 * Inputs : cmd - SCSI command
3146 *
3147 * Returns : NCR53c7x0_cmd structure corresponding to cmd,
3148 * NULL on failure.
3149 */
3150static struct NCR53c7x0_cmd *
3151create_cmd (Scsi_Cmnd *cmd) {
3152 NCR53c7x0_local_declare();
3153 struct Scsi_Host *host = cmd->device->host;
3154 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
3155 host->hostdata[0];
3156 struct NCR53c7x0_cmd *tmp; /* NCR53c7x0_cmd structure for this command */
3157 int datain, /* Number of instructions per phase */
3158 dataout;
3159 int data_transfer_instructions, /* Count of dynamic instructions */
3160 i; /* Counter */
3161 u32 *cmd_datain, /* Address of datain/dataout code */
3162 *cmd_dataout; /* Incremented as we assemble */
3163#ifdef notyet
3164 unsigned char *msgptr; /* Current byte in select message */
3165 int msglen; /* Length of whole select message */
3166#endif
3167 unsigned long flags;
3168 u32 exp_select_indirect; /* Used in sanity check */
3169 NCR53c7x0_local_setup(cmd->device->host);
3170
3171 if (!(tmp = allocate_cmd (cmd)))
3172 return NULL;
3173
3174 /*
3175 * Copy CDB and initialised result fields from Scsi_Cmnd to NCR53c7x0_cmd.
3176 * We do this because NCR53c7x0_cmd may have a special cache mode
3177 * selected to cope with lack of bus snooping, etc.
3178 */
3179
3180 memcpy(tmp->cmnd, cmd->cmnd, 12);
3181 tmp->result = cmd->result;
3182
3183 /*
3184 * Decide whether we need to generate commands for DATA IN,
3185 * DATA OUT, neither, or both based on the SCSI command
3186 */
3187
3188 switch (cmd->cmnd[0]) {
3189 /* These commands do DATA IN */
3190 case INQUIRY:
3191 case MODE_SENSE:
3192 case READ_6:
3193 case READ_10:
3194 case READ_CAPACITY:
3195 case REQUEST_SENSE:
3196 case READ_BLOCK_LIMITS:
3197 case READ_TOC:
3198 datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
3199 dataout = 0;
3200 break;
3201 /* These commands do DATA OUT */
3202 case MODE_SELECT:
3203 case WRITE_6:
3204 case WRITE_10:
3205#if 0
3206 printk("scsi%d : command is ", host->host_no);
3207 __scsi_print_command(cmd->cmnd);
3208#endif
3209#if 0
3210 printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
3211 cmd->use_sg);
3212#endif
3213 datain = 0;
3214 dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
3215#if 0
3216 hostdata->options |= OPTION_DEBUG_INTR;
3217#endif
3218 break;
3219 /*
3220 * These commands do no data transfer; we should force an
3221 * interrupt if a data phase is attempted on them.
3222 */
3223 case TEST_UNIT_READY:
3224 case ALLOW_MEDIUM_REMOVAL:
3225 case START_STOP:
3226 datain = dataout = 0;
3227 break;
3228 /*
3229 * We don't know about these commands, so generate code to handle
3230 * both DATA IN and DATA OUT phases. It would be more efficient to
3231 * identify them and add them to the above cases.
3232 */
3233 default:
3234 printk("scsi%d : datain+dataout for command ", host->host_no);
3235 __scsi_print_command(cmd->cmnd);
3236 datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
3237 }
3238
3239 /*
3240 * New code : so that active pointers work correctly regardless
3241 * of where the saved data pointer is, we want to immediately
3242 * enter the dynamic code after selection, and on a non-data
3243 * phase perform a CALL to the non-data phase handler, with
3244 * returns back to this address.
3245 *
3246 * If a phase mismatch is encountered in the middle of a
3247 * Block MOVE instruction, we want to _leave_ that instruction
3248 * unchanged as the current case is, modify a temporary buffer,
3249 * and point the active pointer (TEMP) at that.
3250 *
3251 * Furthermore, we want to implement a saved data pointer,
3252 * set by the SAVE_DATA_POINTERs message.
3253 *
3254 * So, the data transfer segments will change to
3255 * CALL data_transfer, WHEN NOT data phase
3256 * MOVE x, x, WHEN data phase
3257 * ( repeat )
3258 * JUMP other_transfer
3259 */
3260
3261 data_transfer_instructions = datain + dataout;
3262
3263 /*
3264 * When we perform a request sense, we overwrite various things,
3265 * including the data transfer code. Make sure we have enough
3266 * space to do that.
3267 */
3268
3269 if (data_transfer_instructions < 2)
3270 data_transfer_instructions = 2;
3271
3272
3273 /*
3274 * The saved data pointer is set up so that a RESTORE POINTERS message
3275 * will start the data transfer over at the beginning.
3276 */
3277
3278 tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
3279 hostdata->E_data_transfer;
3280
3281 /*
3282 * Initialize Linux specific fields.
3283 */
3284
3285 tmp->cmd = cmd;
3286 tmp->next = NULL;
3287 tmp->flags = 0;
3288 tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next -
3289 hostdata->dsa_start;
3290 tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;
3291
3292 /*
3293 * Calculate addresses of dynamic code to fill in DSA
3294 */
3295
3296 tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end -
3297 hostdata->dsa_start) / sizeof(u32);
3298 tmp->data_transfer_end = tmp->data_transfer_start +
3299 2 * data_transfer_instructions;
3300
3301 cmd_datain = datain ? tmp->data_transfer_start : NULL;
3302 cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
3303 data_transfer_start) : NULL;
3304
3305 /*
3306 * Fill in the NCR53c7x0_cmd structure as follows
3307 * dsa, with fixed up DSA code
3308 * datain code
3309 * dataout code
3310 */
3311
3312 /* Copy template code into dsa and perform all necessary fixups */
3313 if (hostdata->dsa_fixup)
3314 hostdata->dsa_fixup(tmp);
3315
3316 patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
3317 /*
3318 * XXX is this giving 53c710 access to the Scsi_Cmnd in some way?
3319 * Do we need to change it for caching reasons?
3320 */
3321 patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));
3322
3323 if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS) {
3324
3325 exp_select_indirect = ((1 << cmd->device->id) << 16) |
3326 (hostdata->sync[cmd->device->id].sxfer_sanity << 8);
3327
3328 if (hostdata->sync[cmd->device->id].select_indirect !=
3329 exp_select_indirect) {
3330 printk ("scsi%d : sanity check failed select_indirect=0x%x\n",
3331 host->host_no, hostdata->sync[cmd->device->id].select_indirect);
3332 FATAL(host);
3333
3334 }
3335 }
3336
3337 patch_dsa_32(tmp->dsa, dsa_select, 0,
3338 hostdata->sync[cmd->device->id].select_indirect);
3339
3340 /*
3341 * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
3343 * different commands, although it should be trivial to do them
3343 * both at the same time.
3344 */
3345 if (hostdata->initiate_wdtr & (1 << cmd->device->id)) {
3346 memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
3347 sizeof(wdtr_message));
3348 patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
3349 local_irq_save(flags);
3350 hostdata->initiate_wdtr &= ~(1 << cmd->device->id);
3351 local_irq_restore(flags);
3352 } else if (hostdata->initiate_sdtr & (1 << cmd->device->id)) {
3353 memcpy ((void *) (tmp->select + 1), (void *) sdtr_message,
3354 sizeof(sdtr_message));
3355 patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
3356 tmp->flags |= CMD_FLAG_SDTR;
3357 local_irq_save(flags);
3358 hostdata->initiate_sdtr &= ~(1 << cmd->device->id);
3359 local_irq_restore(flags);
3360
3361 }
3362#if 1
3363 else if (!(hostdata->talked_to & (1 << cmd->device->id)) &&
3364 !(hostdata->options & OPTION_NO_ASYNC)) {
3365
3366 memcpy ((void *) (tmp->select + 1), (void *) async_message,
3367 sizeof(async_message));
3368 patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
3369 tmp->flags |= CMD_FLAG_SDTR;
3370 }
3371#endif
3372 else
3373 patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);
3374
3375 hostdata->talked_to |= (1 << cmd->device->id);
3376 tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ?
3377 IDENTIFY (1, cmd->device->lun) : IDENTIFY (0, cmd->device->lun);
3378 patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
3379 patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
3380 patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(tmp->cmnd));
3381 patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ?
3382 virt_to_bus (cmd_dataout)
3383 : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
3384 patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ?
3385 virt_to_bus (cmd_datain)
3386 : virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
3387 /*
3388 * XXX - need to make endian aware, should use separate variables
3389 * for both status and message bytes.
3390 */
3391 patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
3392/*
3393 * FIXME : this only works for little endian. We probably want to
3394 * provide message and status fields in the NCR53c7x0_cmd
3395 * structure, and assign them to cmd->result when we're done.
3396 */
3397#ifdef BIG_ENDIAN
3398 patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 2);
3399 patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
3400 patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result) + 3);
3401#else
3402 patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 1);
3403 patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
3404 patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result));
3405#endif
3406 patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
3407 patch_dsa_32(tmp->dsa, dsa_msgout_other, 1,
3408 virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
3409
3410 /*
3411 * Generate code for zero or more of the DATA IN, DATA OUT phases
3412 * in the format
3413 *
3414 * CALL data_transfer, WHEN NOT phase
3415 * MOVE first buffer length, first buffer address, WHEN phase
3416 * ...
3417 * MOVE last buffer length, last buffer address, WHEN phase
3418 * JUMP other_transfer
3419 */
3420
3421/*
3422 * See if we're getting to data transfer by generating an unconditional
3423 * interrupt.
3424 */
3425#if 0
3426 if (datain) {
3427 cmd_datain[0] = 0x98080000;
3428 cmd_datain[1] = 0x03ffd00d;
3429 cmd_datain += 2;
3430 }
3431#endif
3432
3433/*
3434 * XXX - I'm undecided whether all of this nonsense is faster
3435 * in the long run, or whether I should just go and implement a loop
3436 * on the NCR chip using table indirect mode?
3437 *
3438 * In any case, this is how it _must_ be done for 53c700/700-66 chips,
3439 * so this stays even when we come up with something better.
3440 *
3441 * When we're limited to 1 simultaneous command, no overlapping processing,
3442 * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
3443 * drive.
3444 *
3445 * Not bad, not good. We'll see.
3446 */
3447
3448 tmp->bounce.len = 0; /* Assume aligned buffer */
3449
3450 for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4,
3451 cmd_dataout += 4, ++i) {
3452 u32 vbuf = cmd->use_sg
3453 ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
3454 ((struct scatterlist *)cmd->request_buffer)[i].offset
3455 : (u32)(cmd->request_buffer);
3456 u32 bbuf = virt_to_bus((void *)vbuf);
3457 u32 count = cmd->use_sg ?
3458 ((struct scatterlist *)cmd->request_buffer)[i].length :
3459 cmd->request_bufflen;
3460
3461 /*
3462 * If we have buffers which are not aligned with 16 byte cache
3463 * lines, then we just hope nothing accesses the other parts of
3464 * those cache lines while the transfer is in progress. That would
3465 * fill the cache, and subsequent reads of the dma data would pick
3466 * up the wrong thing.
3467 * XXX We need a bounce buffer to handle that correctly.
3468 */
3469
3470 if (((bbuf & 15) || (count & 15)) && (datain || dataout))
3471 {
3472 /* Bounce buffer needed */
3473 if (cmd->use_sg)
3474 printk ("53c7xx: Non-aligned buffer with use_sg\n");
3475 else if (datain && dataout)
3476 printk ("53c7xx: Non-aligned buffer with datain && dataout\n");
3477 else if (count > 256)
3478 printk ("53c7xx: Non-aligned transfer > 256 bytes\n");
3479 else
3480 {
3481 if (datain)
3482 {
3483 tmp->bounce.len = count;
3484 tmp->bounce.addr = vbuf;
3485 bbuf = virt_to_bus(tmp->bounce.buf);
3486 tmp->bounce.buf[0] = 0xff;
3487 tmp->bounce.buf[1] = 0xfe;
3488 tmp->bounce.buf[2] = 0xfd;
3489 tmp->bounce.buf[3] = 0xfc;
3490 }
3491 if (dataout)
3492 {
3493 memcpy ((void *)tmp->bounce.buf, (void *)vbuf, count);
3494 bbuf = virt_to_bus(tmp->bounce.buf);
3495 }
3496 }
3497 }
3498
3499 if (datain) {
3500 cache_clear(virt_to_phys((void *)vbuf), count);
3501 /* CALL other_in, WHEN NOT DATA_IN */
3502 cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
3503 DCMD_TCI_IO) << 24) |
3504 DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
3505 cmd_datain[1] = virt_to_bus (hostdata->script) +
3506 hostdata->E_other_in;
3507 /* MOVE count, buf, WHEN DATA_IN */
3508 cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO)
3509 << 24) | count;
3510 cmd_datain[3] = bbuf;
3511#if 0
3512 print_insn (host, cmd_datain, "dynamic ", 1);
3513 print_insn (host, cmd_datain + 2, "dynamic ", 1);
3514#endif
3515 }
3516 if (dataout) {
3517 cache_push(virt_to_phys((void *)vbuf), count);
3518 /* CALL other_out, WHEN NOT DATA_OUT */
3519 cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) |
3520 DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
3521 cmd_dataout[1] = virt_to_bus(hostdata->script) +
3522 hostdata->E_other_out;
3523 /* MOVE count, buf, WHEN DATA_OUT */
3524 cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24)
3525 | count;
3526 cmd_dataout[3] = bbuf;
3527#if 0
3528 print_insn (host, cmd_dataout, "dynamic ", 1);
3529 print_insn (host, cmd_dataout + 2, "dynamic ", 1);
3530#endif
3531 }
3532 }
3533
3534 /*
3535 * Install JUMP instructions after the data transfer routines to return
3536 * control to the do_other_transfer routines.
3537 */
3538
3539
3540 if (datain) {
3541 cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
3542 DBC_TCI_TRUE;
3543 cmd_datain[1] = virt_to_bus(hostdata->script) +
3544 hostdata->E_other_transfer;
3545#if 0
3546 print_insn (host, cmd_datain, "dynamic jump ", 1);
3547#endif
3548 cmd_datain += 2;
3549 }
3550#if 0
3551 if (datain) {
3552 cmd_datain[0] = 0x98080000;
3553 cmd_datain[1] = 0x03ffdeed;
3554 cmd_datain += 2;
3555 }
3556#endif
3557 if (dataout) {
3558 cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
3559 DBC_TCI_TRUE;
3560 cmd_dataout[1] = virt_to_bus(hostdata->script) +
3561 hostdata->E_other_transfer;
3562#if 0
3563 print_insn (host, cmd_dataout, "dynamic jump ", 1);
3564#endif
3565 cmd_dataout += 2;
3566 }
3567
3568 return tmp;
3569}
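
/*
 * Sketch, not driver code : assembling one DATA IN segment the way
 * create_cmd() does above, for a hypothetical buffer address and byte
 * count (the function name is made up for illustration).
 */
#if 0
static void
example_datain_segment (struct NCR53c7x0_hostdata *hostdata, u32 *insn,
    u32 bbuf, u32 count)
{
    /* CALL other_in, WHEN NOT DATA_IN */
    insn[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL | DCMD_TCI_IO) << 24) |
	DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
    insn[1] = virt_to_bus (hostdata->script) + hostdata->E_other_in;
    /* MOVE count, buf, WHEN DATA_IN */
    insn[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO) << 24) |
	count;
    insn[3] = bbuf;
}
#endif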
3570
3571/*
3572 * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
3573 * void (*done)(Scsi_Cmnd *))
3574 *
3575 * Purpose : enqueues a SCSI command
3576 *
3577 * Inputs : cmd - SCSI command, done - function called on completion, with
3578 * a pointer to the command descriptor.
3579 *
3580 * Returns : 0
3581 *
3582 * Side effects :
3583 * cmd is added to the per instance driver issue_queue, with major
3584 * twiddling done to the host specific fields of cmd. If the
3585 * process_issue_queue coroutine isn't running, it is restarted.
3586 *
3587 * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
3588 * hold our own data, and pervert the ptr field of the SCp field
3589 * to create a linked list.
3590 */
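
/*
 * Sketch of the SCp.ptr linked list described in the NOTE above (not
 * driver code; the helper name is hypothetical).  Ordinary commands go
 * to the tail; REQUEST SENSE jumps the queue at the head.
 */
#if 0
static void
example_issue_append (struct NCR53c7x0_hostdata *hostdata, Scsi_Cmnd *cmd)
{
    Scsi_Cmnd *tail;

    if (!hostdata->issue_queue || cmd->cmnd[0] == REQUEST_SENSE) {
	cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
	hostdata->issue_queue = cmd;
    } else {
	for (tail = (Scsi_Cmnd *) hostdata->issue_queue; tail->SCp.ptr;
	    tail = (Scsi_Cmnd *) tail->SCp.ptr);
	tail->SCp.ptr = (unsigned char *) cmd;
    }
}
#endif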
3591
3592int
3593NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
3594 struct Scsi_Host *host = cmd->device->host;
3595 struct NCR53c7x0_hostdata *hostdata =
3596 (struct NCR53c7x0_hostdata *) host->hostdata[0];
3597 unsigned long flags;
3598 Scsi_Cmnd *tmp;
3599
3600 cmd->scsi_done = done;
3601 cmd->host_scribble = NULL;
3602 cmd->SCp.ptr = NULL;
3603 cmd->SCp.buffer = NULL;
3604
3605#ifdef VALID_IDS
3606 /* Ignore commands on invalid IDs */
3607 if (!hostdata->valid_ids[cmd->device->id]) {
3608 printk("scsi%d : ignoring target %d lun %d\n", host->host_no,
3609 cmd->device->id, cmd->device->lun);
3610 cmd->result = (DID_BAD_TARGET << 16);
3611 done(cmd);
3612 return 0;
3613 }
3614#endif
3615
3616 local_irq_save(flags);
3617 if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
3618 || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
3619 !(hostdata->debug_lun_limit[cmd->device->id] & (1 << cmd->device->lun)))
3620#ifdef LINUX_1_2
3621 || cmd->device->id > 7
3622#else
3623 || cmd->device->id >= host->max_id
3624#endif
3625 || cmd->device->id == host->this_id
3626 || hostdata->state == STATE_DISABLED) {
3627 printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
3628 cmd->device->id, cmd->device->lun);
3629 cmd->result = (DID_BAD_TARGET << 16);
3630 done(cmd);
3631 local_irq_restore(flags);
3632 return 0;
3633 }
3634
3635 if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
3636 (hostdata->debug_count_limit == 0)) {
3637 printk("scsi%d : maximum commands exceeded\n", host->host_no);
3638 cmd->result = (DID_BAD_TARGET << 16);
3639 done(cmd);
3640 local_irq_restore(flags);
3641 return 0;
3642 }
3643
3644 if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
3645 switch (cmd->cmnd[0]) {
3646 case WRITE_6:
3647 case WRITE_10:
3648 printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
3649 host->host_no);
3650 cmd->result = (DID_BAD_TARGET << 16);
3651 done(cmd);
3652 local_irq_restore(flags);
3653 return 0;
3654 }
3655 }
3656
3657 if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
3658 hostdata->debug_count_limit != -1)
3659 --hostdata->debug_count_limit;
3660
3661 cmd->result = 0xffff; /* The NCR will overwrite message
3662 and status with valid data */
3663 cmd->host_scribble = (unsigned char *) tmp = create_cmd (cmd);
3664
3665 /*
3666 * REQUEST SENSE commands are inserted at the head of the queue
3667 * so that we do not clear the contingent allegiance condition
3668 * they may be looking at.
3669 */
3670
3671 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
3672 cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
3673 hostdata->issue_queue = cmd;
3674 } else {
3675 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
3676 tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
3677 tmp->SCp.ptr = (unsigned char *) cmd;
3678 }
3679 local_irq_restore(flags);
3680 run_process_issue_queue();
3681 return 0;
3682}
3683
3684/*
3685 * Function : void to_schedule_list (struct Scsi_Host *host,
3686 * struct NCR53c7x0_hostdata *hostdata, struct NCR53c7x0_cmd *cmd)
3687 *
3688 * Purpose : takes a SCSI command which was just removed from the
3689 * issue queue, and deals with it by inserting it in the first
3690 * free slot in the schedule list or by terminating it immediately.
3691 *
3692 * Inputs :
3693 * host - SCSI host adapter; hostdata - hostdata structure for
3694 * this adapter; cmd - a pointer to the NCR53c7x0_cmd structure for
3695 * the command being scheduled.
3696 *
3697 * Side effects :
3698 * cmd is added to the per instance schedule list, with minor
3699 * twiddling done to the host specific fields of cmd.
3700 *
3701 */
3702
3703static __inline__ void
3704to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
3705 struct NCR53c7x0_cmd *cmd) {
3706 NCR53c7x0_local_declare();
3707 Scsi_Cmnd *tmp = cmd->cmd;
3708 unsigned long flags;
3709 /* dsa start is negative, so subtraction is used */
3710 volatile u32 *ncrcurrent;
3711
3712 int i;
3713 NCR53c7x0_local_setup(host);
3714#if 0
3715 printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
3716 virt_to_bus(hostdata->dsa), hostdata->dsa);
3717#endif
3718
3719 local_irq_save(flags);
3720
3721 /*
3722 * Work around race condition : if an interrupt fired and we
3723 * got disabled, forget about this command.
3724 */
3725
3726 if (hostdata->state == STATE_DISABLED) {
3727 printk("scsi%d : driver disabled\n", host->host_no);
3728 tmp->result = (DID_BAD_TARGET << 16);
3729 cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
3730 hostdata->free = cmd;
3731 tmp->scsi_done(tmp);
3732 local_irq_restore(flags);
3733 return;
3734 }
3735
3736 for (i = host->can_queue, ncrcurrent = hostdata->schedule;
3737 i > 0 && ncrcurrent[0] != hostdata->NOP_insn;
3738 --i, ncrcurrent += 2 /* JUMP instructions are two words */);
3739
3740 if (i > 0) {
3741 ++hostdata->busy[tmp->device->id][tmp->device->lun];
3742 cmd->next = hostdata->running_list;
3743 hostdata->running_list = cmd;
3744
3745 /* Restore this instruction to a NOP once the command starts */
3746 cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
3747 sizeof(u32)] = (u32) virt_to_bus ((void *)ncrcurrent);
3748 /* Replace the current jump operand. */
3749 ncrcurrent[1] =
3750 virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
3751 hostdata->E_dsa_code_template;
3752 /* Replace the NOP instruction with a JUMP */
3753 ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
3754 DBC_TCI_TRUE;
3755 } else {
3756 printk ("scsi%d: no free slot\n", host->host_no);
3757 disable(host);
3758 tmp->result = (DID_ERROR << 16);
3759 cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
3760 hostdata->free = cmd;
3761 tmp->scsi_done(tmp);
3762 local_irq_restore(flags);
3763 return;
3764 }
3765
3766 /*
3767 * If the NCR chip is in an idle state, start it running the scheduler
3768 * immediately. Otherwise, signal the chip to jump to schedule as
3769 * soon as it is idle.
3770 */
3771
3772 if (hostdata->idle) {
3773 hostdata->idle = 0;
3774 hostdata->state = STATE_RUNNING;
3775 NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
3776 if (hostdata->options & OPTION_DEBUG_TRACE)
3777 NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
3778 DCNTL_SSM | DCNTL_STD);
3779 } else {
3780 NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
3781 }
3782
3783 local_irq_restore(flags);
3784}
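
/*
 * Sketch of the scheduler slot claim performed above (illustration only;
 * the helper name is hypothetical).  A free slot holds a NOP; claiming it
 * writes the new jump operand first, then turns the NOP into a JUMP so
 * the chip never sees a JUMP with a stale operand.
 */
#if 0
static void
example_claim_slot (volatile u32 *slot, u32 dsa_code_bus_addr)
{
    slot[1] = dsa_code_bus_addr;	/* operand before opcode ... */
    slot[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
}
#endif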
3785
3786/*
3787 * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
3788 * *hostdata, Scsi_Cmnd *cmd)
3789 *
3790 * Purpose : decide if we can pass the given SCSI command on to the
3791 * device in question or not.
3792 *
3793 * Returns : non-zero when we're busy, 0 when we aren't.
3794 */
3795
3796static __inline__ int
3797busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
3798 Scsi_Cmnd *cmd) {
3799 /* FIXME : in the future, this needs to accommodate SCSI-II tagged
3800 queuing, and we may be able to play with fairness here a bit.
3801 */
3802 return hostdata->busy[cmd->device->id][cmd->device->lun];
3803}
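/*
 * The busy map consulted here is maintained elsewhere in this file :
 * to_schedule_list() increments busy[target][lun] when a command is
 * handed to the chip, and NCR53c7x0_intfly() decrements it when the
 * command completes.
 */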
3804
3805/*
3806 * Function : process_issue_queue (void)
3807 *
3808 * Purpose : transfer commands from the issue queue to the NCR start queue
3809 * of each NCR53c7/8xx in the system, avoiding kernel stack
3810 * overflows when the scsi_done() function is invoked recursively.
3811 *
3812 * NOTE : process_issue_queue exits with interrupts *disabled*, so the
3813 * caller must reenable them if it desires.
3814 *
3815 * NOTE : process_issue_queue should be called from both
3816 * NCR53c7x0_queue_command() and from the interrupt handler
3817 * after command completion in case NCR53c7x0_queue_command()
3818 * isn't invoked again but we've freed up resources that are
3819 * needed.
3820 */
3821
3822static void
3823process_issue_queue (unsigned long flags) {
3824 Scsi_Cmnd *tmp, *prev;
3825 struct Scsi_Host *host;
3826 struct NCR53c7x0_hostdata *hostdata;
3827 int done;
3828
3829 /*
3830 * We run (with interrupts disabled) until we're sure that none of
3831 * the host adapters have anything that can be done, at which point
3832 * we set process_issue_queue_running to 0 and exit.
3833 *
3834 * Interrupts are re-enabled before doing various other internal
3835 * operations, after we've decided that we need to run through
3836 * the loop again.
3837 *
3838 */
3839
3840 do {
3841 local_irq_disable(); /* Freeze request queues */
3842 done = 1;
3843 for (host = first_host; host && host->hostt == the_template;
3844 host = host->next) {
3845 hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
3846 local_irq_disable();
3847 if (hostdata->issue_queue) {
3848 if (hostdata->state == STATE_DISABLED) {
3849 tmp = (Scsi_Cmnd *) hostdata->issue_queue;
3850 hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
3851 tmp->result = (DID_BAD_TARGET << 16);
3852 if (tmp->host_scribble) {
3853 ((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
3854 hostdata->free;
3855 hostdata->free =
3856 (struct NCR53c7x0_cmd *)tmp->host_scribble;
3857 tmp->host_scribble = NULL;
3858 }
3859 tmp->scsi_done (tmp);
3860 done = 0;
3861 } else
3862 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
3863 prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
3864 tmp->SCp.ptr)
3865 if (!tmp->host_scribble ||
3866 !busyp (host, hostdata, tmp)) {
3867 if (prev)
3868 prev->SCp.ptr = tmp->SCp.ptr;
3869 else
3870 hostdata->issue_queue = (Scsi_Cmnd *)
3871 tmp->SCp.ptr;
3872 tmp->SCp.ptr = NULL;
3873 if (tmp->host_scribble) {
3874 if (hostdata->options & OPTION_DEBUG_QUEUES)
3875 printk ("scsi%d : moving command for target %d lun %d to start list\n",
3876 host->host_no, tmp->device->id, tmp->device->lun);
3877
3878
3879 to_schedule_list (host, hostdata,
3880 (struct NCR53c7x0_cmd *)
3881 tmp->host_scribble);
3882 } else {
3883 if (((tmp->result & 0xff) == 0xff) ||
3884 ((tmp->result & 0xff00) == 0xff00)) {
3885 printk ("scsi%d : danger Will Robinson!\n",
3886 host->host_no);
3887 tmp->result = DID_ERROR << 16;
3888 disable (host);
3889 }
3890 tmp->scsi_done(tmp);
3891 }
3892 done = 0;
3893 } /* if target/lun is not busy */
3894 } /* if hostdata->issue_queue */
3895 if (!done)
3896 local_irq_restore(flags);
3897 } /* for host */
3898 } while (!done);
3899 process_issue_queue_running = 0;
3900}
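/*
 * Callers go through run_process_issue_queue() (defined elsewhere in
 * this file) rather than calling process_issue_queue() directly;
 * presumably it tests and sets process_issue_queue_running so that
 * recursive completions via scsi_done() cannot re-enter the loop above.
 */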
3901
3902/*
3903 * Function : static void intr_scsi (struct Scsi_Host *host,
3904 * struct NCR53c7x0_cmd *cmd)
3905 *
3906 * Purpose : handle all SCSI interrupts, indicated by the setting
3907 * of the SIP bit in the ISTAT register.
3908 *
3909 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
3910 * may be NULL.
3911 */
3912
3913static void
3914intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
3915 NCR53c7x0_local_declare();
3916 struct NCR53c7x0_hostdata *hostdata =
3917 (struct NCR53c7x0_hostdata *) host->hostdata[0];
3918 unsigned char sstat0_sist0, sist1, /* Registers */
3919 fatal; /* Did a fatal interrupt
3920 occur ? */
3921
3922 NCR53c7x0_local_setup(host);
3923
3924 fatal = 0;
3925
3926 sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
3927 sist1 = 0;
3928
3929 if (hostdata->options & OPTION_DEBUG_INTR)
3930 printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
3931 sstat0_sist0, sist1);
3932
3933 /* 250ms selection timeout */
3934 if (sstat0_sist0 & SSTAT0_700_STO) {
3935 fatal = 1;
3936 if (hostdata->options & OPTION_DEBUG_INTR) {
3937 printk ("scsi%d : Selection Timeout\n", host->host_no);
3938 if (cmd) {
3939 printk("scsi%d : target %d, lun %d, command ",
3940 host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
3941 __scsi_print_command (cmd->cmd->cmnd);
3942 printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
3943 NCR53c7x0_read32(DSP_REG),
3944 bus_to_virt(NCR53c7x0_read32(DSP_REG)));
3945 } else {
3946 printk("scsi%d : no command\n", host->host_no);
3947 }
3948 }
3949/*
3950 * XXX - question : how do we want to handle the Illegal Instruction
3951 * interrupt, which may occur before or after the Selection Timeout
3952 * interrupt?
3953 */
3954
3955 if (1) {
3956 hostdata->idle = 1;
3957 hostdata->expecting_sto = 0;
3958
3959 if (hostdata->test_running) {
3960 hostdata->test_running = 0;
3961 hostdata->test_completed = 3;
3962 } else if (cmd) {
3963 abnormal_finished(cmd, DID_BAD_TARGET << 16);
3964 }
3965#if 0
3966 hostdata->intrs = 0;
3967#endif
3968 }
3969 }
3970
3971/*
3972 * FIXME : in theory, we can also get a UDC when a STO occurs.
3973 */
3974 if (sstat0_sist0 & SSTAT0_UDC) {
3975 fatal = 1;
3976 if (cmd) {
3977 printk("scsi%d : target %d lun %d unexpected disconnect\n",
3978 host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
3979 print_lots (host);
3980 abnormal_finished(cmd, DID_ERROR << 16);
3981 } else
3982 printk("scsi%d : unexpected disconnect (no command)\n",
3983 host->host_no);
3984
3985 hostdata->dsp = (u32 *) hostdata->schedule;
3986 hostdata->dsp_changed = 1;
3987 }
3988
3989 /* SCSI PARITY error */
3990 if (sstat0_sist0 & SSTAT0_PAR) {
3991 fatal = 1;
3992 if (cmd && cmd->cmd) {
3993 printk("scsi%d : target %d lun %d parity error.\n",
3994 host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
3995 abnormal_finished (cmd, DID_PARITY << 16);
3996 } else
3997 printk("scsi%d : parity error\n", host->host_no);
3998 /* Should send message out, parity error */
3999
4000 /* XXX - Reduce synchronous transfer rate! */
4001 hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
4002 sizeof(u32);
4003 hostdata->dsp_changed = 1;
4004 }
4005
4006 /* SCSI GROSS error */
4007 if (sstat0_sist0 & SSTAT0_SGE) {
4008 fatal = 1;
4009 printk("scsi%d : gross error, saved2_dsa = 0x%x\n", host->host_no,
4010 (unsigned int)hostdata->saved2_dsa);
4011 print_lots (host);
4012
4013 /*
4014 * A SCSI gross error may occur when we have
4015 *
4016 * - A synchronous offset which causes the SCSI FIFO to be overwritten.
4017 *
4018 * - A REQ which causes the maximum synchronous offset programmed in
4019 * the SXFER register to be exceeded.
4020 *
4021 * - A phase change with an outstanding synchronous offset.
4022 *
4023 * - Residual data in the synchronous data FIFO when a transfer
4024 * other than a synchronous receive is started.
4025 */
4026
4027
4028 /* XXX Should deduce synchronous transfer rate! */
4029 hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
4030 sizeof(u32);
4031 hostdata->dsp_changed = 1;
4032 }
4033
4034 /* Phase mismatch */
4035 if (sstat0_sist0 & SSTAT0_MA) {
4036 fatal = 1;
4037 if (hostdata->options & OPTION_DEBUG_INTR)
4038 printk ("scsi%d : SSTAT0_MA\n", host->host_no);
4039 intr_phase_mismatch (host, cmd);
4040 }
4041
4042#if 0
4043 if (sstat0_sist0 & SIST0_800_RSL)
4044 printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
4045#endif
4046
4047/*
4048 * If a fatal SCSI interrupt occurs, we must ensure that the DMA and
4049 * SCSI FIFOs were flushed.
4050 */
4051
4052 if (fatal) {
4053 if (!hostdata->dstat_valid) {
4054 hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
4055 hostdata->dstat_valid = 1;
4056 }
4057
4058 if (!(hostdata->dstat & DSTAT_DFE)) {
4059 printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
4060 /*
4061 * Really need to check this code for 710 RGH.
4062 * Haven't seen any problems, but maybe we should FLUSH before
4063 * clearing sometimes.
4064 */
4065 NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
4066 while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
4067 ;
4068 hostdata->dstat |= DSTAT_DFE;
4069 }
4070 }
4071}
4072
4073#ifdef CYCLIC_TRACE
4074
4075/*
4076 * The following implements a cyclic log of instructions executed, if you turn
4077 * TRACE on. It will also print the log for you. Very useful when debugging
4078 * 53c710 support, possibly not really needed any more.
4079 */
4080
4081u32 insn_log[4096];
4082u32 insn_log_index = 0;
4083
4084void log1 (u32 i)
4085{
4086 insn_log[insn_log_index++] = i;
4087 if (insn_log_index == 4096)
4088 insn_log_index = 0;
4089}
4090
4091void log_insn (u32 *ip)
4092{
4093 log1 ((u32)ip);
4094 log1 (*ip);
4095 log1 (*(ip+1));
4096 if (((*ip >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
4097 log1 (*(ip+2));
4098}
4099
4100void dump_log(void)
4101{
4102 int cnt = 0;
4103 int i = insn_log_index;
4104 int size;
4105 struct Scsi_Host *host = first_host;
4106
4107 while (cnt < 4096) {
4108 printk ("%08x (+%6x): ", insn_log[i], (insn_log[i] - (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4);
4109 if (++i == 4096)
4110 i = 0;
4111 cnt++;
4112 if (((insn_log[i] >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
4113 size = 3;
4114 else
4115 size = 2;
4116 while (size--) {
4117 printk ("%08x ", insn_log[i]);
4118 if (++i == 4096)
4119 i = 0;
4120 cnt++;
4121 }
4122 printk ("\n");
4123 }
4124}
4125#endif
4126
4127
4128/*
4129 * Function : static void NCR53c7x0_intfly (struct Scsi_Host *host)
4130 *
4131 * Purpose : Scan command queue for specified host, looking for completed
4132 * commands.
4133 *
4134 * Inputs : Scsi_Host pointer.
4135 *
4136 * This is called from the interrupt handler, when a simulated INTFLY
4137 * interrupt occurs.
4138 */
4139
4140static void
4141NCR53c7x0_intfly (struct Scsi_Host *host)
4142{
4143 NCR53c7x0_local_declare();
4144 struct NCR53c7x0_hostdata *hostdata; /* host->hostdata[0] */
4145 struct NCR53c7x0_cmd *cmd, /* command which halted */
4146 **cmd_prev_ptr;
4147 unsigned long flags;
4148 char search_found = 0; /* Got at least one ? */
4149
4150 hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
4151 NCR53c7x0_local_setup(host);
4152
4153 if (hostdata->options & OPTION_DEBUG_INTR)
4154 printk ("scsi%d : INTFLY\n", host->host_no);
4155
4156 /*
4157 * Traverse our list of running commands, and look
4158 * for those with valid (non-0xff) status and message
4159 * bytes encoded in the result, which signify command
4160 * completion.
4161 */
4162
4163 local_irq_save(flags);
4164restart:
4165 for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)&(hostdata->running_list),
4166 cmd = (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
4167 cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
4168 cmd = (struct NCR53c7x0_cmd *) cmd->next)
4169 {
4170 Scsi_Cmnd *tmp;
4171
4172 if (!cmd) {
4173 printk("scsi%d : very weird.\n", host->host_no);
4174 break;
4175 }
4176
4177 if (!(tmp = cmd->cmd)) {
4178 printk("scsi%d : weird. NCR53c7x0_cmd has no Scsi_Cmnd\n",
4179 host->host_no);
4180 continue;
4181 }
4182 /* Copy the result over now; may not be complete,
4183 * but subsequent tests may as well be done on
4184 * cached memory.
4185 */
4186 tmp->result = cmd->result;
4187
4188 if (((tmp->result & 0xff) == 0xff) ||
4189 ((tmp->result & 0xff00) == 0xff00))
4190 continue;
4191
4192 search_found = 1;
4193
4194 if (cmd->bounce.len)
4195 memcpy ((void *)cmd->bounce.addr,
4196 (void *)cmd->bounce.buf, cmd->bounce.len);
4197
4198 /* Important - remove from list _before_ done is called */
4199 if (cmd_prev_ptr)
4200 *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;
4201
4202 --hostdata->busy[tmp->device->id][tmp->device->lun];
4203 cmd->next = hostdata->free;
4204 hostdata->free = cmd;
4205
4206 tmp->host_scribble = NULL;
4207
4208 if (hostdata->options & OPTION_DEBUG_INTR) {
4209 printk ("scsi%d : command complete : pid %lu, id %d,lun %d result 0x%x ",
4210 host->host_no, tmp->pid, tmp->device->id, tmp->device->lun, tmp->result);
4211 __scsi_print_command (tmp->cmnd);
4212 }
4213
4214 tmp->scsi_done(tmp);
4215 goto restart;
4216 }
4217 local_irq_restore(flags);
4218
4219 if (!search_found) {
4220 printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
4221 host->host_no);
4222 } else {
4223 run_process_issue_queue();
4224 }
4225 return;
4226}
4227
4228/*
4229 * Function : static irqreturn_t NCR53c7x0_intr (int irq, void *dev_id)
4230 *
4231 * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
4232 * the same IRQ line.
4233 *
4234 * Inputs : Since we're using the IRQF_DISABLED interrupt handler
4235 * semantics, irq indicates the interrupt which invoked
4236 * this handler.
4237 *
4238 * On the 710 we simulate an INTFLY with a script interrupt, and the
4239 * script interrupt handler will call back to this function.
4240 */
4241
4242static irqreturn_t
4243NCR53c7x0_intr (int irq, void *dev_id)
4244{
4245 NCR53c7x0_local_declare();
4246 struct Scsi_Host *host; /* Host we are looking at */
4247 unsigned char istat; /* Values of interrupt regs */
4248 struct NCR53c7x0_hostdata *hostdata; /* host->hostdata[0] */
4249 struct NCR53c7x0_cmd *cmd; /* command which halted */
4250 u32 *dsa; /* DSA */
4251 int handled = 0;
4252
4253#ifdef NCR_DEBUG
4254 char buf[80]; /* Debugging sprintf buffer */
4255 size_t buflen; /* Length of same */
4256#endif
4257
4258 host = (struct Scsi_Host *)dev_id;
4259 hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
4260 NCR53c7x0_local_setup(host);
4261
4262 /*
4263 * Only read istat once per loop, since reading it again will unstack
4264 * interrupts
4265 */
4266
4267 while ((istat = NCR53c7x0_read8(hostdata->istat)) & (ISTAT_SIP|ISTAT_DIP)) {
4268 handled = 1;
4269 hostdata->dsp_changed = 0;
4270 hostdata->dstat_valid = 0;
4271 hostdata->state = STATE_HALTED;
4272
4273 if (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK)
4274 printk ("scsi%d : SCSI FIFO not empty\n", host->host_no);
4275
4276 /*
4277 * NCR53c700 and NCR53c700-66 change the current SCSI
4278 * process, hostdata->ncrcurrent, in the Linux driver, so
4279 * cmd = hostdata->ncrcurrent.
4280 *
4281 * With other chips, we must look through the commands
4282 * executing and find the command structure which
4283 * corresponds to the DSA register.
4284 */
4285
4286 if (hostdata->options & OPTION_700) {
4287 cmd = (struct NCR53c7x0_cmd *) hostdata->ncrcurrent;
4288 } else {
4289 dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
4290 for (cmd = (struct NCR53c7x0_cmd *) hostdata->running_list;
4291 cmd && (dsa + (hostdata->dsa_start / sizeof(u32))) != cmd->dsa;
4292 cmd = (struct NCR53c7x0_cmd *)(cmd->next))
4293 ;
4294 }
4295 if (hostdata->options & OPTION_DEBUG_INTR) {
4296 if (cmd) {
4297 printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
4298 host->host_no, cmd->cmd->pid, (int) cmd->cmd->device->id,
4299 (int) cmd->cmd->device->lun);
4300 __scsi_print_command (cmd->cmd->cmnd);
4301 } else {
4302 printk("scsi%d : no active command\n", host->host_no);
4303 }
4304 }
4305
4306 if (istat & ISTAT_SIP) {
4307 if (hostdata->options & OPTION_DEBUG_INTR)
4308 printk ("scsi%d : ISTAT_SIP\n", host->host_no);
4309 intr_scsi (host, cmd);
4310 }
4311
4312 if (istat & ISTAT_DIP) {
4313 if (hostdata->options & OPTION_DEBUG_INTR)
4314 printk ("scsi%d : ISTAT_DIP\n", host->host_no);
4315 intr_dma (host, cmd);
4316 }
4317
4318 if (!hostdata->dstat_valid) {
4319 hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
4320 hostdata->dstat_valid = 1;
4321 }
4322
4323 if (!(hostdata->dstat & DSTAT_DFE)) {
4324 printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
4325 /* Really need to check this out for 710 RGH */
4326 NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
4327 while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
4328 ;
4329 hostdata->dstat |= DSTAT_DFE;
4330 }
4331
4332 if (!hostdata->idle && hostdata->state == STATE_HALTED) {
4333 if (!hostdata->dsp_changed)
4334 hostdata->dsp = (u32 *)bus_to_virt(NCR53c7x0_read32(DSP_REG));
4335#if 0
4336 printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
4337 host->host_no, virt_to_bus(hostdata->dsp), hostdata->dsp);
4338#endif
4339
4340 hostdata->state = STATE_RUNNING;
4341 NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
4342 if (hostdata->options & OPTION_DEBUG_TRACE) {
4343#ifdef CYCLIC_TRACE
4344 log_insn (hostdata->dsp);
4345#else
4346 print_insn (host, hostdata->dsp, "t ", 1);
4347#endif
4348 NCR53c7x0_write8 (DCNTL_REG,
4349 hostdata->saved_dcntl | DCNTL_SSM | DCNTL_STD);
4350 }
4351 }
4352 }
4353 return IRQ_HANDLED;
4354}
4355
4356
4357/*
4358 * Function : static int abort_connected (struct Scsi_Host *host)
4359 *
4360 * Purpose : Assuming that the NCR SCSI processor is currently
4361 * halted, break the currently established nexus. Cleanup
4362 * of the NCR53c7x0_cmd and Scsi_Cmnd structures should
4363 * be done on receipt of the abort interrupt.
4364 *
4365 * Inputs : host - SCSI host
4366 *
4367 */
4368
4369static int
4370abort_connected (struct Scsi_Host *host) {
4371#ifdef NEW_ABORT
4372 NCR53c7x0_local_declare();
4373#endif
4374 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
4375 host->hostdata[0];
4376/* FIXME : this probably should change for production kernels; at the
4377 least, counter should move to a per-host structure. */
4378 static int counter = 5;
4379#ifdef NEW_ABORT
4380 int sstat, phase, offset;
4381 u32 *script;
4382 NCR53c7x0_local_setup(host);
4383#endif
4384
4385 if (--counter <= 0) {
4386 disable(host);
4387 return 0;
4388 }
4389
4390 printk ("scsi%d : DANGER : abort_connected() called\n",
4391 host->host_no);
4392
4393#ifdef NEW_ABORT
4394
4395/*
4396 * New strategy : Rather than using a generic abort routine,
4397 * we'll specifically try to source or sink the appropriate
4398 * amount of data for the phase we're currently in (taking into
4399 * account the current synchronous offset)
4400 */
4401
4402 sstat = NCR53c7x0_read8 (SSTAT2_REG);
4403 offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
4404 phase = sstat & SSTAT2_PHASE_MASK;
4405
4406/*
4407 * SET ATN
4408 * MOVE source_or_sink, WHEN CURRENT PHASE
4409 * < repeat for each outstanding byte >
4410 * JUMP send_abort_message
4411 */
4412
4413 script = hostdata->abort_script = kmalloc (
4414 8 /* instruction size */ * (
4415 1 /* set ATN */ +
4416 (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
4417 1 /* send abort message */),
4418 GFP_ATOMIC);
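    /*
     * Note that this NEW_ABORT path never fills the buffer in.  A
     * minimal sketch of the intended contents, assuming the instruction
     * encodings used elsewhere in this file (only the trailing JUMP is
     * spelled out, since the SET ATN and per-byte MOVE encodings do not
     * appear in this part of the driver), jumping for illustration to
     * the initiator-abort entry used by the non-NEW_ABORT path below :
     *
     *	u32 *p = script + 2 * (1 + (offset ? offset : 1));
     *	p[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
     *	p[1] = virt_to_bus (hostdata->script) + hostdata->E_initiator_abort;
     *
     * followed by hostdata->dsp = script.
     */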
4419
4420
4421#else /* def NEW_ABORT */
4422 hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
4423 sizeof(u32);
4424#endif /* def NEW_ABORT */
4425 hostdata->dsp_changed = 1;
4426
4427/* XXX - need to flag the command as aborted after the abort_connected
4428 code runs
4429 */
4430 return 0;
4431}
4432
4433/*
4434 * Function : static int datapath_residual (struct Scsi_Host *host)
4435 *
4436 * Purpose : return residual data count of what's in the chip.
4437 *
4438 * Inputs : host - SCSI host
4439 */
4440
4441static int
4442datapath_residual (struct Scsi_Host *host) {
4443 NCR53c7x0_local_declare();
4444 int count, synchronous, sstat;
4445 unsigned int ddir;
4446
4447 NCR53c7x0_local_setup(host);
4448 /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
4449 count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
4450 (NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
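    /* The masked difference between the DFIFO byte offset and the low
       bits of DBC gives the number of bytes still in the DMA FIFO;
       the SCSI-side FIFO contributions are added below. */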
4451 synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
4452 /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
4453 ddir = NCR53c7x0_read8 (CTEST0_REG_700) & CTEST0_700_DDIR;
4454
4455 if (ddir) {
4456 /* Receive */
4457 if (synchronous)
4458 count += (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
4459 else
4460 if (NCR53c7x0_read8 (SSTAT1_REG) & SSTAT1_ILF)
4461 ++count;
4462 } else {
4463 /* Send */
4464 sstat = NCR53c7x0_read8 (SSTAT1_REG);
4465 if (sstat & SSTAT1_OLF)
4466 ++count;
4467 if (synchronous && (sstat & SSTAT1_ORF))
4468 ++count;
4469 }
4470 return count;
4471}
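/*
 * The count returned here is consumed by intr_phase_mismatch(), which
 * folds it into the patched block move :
 *
 *	cmd->residual[2] = dbc_dcmd + residual;
 *	cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
 */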
4472
4473/*
4474 * Function : static const char * sbcl_to_phase (int sbcl)
4475 *
4476 * Purpose : Convert SBCL register to user-parsable phase representation
4477 *
4478 * Inputs : sbcl - value of sbcl register
4479 */
4480
4481
4482static const char *
4483sbcl_to_phase (int sbcl) {
4484 switch (sbcl & SBCL_PHASE_MASK) {
4485 case SBCL_PHASE_DATAIN:
4486 return "DATAIN";
4487 case SBCL_PHASE_DATAOUT:
4488 return "DATAOUT";
4489 case SBCL_PHASE_MSGIN:
4490 return "MSGIN";
4491 case SBCL_PHASE_MSGOUT:
4492 return "MSGOUT";
4493 case SBCL_PHASE_CMDOUT:
4494 return "CMDOUT";
4495 case SBCL_PHASE_STATIN:
4496 return "STATUSIN";
4497 default:
4498 return "unknown";
4499 }
4500}
4501
4502/*
4503 * Function : static const char * sstat2_to_phase (int sstat)
4504 *
4505 * Purpose : Convert SSTAT2 register to user-parsable phase representation
4506 *
4507 * Inputs : sstat - value of sstat register
4508 */
4509
4510
4511static const char *
4512sstat2_to_phase (int sstat) {
4513 switch (sstat & SSTAT2_PHASE_MASK) {
4514 case SSTAT2_PHASE_DATAIN:
4515 return "DATAIN";
4516 case SSTAT2_PHASE_DATAOUT:
4517 return "DATAOUT";
4518 case SSTAT2_PHASE_MSGIN:
4519 return "MSGIN";
4520 case SSTAT2_PHASE_MSGOUT:
4521 return "MSGOUT";
4522 case SSTAT2_PHASE_CMDOUT:
4523 return "CMDOUT";
4524 case SSTAT2_PHASE_STATIN:
4525 return "STATUSIN";
4526 default:
4527 return "unknown";
4528 }
4529}
4530
4531/*
4532 * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
4533 * struct NCR53c7x0_cmd *cmd)
4534 *
4535 * Purpose : Handle phase mismatch interrupts
4536 *
4537 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
4538 * may be NULL.
4539 *
4540 * Side effects : The abort_connected() routine is called or the NCR chip
4541 * is restarted, jumping to the command_complete entry point, or
4542 * patching the address and transfer count of the current instruction
4543 * and calling the msg_in entry point as appropriate.
4544 */
4545
4546static void
4547intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
4548 NCR53c7x0_local_declare();
4549 u32 dbc_dcmd, *dsp, *dsp_next;
4550 unsigned char dcmd, sbcl;
4551 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
4552 host->hostdata[0];
4553 int residual;
4554 enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
4555 ACTION_ABORT_PRINT;
4556 const char *where = NULL;
4557
4558 NCR53c7x0_local_setup(host);
4559
4560 /*
4561 * Corrective action is based on where in the SCSI SCRIPT(tm) the error
4562 * occurred, as well as which SCSI phase we are currently in.
4563 */
4564 dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));
4565
4566 /*
4567 * Fetch the current instruction, and remove the operands for easier
4568 * interpretation.
4569 */
4570 dbc_dcmd = NCR53c7x0_read32(DBC_REG);
4571 dcmd = (dbc_dcmd & 0xff000000) >> 24;
4572 /*
4573 * Like other processors, the NCR adjusts the instruction pointer before
4574 * instruction decode. Set the DSP address back to what it should
4575 * be for this instruction based on its size (2 or 3 32 bit words).
4576 */
4577 dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
4578
4579
4580 /*
4581 * Read new SCSI phase from the SBCL lines. Since all of our code uses
4582 * a WHEN conditional instead of an IF conditional, we don't need to
4583 * wait for a new REQ.
4584 */
4585 sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;
4586
4587 if (!cmd) {
4588 action = ACTION_ABORT_PRINT;
4589 where = "no current command";
4590 /*
4591 * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
4592 * mismatches should only occur where we're doing a multi-byte
4593 * BMI instruction. Specifically, this means
4594 *
4595 * - select messages (a SCSI-I target may ignore additional messages
4596 * after the IDENTIFY; any target may reject a SDTR or WDTR)
4597 *
4598 * - command out (targets may send a message to signal an error
4599 * condition, or go into STATUSIN after they've decided
4600 * they don't like the command.)
4601 *
4602 * - reply_message (targets may reject a multi-byte message in the
4603 * middle)
4604 *
4605 * - data transfer routines (command completion with buffer space
4606 * left, disconnect message, or error message)
4607 */
4608 } else if (((dsp >= cmd->data_transfer_start &&
4609 dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
4610 if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
4611 DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
4612 DCMD_BMI_OP_MOVE_I)) {
4613 residual = datapath_residual (host);
4614 if (hostdata->options & OPTION_DEBUG_DISCONNECT)
4615 printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
4616 host->host_no, residual);
4617
4618 /*
4619 * The first instruction is a CALL to the alternate handler for
4620 * this data transfer phase, so we can do calls to
4621 * munge_msg_restart as we would if control were passed
4622 * from normal dynamic code.
4623 */
4624 if (dsp != cmd->residual + 2) {
4625 cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
4626 ((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
4627 DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
4628 cmd->residual[1] = virt_to_bus(hostdata->script)
4629 + ((dcmd & DCMD_BMI_IO)
4630 ? hostdata->E_other_in : hostdata->E_other_out);
4631 }
4632
4633 /*
4634 * The second instruction is the data transfer block
4635 * move instruction, reflecting the pointer and count at the
4636 * time of the phase mismatch.
4637 */
4638 cmd->residual[2] = dbc_dcmd + residual;
4639 cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;
4640
4641 /*
4642 * The third and final instruction is a jump to the instruction
4643 * which follows the instruction which had to be 'split'
4644 */
4645 if (dsp != cmd->residual + 2) {
4646 cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
4647 << 24) | DBC_TCI_TRUE;
4648 cmd->residual[5] = virt_to_bus(dsp_next);
4649 }
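	    /*
	     * Resulting layout of cmd->residual[] :
	     *	[0..1] conditional CALL to the other_in / other_out handler
	     *	[2..3] block move with the adjusted count and address
	     *	[4..5] JUMP to the instruction following the one that was split
	     */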
4650
4651 /*
4652 * For the sake of simplicity, transfer control to the
4653 * conditional CALL at the start of the residual buffer.
4654 */
4655 hostdata->dsp = cmd->residual;
4656 hostdata->dsp_changed = 1;
4657 action = ACTION_CONTINUE;
4658 } else {
4659 where = "non-BMI dynamic DSA code";
4660 action = ACTION_ABORT_PRINT;
4661 }
4662 } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4 + 2)) {
4663 /* RGH 290697: Added +2 above, to compensate for the script
4664 * instruction which disables the selection timer. */
4665 /* Release ATN */
4666 NCR53c7x0_write8 (SOCL_REG, 0);
4667 switch (sbcl) {
4668 /*
4669 * Some devices (SQ555 come to mind) grab the IDENTIFY message
4670 * sent on selection, and decide to go into COMMAND OUT phase
4671 * rather than accepting the rest of the messages or rejecting
4672 * them. Handle these devices gracefully.
4673 */
4674 case SBCL_PHASE_CMDOUT:
4675 hostdata->dsp = dsp + 2 /* two _words_ */;
4676 hostdata->dsp_changed = 1;
4677 printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
4678 host->host_no, cmd->cmd->device->id);
4679 cmd->flags &= ~CMD_FLAG_SDTR;
4680 action = ACTION_CONTINUE;
4681 break;
4682 case SBCL_PHASE_MSGIN:
4683 hostdata->dsp = hostdata->script + hostdata->E_msg_in /
4684 sizeof(u32);
4685 hostdata->dsp_changed = 1;
4686 action = ACTION_CONTINUE;
4687 break;
4688 default:
4689 where="select message out";
4690 action = ACTION_ABORT_PRINT;
4691 }
4692 /*
4693 * Some SCSI devices will interpret a command as they read the bytes
4694 * off the SCSI bus, and may decide that the command is Bogus before
4695 * they've read the entire command off the bus.
4696 */
4697 } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
4698 (u32)) {
4699 hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
4700 sizeof (u32);
4701 hostdata->dsp_changed = 1;
4702 action = ACTION_CONTINUE;
4703 /* FIXME : we need to handle message reject, etc. within msg_respond. */
4704#ifdef notyet
4705 } else if (dsp == hostdata->script + hostdata->E_reply_message) {
4706 switch (sbcl) {
4707 /* Any other phase mismatches abort the currently executing command. */
4708#endif
4709 } else {
4710 where = "unknown location";
4711 action = ACTION_ABORT_PRINT;
4712 }
4713
4714 /* Flush DMA FIFO */
4715 if (!hostdata->dstat_valid) {
4716 hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
4717 hostdata->dstat_valid = 1;
4718 }
4719 if (!(hostdata->dstat & DSTAT_DFE)) {
4720 /* Really need to check this out for 710 RGH */
4721 NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
4722 while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF);
4723 hostdata->dstat |= DSTAT_DFE;
4724 }
4725
4726 switch (action) {
4727 case ACTION_ABORT_PRINT:
4728 printk("scsi%d : %s : unexpected phase %s.\n",
4729 host->host_no, where ? where : "unknown location",
4730 sbcl_to_phase(sbcl));
4731 print_lots (host);
4732 /* Fall through to ACTION_ABORT */
4733 case ACTION_ABORT:
4734 abort_connected (host);
4735 break;
4736 case ACTION_CONTINUE:
4737 break;
4738 }
4739
4740#if 0
4741 if (hostdata->dsp_changed) {
4742 printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
4743 print_insn (host, hostdata->dsp, "", 1);
4744 }
4745#endif
4746}
4747
4748/*
4749 * Function : static void intr_bf (struct Scsi_Host *host,
4750 * struct NCR53c7x0_cmd *cmd)
4751 *
4752 * Purpose : handle BUS FAULT interrupts
4753 *
4754 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
4755 * may be NULL.
4756 */
4757
4758static void
4759intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
4760 NCR53c7x0_local_declare();
4761 u32 *dsp,
4762 *next_dsp, /* Current dsp */
4763 *dsa,
4764 dbc_dcmd; /* DCMD (high eight bits) + DBC */
4765 char *reason = NULL;
4766 /* Default behavior is for a silent error, with retries until we've
4767 exhausted them. */
4768 enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
4769 int report = 0;
4770 NCR53c7x0_local_setup(host);
4771
4772 dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
4773 next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
4774 dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
4775/* FIXME - check chip type */
4776 dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
4777
4778 /*
4779 * Bus faults can be caused by either a Bad Address or
4780 * Target Abort. We should check the Received Target Abort
4781 * bit of the PCI status register and the Master Abort bit.
4782 *
4783 * - Master Abort bit indicates that no device claimed
4784 * the address with DEVSEL within five clocks
4785 *
4786 * - Target Abort bit indicates that a target claimed it,
4787 * but changed its mind once it saw the byte enables.
4788 *
4789 */
4790
4791 /* 53c710, not PCI system */
4792 report = 1;
4793 reason = "Unknown";
4794
4795#ifndef notyet
4796 report = 1;
4797#endif
4798 if (report && reason)
4799 {
4800 printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
4801 host->host_no, reason ? reason : "unknown");
4802 print_lots (host);
4803 }
4804
4805#ifndef notyet
4806 retry = NEVER;
4807#endif
4808
4809 /*
4810 * TODO : we should attempt to recover from any spurious bus
4811 * faults. After X retries, we should figure that things are
4812 * sufficiently wedged, and call NCR53c7xx_reset.
4813 *
4814 * This code should only get executed once we've decided that we
4815 * cannot retry.
4816 */
4817
4818 if (retry == NEVER) {
4819 printk(KERN_ALERT " mail richard@sleepie.demon.co.uk\n");
4820 FATAL (host);
4821 }
4822}
4823
4824/*
4825 * Function : static void intr_dma (struct Scsi_Host *host,
4826 * struct NCR53c7x0_cmd *cmd)
4827 *
4828 * Purpose : handle all DMA interrupts, indicated by the setting
4829 * of the DIP bit in the ISTAT register.
4830 *
4831 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
4832 * may be NULL.
4833 */
4834
4835static void
4836intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
4837 NCR53c7x0_local_declare();
4838 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
4839 host->hostdata[0];
4840 unsigned char dstat; /* DSTAT */
4841 u32 *dsp,
4842 *next_dsp, /* Current dsp */
4843 *dsa,
4844 dbc_dcmd; /* DCMD (high eight bits) + DBC */
4845 int tmp;
4846 unsigned long flags;
4847 NCR53c7x0_local_setup(host);
4848
4849 if (!hostdata->dstat_valid) {
4850 hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
4851 hostdata->dstat_valid = 1;
4852 }
4853
4854 dstat = hostdata->dstat;
4855
4856 if (hostdata->options & OPTION_DEBUG_INTR)
4857 printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);
4858
4859 dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
4860 next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
4861 dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
4862/* XXX - check chip type */
4863 dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
4864
4865 /*
4866 * DSTAT_ABRT is the aborted interrupt. This is set whenever the
4867 * SCSI chip is aborted.
4868 *
4869 * With NCR53c700 and NCR53c700-66 style chips, we should only
4870 * get this when the chip is currently running the accept
4871 * reselect/select code and we have set the abort bit in the
4872 * ISTAT register.
4873 *
4874 */
4875
4876 if (dstat & DSTAT_ABRT) {
4877#if 0
4878 /* XXX - add code here to deal with normal abort */
4879 if ((hostdata->options & OPTION_700) && (hostdata->state ==
4880 STATE_ABORTING)) {
4881 } else
4882#endif
4883 {
4884 printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
4885 " ", host->host_no);
4886 print_insn (host, dsp, KERN_ALERT "s ", 1);
4887 FATAL (host);
4888 }
4889 }
4890
4891 /*
4892 * DSTAT_SSI is the single step interrupt. Should be generated
4893 * whenever we have single stepped or are tracing.
4894 */
4895
4896 if (dstat & DSTAT_SSI) {
4897 if (hostdata->options & OPTION_DEBUG_TRACE) {
4898 /* Don't print instr. until we write DSP at end of intr function */
4899 } else if (hostdata->options & OPTION_DEBUG_SINGLE) {
4900 print_insn (host, dsp, "s ", 0);
4901 local_irq_save(flags);
4902/* XXX - should we do this, or can we get away with writing dsp? */
4903
4904 NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
4905 ~DCNTL_SSM) | DCNTL_STD);
4906 local_irq_restore(flags);
4907 } else {
4908 printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
4909 " ", host->host_no);
4910 print_insn (host, dsp, KERN_ALERT "", 1);
4911 printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
4912 FATAL (host);
4913 }
4914 }
4915
4916 /*
4917 * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
4918 * is different) is generated whenever an illegal instruction is
4919 * encountered.
4920 *
4921 * XXX - we may want to emulate INTFLY here, so we can use
4922 * the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
4923 * chips.
4924 */
4925
4926 if (dstat & DSTAT_OPC) {
4927 /*
4928 * Ascertain whether this IID interrupt occurred before or after a STO
4929 * interrupt. Since the interrupt handling code now leaves
4930 * DSP unmodified until _after_ all stacked interrupts have been
4931 * processed, reading the DSP returns the original DSP register.
4932 * This means that if dsp lies between the select code and the
4933 * message out following the selection code (where the IID interrupt
4934 * would have had to occur due to the implicit wait for REQ),
4935 * we have an IID interrupt resulting from a STO condition and
4936 * can ignore it.
4937 */
4938
4939 if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
4940 (dsp <= (hostdata->script + hostdata->E_select_msgout /
4941 sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
4942 if (hostdata->options & OPTION_DEBUG_INTR)
4943 printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
4944 host->host_no);
4945 if (hostdata->expecting_iid) {
4946 hostdata->expecting_iid = 0;
4947 hostdata->idle = 1;
4948 if (hostdata->test_running == 2) {
4949 hostdata->test_running = 0;
4950 hostdata->test_completed = 3;
4951 } else if (cmd)
4952 abnormal_finished (cmd, DID_BAD_TARGET << 16);
4953 } else {
4954 hostdata->expecting_sto = 1;
4955 }
4956 /*
4957 * We can't guarantee we'll be able to execute the WAIT DISCONNECT
4958 * instruction within the 3.4us of bus free and arbitration delay
4959 * that a target can RESELECT in and assert REQ after we've dropped
4960 * ACK. If this happens, we'll get an illegal instruction interrupt.
4961 * Doing away with the WAIT DISCONNECT instructions broke everything,
4962 * so instead I'll settle for moving one WAIT DISCONNECT a few
4963 * instructions closer to the CLEAR ACK before it to minimize the
4964 * chances of this happening, and handle it if it occurs anyway.
4965 *
4966 * Simply continue with what we were doing, and control should
4967 * be transferred to the schedule routine which will ultimately
4968 * pass control onto the reselection or selection (not yet)
4969 * code.
4970 */
4971 } else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
4972 SBCL_REQ)) {
4973 if (!(hostdata->options & OPTION_NO_PRINT_RACE))
4974 {
4975 printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
4976 host->host_no);
4977 hostdata->options |= OPTION_NO_PRINT_RACE;
4978 }
4979 } else {
4980 printk(KERN_ALERT "scsi%d : invalid instruction\n", host->host_no);
4981 print_lots (host);
4982 printk(KERN_ALERT " mail Richard@sleepie.demon.co.uk with ALL\n"
4983 " boot messages and diagnostic output\n");
4984 FATAL (host);
4985 }
4986 }
4987
4988 /*
4989 * DSTAT_BF indicates a bus fault error. DSTAT_800_BF is valid for the 710 also.
4990 */
4991
4992 if (dstat & DSTAT_800_BF) {
4993 intr_bf (host, cmd);
4994 }
4995
4996
4997 /*
4998 * DSTAT_SIR interrupts are generated by the execution of
4999 * the INT instruction. Since the exact values available
5000 * are determined entirely by the SCSI script running,
5001 * and are local to a particular script, a unique handler
5002 * is called for each script.
5003 */
5004
5005 if (dstat & DSTAT_SIR) {
5006 if (hostdata->options & OPTION_DEBUG_INTR)
5007 printk ("scsi%d : DSTAT_SIR\n", host->host_no);
5008 switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
5009 case SPECIFIC_INT_NOTHING:
5010 case SPECIFIC_INT_RESTART:
5011 break;
5012 case SPECIFIC_INT_ABORT:
5013 abort_connected(host);
5014 break;
5015 case SPECIFIC_INT_PANIC:
5016 printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
5017 print_insn (host, dsp, KERN_ALERT "", 1);
5018 printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
5019 FATAL (host);
5020 break;
5021 case SPECIFIC_INT_BREAK:
5022 intr_break (host, cmd);
5023 break;
5024 default:
5025 printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
5026 print_insn (host, dsp, KERN_ALERT "", 1);
5027 printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
5028 tmp);
5029 FATAL (host);
5030 }
5031 }
5032}
5033
5034/*
5035 * Function : static int print_insn (struct Scsi_Host *host,
5036 * u32 *insn, const char *prefix, int kernel)
5037 *
5038 * Purpose : print numeric representation of the instruction pointed
5039 * to by insn to the debugging or kernel message buffer
5040 * as appropriate.
5041 *
5042 * If desired, a user level program can interpret this
5043 * information.
5044 *
5045 * Inputs : host, insn - host, pointer to instruction, prefix -
5046 * string to prepend, kernel - use printk instead of debugging buffer.
5047 *
5048 * Returns : size, in u32s, of instruction printed.
5049 */
5050
5051/*
5052 * FIXME: should change kernel parameter so that it takes an ENUM
5053 * specifying severity - either KERN_ALERT or KERN_PANIC so
5054 * all panic messages are output with the same severity.
5055 */
5056
5057static int
5058print_insn (struct Scsi_Host *host, const u32 *insn,
5059 const char *prefix, int kernel) {
5060 char buf[160], /* Temporary buffer and pointer. ICKY
5061 arbitrary length. */
5062 *tmp;
5065 unsigned char dcmd; /* dcmd register for *insn */
5066 int size;
5067
5068 /*
5069 * Check to see if the instruction pointer is not bogus before
5070 * indirecting through it; avoiding red-zone at start of
5071 * memory.
5072 *
5073 * FIXME: icky magic needs to happen here on non-intel boxes which
5074 * don't have kernel memory mapped in like this. Might be reasonable
5075 * to use vverify()?
5076 */
5077
5078 if (virt_to_phys((void *)insn) < PAGE_SIZE ||
5079 virt_to_phys((void *)(insn + 8)) > virt_to_phys(high_memory) ||
5080 ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
5081 virt_to_phys((void *)(insn + 12)) > virt_to_phys(high_memory))) {
5082 size = 0;
5083 sprintf (buf, "%s%p: address out of range\n",
5084 prefix, insn);
5085 } else {
5086/*
5087 * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
5088 * it should take const void * as argument.
5089 */
5090#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
5091 sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
5092 (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
5093 insn[0], insn[1], bus_to_virt (insn[1]));
5094#else
5095 /* Remove virtual addresses to reduce output, as they are the same */
5096 sprintf(buf, "%s0x%x (+%x) : 0x%08x 0x%08x",
5097 (prefix ? prefix : ""), (u32)insn, ((u32)insn -
5098 (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4,
5099 insn[0], insn[1]);
5100#endif
5101 tmp = buf + strlen(buf);
5102 if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
5103#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
5104 sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
5105 bus_to_virt(insn[2]));
5106#else
5107 /* Remove virtual addr to reduce output, as it is the same */
5108 sprintf (tmp, " 0x%08x\n", insn[2]);
5109#endif
5110 size = 3;
5111 } else {
5112 sprintf (tmp, "\n");
5113 size = 2;
5114 }
5115 }
5116
5117 if (kernel)
5118 printk ("%s", buf);
5119#ifdef NCR_DEBUG
5120 else {
5121 size_t len = strlen(buf);
5122 debugger_kernel_write(host, buf, len);
5123 }
5124#endif
5125 return size;
5126}
5127
5128/*
5129 * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
5130 *
5131 * Purpose : Abort an errant SCSI command, doing all necessary
5132 * cleanup of the issue_queue, running_list, shared Linux/NCR
5133 * dsa issue and reconnect queues.
5134 *
5135 * Inputs : cmd - command to abort
5136 *
5137 * Returns : a SCSI_ABORT_* code (SUCCESS, BUSY, NOT_RUNNING, or ERROR).
5138 */
5139
5140int
5141NCR53c7xx_abort (Scsi_Cmnd *cmd) {
5142 NCR53c7x0_local_declare();
5143 struct Scsi_Host *host = cmd->device->host;
5144 struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
5145 host->hostdata[0] : NULL;
5146 unsigned long flags;
5147 struct NCR53c7x0_cmd *curr, **prev;
5148 Scsi_Cmnd *me, **last;
5149#if 0
5150 static long cache_pid = -1;
5151#endif
5152
5153
5154 if (!host) {
5155 printk ("Bogus SCSI command pid %ld; no host structure\n",
5156 cmd->pid);
5157 return SCSI_ABORT_ERROR;
5158 } else if (!hostdata) {
5159 printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
5160 return SCSI_ABORT_ERROR;
5161 }
5162 NCR53c7x0_local_setup(host);
5163
5164/*
5165 * CHECK : I don't think that reading ISTAT will unstack any interrupts,
5166 * since we need to write the INTF bit to clear it, and SCSI/DMA
5167 * interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
5168 *
5169 * See that this is the case. Appears to be correct on the 710, at least.
5170 *
5171 * I suspect that several of our failures may be coming from a new fatal
5172 * interrupt (possibly due to a phase mismatch) happening after we've left
5173 * the interrupt handler, but before the PIC has had the interrupt condition
5174 * cleared.
5175 */
5176
5177 if (NCR53c7x0_read8(hostdata->istat) & (ISTAT_DIP|ISTAT_SIP)) {
5178 printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
5179 cmd->pid);
5180 NCR53c7x0_intr (host->irq, host);
5181 return SCSI_ABORT_BUSY;
5182 }
5183
5184 local_irq_save(flags);
5185#if 0
5186 if (cache_pid == cmd->pid)
5187 panic ("scsi%d : bloody fetus %ld\n", host->host_no, cmd->pid);
5188 else
5189 cache_pid = cmd->pid;
5190#endif
5191
5192
5193/*
5194 * The command could be hiding in the issue_queue. This would be very
5195 * nice, as commands can't be moved from the high level driver's issue queue
5196 * into the shared queue until an interrupt routine is serviced, and this
5197 * moving is atomic.
5198 *
5199 * If this is the case, we don't have to worry about anything - we simply
5200 * pull the command out of the old queue, and call it aborted.
5201 */
5202
5203 for (me = (Scsi_Cmnd *) hostdata->issue_queue,
5204 last = (Scsi_Cmnd **) &(hostdata->issue_queue);
5205 me && me != cmd; last = (Scsi_Cmnd **)&(me->SCp.ptr),
5206 me = (Scsi_Cmnd *)me->SCp.ptr);
5207
5208 if (me) {
5209 *last = (Scsi_Cmnd *) me->SCp.ptr;
5210 if (me->host_scribble) {
5211 ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
5212 hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
5213 me->host_scribble = NULL;
5214 }
5215 cmd->result = DID_ABORT << 16;
5216 cmd->scsi_done(cmd);
5217 printk ("scsi%d : found command %ld in Linux issue queue\n",
5218 host->host_no, me->pid);
5219 local_irq_restore(flags);
5220 run_process_issue_queue();
5221 return SCSI_ABORT_SUCCESS;
5222 }
5223
5224/*
5225 * That failing, the command could be in our list of already executing
5226 * commands. If this is the case, drastic measures are called for.
5227 */
5228
5229 for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
5230 prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
5231 curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
5232 &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);
5233
5234 if (curr) {
5235 if ((curr->result & 0xff) != 0xff && (curr->result & 0xff00) != 0xff00) {
5236 cmd->result = curr->result;
5237 if (prev)
5238 *prev = (struct NCR53c7x0_cmd *) curr->next;
5239 curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
5240 cmd->host_scribble = NULL;
5241 hostdata->free = curr;
5242 cmd->scsi_done(cmd);
5243 printk ("scsi%d : found finished command %ld in running list\n",
5244 host->host_no, cmd->pid);
5245 local_irq_restore(flags);
5246 return SCSI_ABORT_NOT_RUNNING;
5247 } else {
5248 printk ("scsi%d : DANGER : command running, cannot abort.\n",
5249 cmd->device->host->host_no);
5250 local_irq_restore(flags);
5251 return SCSI_ABORT_BUSY;
5252 }
5253 }
5254
5255/*
5256 * And if we couldn't find it in any of our queues, it must have been
5257 * a dropped interrupt.
5258 */
5259
5260 curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
5261 if (curr) {
5262 curr->next = hostdata->free;
5263 hostdata->free = curr;
5264 cmd->host_scribble = NULL;
5265 }
5266
5267 if (curr == NULL || ((curr->result & 0xff00) == 0xff00) ||
5268 ((curr->result & 0xff) == 0xff)) {
5269 printk ("scsi%d : did this command ever run?\n", host->host_no);
5270 cmd->result = DID_ABORT << 16;
5271 } else {
5272 printk ("scsi%d : probably lost INTFLY, normal completion\n",
5273 host->host_no);
5274 cmd->result = curr->result;
5275/*
5276 * FIXME : We need to add an additional flag which indicates if a
5277 * command was ever counted as BUSY, so if we end up here we can
5278 * decrement the busy count if and only if it is necessary.
5279 */
5280 --hostdata->busy[cmd->device->id][cmd->device->lun];
5281 }
5282 local_irq_restore(flags);
5283 cmd->scsi_done(cmd);
5284
5285/*
5286 * We need to run process_issue_queue, since termination of this command
5287 * may allow another queued command to execute.
5288 */
5289 return SCSI_ABORT_NOT_RUNNING;
5290}
5291
5292/*
5293 * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd)
5294 *
5295 * Purpose : perform a hard reset of the SCSI bus and NCR
5296 * chip.
5297 *
5298 * Inputs : cmd - command which caused the SCSI RESET
5299 *
5300 * Returns : SCSI_RESET_SUCCESS on success.
5301 */
5302
5303int
5304NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
5305 NCR53c7x0_local_declare();
5306 unsigned long flags;
5307 int found = 0;
5308 struct NCR53c7x0_cmd * c;
5309 Scsi_Cmnd *tmp;
5310 /*
5311 * When we call scsi_done(), it's going to wake up anything sleeping on the
5312 * resources which were in use by the aborted commands, and we'll start to
5313 * get new commands.
5314 *
5315 * We can't let this happen until after we've re-initialized the driver
5316 * structures, and can't reinitialize those structures until after we've
5317 * dealt with their contents.
5318 *
5319 * So, we need to find all of the commands which were running, stick
5320 * them on a linked list of completed commands (we'll use the host_scribble
5321 * pointer), do our reinitialization, and then call the done function for
5322 * each command.
5323 */
5324 Scsi_Cmnd *nuke_list = NULL;
5325 struct Scsi_Host *host = cmd->device->host;
5326 struct NCR53c7x0_hostdata *hostdata =
5327 (struct NCR53c7x0_hostdata *) host->hostdata[0];
5328
5329 NCR53c7x0_local_setup(host);
5330 local_irq_save(flags);
5331 ncr_halt (host);
5332 print_lots (host);
5333 dump_events (host, 30);
5334 ncr_scsi_reset (host);
5335 for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
5336 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
5337 if (tmp == cmd) {
5338 found = 1;
5339 break;
5340 }
5341
5342 /*
5343 * If we didn't find the command which caused this reset in our running
5344 * list, then we've lost it. See that it terminates normally anyway.
5345 */
5346 if (!found) {
5347 c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
5348 if (c) {
5349 cmd->host_scribble = NULL;
5350 c->next = hostdata->free;
5351 hostdata->free = c;
5352 } else
5353 printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
5354 cmd->SCp.buffer = (struct scatterlist *) nuke_list;
5355 nuke_list = cmd;
5356 }
5357
5358 NCR53c7x0_driver_init (host);
5359 hostdata->soft_reset (host);
5360 if (hostdata->resets == 0)
5361 disable(host);
5362 else if (hostdata->resets != -1)
5363 --hostdata->resets;
5364 local_irq_restore(flags);
5365 for (; nuke_list; nuke_list = tmp) {
5366 tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
5367 nuke_list->result = DID_RESET << 16;
5368 nuke_list->scsi_done (nuke_list);
5369 }
5371 return SCSI_RESET_SUCCESS;
5372}
5373
5374/*
5375 * The NCR SDMS BIOS follows Annex A of the SCSI-CAM draft, and
5376 * therefore shares the scsicam_bios_param function.
5377 */
5378
5379/*
5380 * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
5381 *
5382 * Purpose : convert instructions stored at NCR pointer into data
5383 * pointer offset.
5384 *
5385 * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
5386 * DSP, or saved data pointer.
5387 *
5388 * Returns : offset on success, -1 on failure.
5389 */
5390
5391
5392static int
5393insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
5394 struct NCR53c7x0_hostdata *hostdata =
5395 (struct NCR53c7x0_hostdata *) cmd->device->host->hostdata[0];
5396 struct NCR53c7x0_cmd *ncmd =
5397 (struct NCR53c7x0_cmd *) cmd->host_scribble;
5398 int offset = 0, buffers;
5399 struct scatterlist *segment;
5400 char *ptr;
5401 int found = 0;
5402
5403/*
5404 * With the current code implementation, if the insn is inside dynamically
5405 * generated code, the data pointer will be the instruction preceding
5406 * the next transfer segment.
5407 */
5408
5409 if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
5410 ((insn >= ncmd->data_transfer_start &&
5411 insn < ncmd->data_transfer_end) ||
5412 (insn >= ncmd->residual &&
5413 insn < (ncmd->residual +
5414 sizeof(ncmd->residual))))) {
5415 ptr = bus_to_virt(insn[3]);
5416
5417 if ((buffers = cmd->use_sg)) {
5418 for (offset = 0,
5419 segment = (struct scatterlist *) cmd->request_buffer;
5420 buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
5421 (ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
5422 --buffers, offset += segment->length, ++segment)
5423#if 0
5424 printk("scsi%d: comparing 0x%p to 0x%p\n",
5425 cmd->device->host->host_no, saved, page_address(segment->page+segment->offset));
5426#else
5427 ;
5428#endif
5429 offset += ptr - ((char *)page_address(segment->page)+segment->offset);
5430 } else {
5431 found = 1;
5432 offset = ptr - (char *) (cmd->request_buffer);
5433 }
5434 } else if ((insn >= hostdata->script +
5435 hostdata->E_data_transfer / sizeof(u32)) &&
5436 (insn <= hostdata->script +
5437 hostdata->E_end_data_transfer / sizeof(u32))) {
5438 found = 1;
5439 offset = 0;
5440 }
5441 return found ? offset : -1;
5442}
5443
5444
5445
5446/*
5447 * Function : void print_progress (Scsi_Cmnd *cmd)
5448 *
5449 * Purpose : print the current location of the saved data pointer
5450 *
5451 * Inputs : cmd - command we are interested in
5452 *
5453 */
5454
5455static void
5456print_progress (Scsi_Cmnd *cmd) {
5457 NCR53c7x0_local_declare();
5458 struct NCR53c7x0_cmd *ncmd =
5459 (struct NCR53c7x0_cmd *) cmd->host_scribble;
5460 int offset, i;
5461 char *where;
5462 u32 *ptr;
5463 NCR53c7x0_local_setup (cmd->device->host);
5464
5465 if (check_address ((unsigned long) ncmd,sizeof (struct NCR53c7x0_cmd)) == 0)
5466 {
5467 printk("\nNCR53c7x0_cmd fields:\n");
5468 printk(" bounce.len=0x%x, addr=0x%0x, buf[]=0x%02x %02x %02x %02x\n",
5469 ncmd->bounce.len, ncmd->bounce.addr, ncmd->bounce.buf[0],
5470 ncmd->bounce.buf[1], ncmd->bounce.buf[2], ncmd->bounce.buf[3]);
5471 printk(" result=%04x, cdb[0]=0x%02x\n", ncmd->result, ncmd->cmnd[0]);
5472 }
5473
5474 for (i = 0; i < 2; ++i) {
5475 if (check_address ((unsigned long) ncmd,
5476 sizeof (struct NCR53c7x0_cmd)) == -1)
5477 continue;
5478 if (!i) {
5479 where = "saved";
5480 ptr = bus_to_virt(ncmd->saved_data_pointer);
5481 } else {
5482 where = "active";
5483 ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
5484 NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
5485 sizeof(u32));
5486 }
5487 offset = insn_to_offset (cmd, ptr);
5488
5489 if (offset != -1)
5490 printk ("scsi%d : %s data pointer at offset %d\n",
5491 cmd->device->host->host_no, where, offset);
5492 else {
5493 int size;
5494 printk ("scsi%d : can't determine %s data pointer offset\n",
5495 cmd->device->host->host_no, where);
5496 if (ncmd) {
5497 size = print_insn (cmd->device->host,
5498 bus_to_virt(ncmd->saved_data_pointer), "", 1);
5499 print_insn (cmd->device->host,
5500 bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
5501 "", 1);
5502 }
5503 }
5504 }
5505}
5506
5507
5508static void
5509print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
5510 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5511 host->hostdata[0];
5512 int i, len;
5513 char *ptr;
5514 Scsi_Cmnd *cmd;
5515
5516 if (check_address ((unsigned long) dsa, hostdata->dsa_end -
5517 hostdata->dsa_start) == -1) {
5518 printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
5519 return;
5520 }
5521 printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
5522 " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
5523 prefix ? prefix : "",
5524 host->host_no, virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
5525 dsa[hostdata->dsa_msgout / sizeof(u32)],
5526 dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
5527 bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));
5528
5529 /*
5530 * Only print messages if they're sane in length so we don't
5531 * blow the kernel printk buffer on something which won't buy us
5532 * anything.
5533 */
5534
5535 if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
5536 sizeof (hostdata->free->select))
5537 for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
5538 ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
5539 i > 0 && !check_address ((unsigned long) ptr, 1);
5540 ptr += len, i -= len) {
5541 printk(" ");
5542 len = spi_print_msg(ptr);
5543 printk("\n");
5544 if (!len)
5545 break;
5546 }
5547
5548 printk(" + %d : select_indirect = 0x%x\n",
5549 hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
5550 cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
5551 printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
5552 (u32) virt_to_bus(cmd));
5553 /* XXX Maybe we should access cmd->host_scribble->result here. RGH */
5554 if (cmd) {
5555 printk(" result = 0x%x, target = %d, lun = %d, cmd = ",
5556 cmd->result, cmd->device->id, cmd->device->lun);
5557 __scsi_print_command(cmd->cmnd);
5558 } else
5559 printk("\n");
5560 printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
5561 dsa[hostdata->dsa_next / sizeof(u32)]);
5562 if (cmd) {
5563 printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
5564 " script : ",
5565 host->host_no, cmd->device->id,
5566 hostdata->sync[cmd->device->id].sxfer_sanity,
5567 hostdata->sync[cmd->device->id].scntl3_sanity);
5568 for (i = 0; i < (sizeof(hostdata->sync[cmd->device->id].script) / 4); ++i)
5569 printk ("0x%x ", hostdata->sync[cmd->device->id].script[i]);
5570 printk ("\n");
5571 print_progress (cmd);
5572 }
5573}
5574/*
5575 * Function : void print_queues (Scsi_Host *host)
5576 *
5577 * Purpose : print the contents of the NCR issue and reconnect queues
5578 *
5579 * Inputs : host - SCSI host we are interested in
5580 *
5581 */
5582
5583static void
5584print_queues (struct Scsi_Host *host) {
5585 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5586 host->hostdata[0];
5587 u32 *dsa, *next_dsa;
5588 volatile u32 *ncrcurrent;
5589 int left;
5590 Scsi_Cmnd *cmd, *next_cmd;
5591 unsigned long flags;
5592
5593 printk ("scsi%d : issue queue\n", host->host_no);
5594
5595 for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
5596 left >= 0 && cmd;
5597 cmd = next_cmd) {
5598 next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
5599 local_irq_save(flags);
5600 if (cmd->host_scribble) {
 5601 if (check_address ((unsigned long) (cmd->host_scribble),
 5602 sizeof (struct NCR53c7x0_cmd)) == -1)
5603 printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
5604 host->host_no, cmd->pid);
5605 /* print_dsa does sanity check on address, no need to check */
5606 else
5607 print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
5608 -> dsa, "");
5609 } else
5610 printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
5611 host->host_no, cmd->pid, cmd->device->id, cmd->device->lun);
5612 local_irq_restore(flags);
5613 }
5614
5615 if (left <= 0) {
5616 printk ("scsi%d : loop detected in issue queue\n",
5617 host->host_no);
5618 }
5619
5620 /*
5621 * Traverse the NCR reconnect and start DSA structures, printing out
5622 * each element until we hit the end or detect a loop. Currently,
 5623 * the reconnect structure is a linked list, and the start structure
 5624 * is an array. Eventually, the start structure will become a
 5625 * list as well, since this simplifies the code.
5626 */
5627
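/*
 * A sketch of the schedule slot layout assumed by the loop below: each
 * slot is a two word SCRIPTS instruction, word 0 holding the opcode (a
 * NOP when the slot is free) and word 1 the bus address of the
 * command's dsa_begin code, from which the dsa itself is recovered:
 */
#if 0
	u32 *slot = (u32 *) hostdata->schedule + 2 * i;	/* i = slot index */
	if (slot[0] != hostdata->NOP_insn) {
		u32 *slot_dsa = bus_to_virt (slot[1] -
			(hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template));
	}
#endif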
5628 printk ("scsi%d : schedule dsa array :\n", host->host_no);
5629 for (left = host->can_queue, ncrcurrent = hostdata->schedule;
5630 left > 0; ncrcurrent += 2, --left)
5631 if (ncrcurrent[0] != hostdata->NOP_insn)
5632/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
5633 print_dsa (host, bus_to_virt (ncrcurrent[1] -
5634 (hostdata->E_dsa_code_begin -
5635 hostdata->E_dsa_code_template)), "");
5636 printk ("scsi%d : end schedule dsa array\n", host->host_no);
5637
5638 printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
5639
5640 for (left = host->can_queue,
5641 dsa = bus_to_virt (hostdata->reconnect_dsa_head);
5642 left >= 0 && dsa;
5643 dsa = next_dsa) {
5644 local_irq_save(flags);
5645 if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
5646 printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
5647 dsa);
5648 next_dsa = NULL;
5649 }
5650 else
5651 {
5652 next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
5653 print_dsa (host, dsa, "");
5654 }
5655 local_irq_restore(flags);
5656 }
5657 printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
5658 if (left < 0)
5659 printk("scsi%d: possible loop in ncr reconnect list\n",
5660 host->host_no);
5661}
5662
5663static void
5664print_lots (struct Scsi_Host *host) {
5665 NCR53c7x0_local_declare();
5666 struct NCR53c7x0_hostdata *hostdata =
5667 (struct NCR53c7x0_hostdata *) host->hostdata[0];
5668 u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
5669 unsigned char dcmd, sbcl;
5670 int i, size;
5671 NCR53c7x0_local_setup(host);
5672
5673 if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
5674 dbc_dcmd = NCR53c7x0_read32(DBC_REG);
5675 dcmd = (dbc_dcmd & 0xff000000) >> 24;
5676 dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
5677 dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
5678 sbcl = NCR53c7x0_read8 (SBCL_REG);
5679
5680 /*
5681 * For the 53c710, the following will report value 0 for SCNTL3
5682 * and STEST0 - we don't have these registers.
5683 */
5684 printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
5685 " DSA=0x%lx (virt 0x%p)\n"
5686 " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
5687 " SXFER=0x%x, SCNTL3=0x%x\n"
5688 " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
5689 " SCRATCH=0x%x, saved2_dsa=0x%0lx\n",
5690 host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
5691 bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
5692 virt_to_bus(dsa), dsa,
5693 NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
5694 bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
5695 (int) NCR53c7x0_read8(hostdata->dmode),
5696 (int) NCR53c7x0_read8(SXFER_REG),
5697 ((hostdata->chip / 100) == 8) ?
5698 (int) NCR53c7x0_read8(SCNTL3_REG_800) : 0,
5699 (sbcl & SBCL_BSY) ? "BSY " : "",
5700 (sbcl & SBCL_SEL) ? "SEL " : "",
5701 (sbcl & SBCL_REQ) ? "REQ " : "",
5702 sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
5703 SSTAT1_REG : SSTAT2_REG)),
5704 (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
5705 SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
5706 ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (STEST0_REG_800) :
5707 NCR53c7x0_read32(SCRATCHA_REG_800),
5708 hostdata->saved2_dsa);
5709 printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
5710 virt_to_bus(dsp), dsp);
5711 for (i = 6; i > 0; --i, dsp += size)
5712 size = print_insn (host, dsp, "", 1);
5713 if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
5714 if ((hostdata->chip / 100) == 8)
5715 printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
5716 host->host_no, NCR53c7x0_read8 (SDID_REG_800),
5717 NCR53c7x0_read8 (SSID_REG_800));
5718 else
5719 printk ("scsi%d : connected (SDID=0x%x)\n",
5720 host->host_no, NCR53c7x0_read8 (SDID_REG_700));
5721 print_dsa (host, dsa, "");
5722 }
5723
5724#if 1
5725 print_queues (host);
5726#endif
5727 }
5728}
5729
5730/*
5731 * Function : static int shutdown (struct Scsi_Host *host)
5732 *
5733 * Purpose : does a clean (we hope) shutdown of the NCR SCSI
 5734 * chip. Use prior to dumping core or unloading the NCR driver.
5735 *
5736 * Returns : 0 on success
5737 */
5738static int
5739shutdown (struct Scsi_Host *host) {
5740 NCR53c7x0_local_declare();
5741 unsigned long flags;
5742 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5743 host->hostdata[0];
5744 NCR53c7x0_local_setup(host);
5745 local_irq_save(flags);
5746/* Get in a state where we can reset the SCSI bus */
5747 ncr_halt (host);
5748 ncr_scsi_reset (host);
5749 hostdata->soft_reset(host);
5750
5751 disable (host);
5752 local_irq_restore(flags);
5753 return 0;
5754}
5755
5756/*
5757 * Function : void ncr_scsi_reset (struct Scsi_Host *host)
5758 *
5759 * Purpose : reset the SCSI bus.
5760 */
5761
5762static void
5763ncr_scsi_reset (struct Scsi_Host *host) {
5764 NCR53c7x0_local_declare();
5765 unsigned long flags;
5766 NCR53c7x0_local_setup(host);
5767 local_irq_save(flags);
5768 NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
5769 udelay(25); /* Minimum amount of time to assert RST */
5770 NCR53c7x0_write8(SCNTL1_REG, 0);
5771 local_irq_restore(flags);
5772}
5773
5774/*
5775 * Function : void hard_reset (struct Scsi_Host *host)
 5776 * Purpose : reset the SCSI bus, then reinitialize the chip and driver state.
 5777 */
5778
5779static void
5780hard_reset (struct Scsi_Host *host) {
5781 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5782 host->hostdata[0];
5783 unsigned long flags;
5784 local_irq_save(flags);
5785 ncr_scsi_reset(host);
5786 NCR53c7x0_driver_init (host);
5787 if (hostdata->soft_reset)
5788 hostdata->soft_reset (host);
5789 local_irq_restore(flags);
5790}
5791
5792
5793/*
5794 * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
5795 * int free, int issue)
5796 *
 5797 * Purpose : return a linked list of Scsi_Cmnd structures that had
 5798 * propagated below the Linux issue queue level, chained through
 5799 * the SCp.buffer field so we don't perturb hostdata. (We don't use
 5800 * a field of the NCR53c7x0_cmd structure since we may not have
 5801 * allocated one for the command causing the reset.) If free is set,
5802 * free the NCR53c7x0_cmd structures which are associated with
5803 * the Scsi_Cmnd structures, and clean up any internal
5804 * NCR lists that the commands were on. If issue is set,
5805 * also return commands in the issue queue.
5806 *
5807 * Returns : linked list of commands
5808 *
 5809 * NOTE : the caller should ensure that the NCR chip is halted
5810 * if the free flag is set.
5811 */
5812
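/*
 * A minimal usage sketch: the returned commands are chained through the
 * SCp.buffer field, so a caller (compare disable() below) walks and
 * completes them with a status of its choosing (DID_RESET here is
 * illustrative only):
 */
#if 0
	Scsi_Cmnd *tmp, *list = return_outstanding_commands (host, 1, 1);
	for (; list; list = tmp) {
		tmp = (Scsi_Cmnd *) list->SCp.buffer;
		list->result = DID_RESET << 16;
		list->scsi_done (list);
	}
#endif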
5813static Scsi_Cmnd *
5814return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
5815 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5816 host->hostdata[0];
5817 struct NCR53c7x0_cmd *c;
5818 int i;
5819 u32 *ncrcurrent;
5820 Scsi_Cmnd *list = NULL, *tmp;
5821 for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
5822 c = (struct NCR53c7x0_cmd *) c->next) {
 5823 if (c->cmd->SCp.buffer) {
 5824 printk ("scsi%d : loop detected in running list!\n", host->host_no);
 5825 break;
 5826 }
5830
5831 c->cmd->SCp.buffer = (struct scatterlist *) list;
5832 list = c->cmd;
5833 if (free) {
5834 c->next = hostdata->free;
5835 hostdata->free = c;
5836 }
5837 }
5838
5839 if (free) {
5840 for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
5841 i < host->can_queue; ++i, ncrcurrent += 2) {
5842 ncrcurrent[0] = hostdata->NOP_insn;
5843 ncrcurrent[1] = 0xdeadbeef;
5844 }
5845 hostdata->ncrcurrent = NULL;
5846 }
5847
5848 if (issue) {
5849 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; tmp = tmp->next) {
5850 if (tmp->SCp.buffer) {
5851 printk ("scsi%d : loop detected in issue queue!\n",
5852 host->host_no);
5853 break;
5854 }
5855 tmp->SCp.buffer = (struct scatterlist *) list;
5856 list = tmp;
5857 }
5858 if (free)
5859 hostdata->issue_queue = NULL;
5860
5861 }
5862 return list;
5863}
5864
5865/*
5866 * Function : static int disable (struct Scsi_Host *host)
5867 *
5868 * Purpose : disables the given NCR host, causing all commands
5869 * to return a driver error. Call this so we can unload the
5870 * module during development and try again. Eventually,
5871 * we should be able to find clean workarounds for these
5872 * problems.
5873 *
5874 * Inputs : host - hostadapter to twiddle
5875 *
5876 * Returns : 0 on success.
5877 */
5878
5879static int
5880disable (struct Scsi_Host *host) {
5881 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5882 host->hostdata[0];
5883 unsigned long flags;
5884 Scsi_Cmnd *nuke_list, *tmp;
5885 local_irq_save(flags);
5886 if (hostdata->state != STATE_HALTED)
5887 ncr_halt (host);
5888 nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
5889 hard_reset (host);
5890 hostdata->state = STATE_DISABLED;
5891 local_irq_restore(flags);
5892 printk ("scsi%d : nuking commands\n", host->host_no);
5893 for (; nuke_list; nuke_list = tmp) {
5894 tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
5895 nuke_list->result = DID_ERROR << 16;
5896 nuke_list->scsi_done(nuke_list);
5897 }
5898 printk ("scsi%d : done. \n", host->host_no);
5899 printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
5900 host->host_no);
5901 return 0;
5902}
5903
5904/*
5905 * Function : static int ncr_halt (struct Scsi_Host *host)
5906 *
5907 * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
5908 *
5909 * Inputs : host - SCSI chip to halt
5910 *
5911 * Returns : 0 on success
5912 */
5913
5914static int
5915ncr_halt (struct Scsi_Host *host) {
5916 NCR53c7x0_local_declare();
5917 unsigned long flags;
5918 unsigned char istat, tmp;
5919 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5920 host->hostdata[0];
5921 int stage;
5922 NCR53c7x0_local_setup(host);
5923
5924 local_irq_save(flags);
5925 /* Stage 0 : eat all interrupts
5926 Stage 1 : set ABORT
5927 Stage 2 : eat all but abort interrupts
5928 Stage 3 : eat all interrupts
5929 */
5930 for (stage = 0;;) {
5931 if (stage == 1) {
5932 NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
5933 ++stage;
5934 }
5935 istat = NCR53c7x0_read8 (hostdata->istat);
5936 if (istat & ISTAT_SIP) {
5937 tmp = NCR53c7x0_read8(SSTAT0_REG);
5938 } else if (istat & ISTAT_DIP) {
5939 tmp = NCR53c7x0_read8(DSTAT_REG);
5940 if (stage == 2) {
5941 if (tmp & DSTAT_ABRT) {
5942 NCR53c7x0_write8(hostdata->istat, 0);
5943 ++stage;
5944 } else {
5945 printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
5946 host->host_no);
5947 disable (host);
5948 }
5949 }
5950 }
5951 if (!(istat & (ISTAT_SIP|ISTAT_DIP))) {
5952 if (stage == 0)
5953 ++stage;
5954 else if (stage == 3)
5955 break;
5956 }
5957 }
5958 hostdata->state = STATE_HALTED;
5959 local_irq_restore(flags);
5960#if 0
5961 print_lots (host);
5962#endif
5963 return 0;
5964}
5965
5966/*
5967 * Function: event_name (int event)
5968 *
5969 * Purpose: map event enum into user-readable strings.
5970 */
5971
5972static const char *
5973event_name (int event) {
5974 switch (event) {
5975 case EVENT_NONE: return "none";
5976 case EVENT_ISSUE_QUEUE: return "to issue queue";
5977 case EVENT_START_QUEUE: return "to start queue";
5978 case EVENT_SELECT: return "selected";
5979 case EVENT_DISCONNECT: return "disconnected";
5980 case EVENT_RESELECT: return "reselected";
5981 case EVENT_COMPLETE: return "completed";
5982 case EVENT_IDLE: return "idle";
5983 case EVENT_SELECT_FAILED: return "select failed";
5984 case EVENT_BEFORE_SELECT: return "before select";
5985 case EVENT_RESELECT_FAILED: return "reselect failed";
5986 default: return "unknown";
5987 }
5988}
5989
5990/*
 5991 * Function : void dump_events (struct Scsi_Host *host, int count)
 5992 *
 5993 * Purpose : print the last count events which have occurred.
5994 */
5995static void
5996dump_events (struct Scsi_Host *host, int count) {
5997 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5998 host->hostdata[0];
5999 struct NCR53c7x0_event event;
6000 int i;
6001 unsigned long flags;
6002 if (hostdata->events) {
6003 if (count > hostdata->event_size)
6004 count = hostdata->event_size;
6005 for (i = hostdata->event_index; count > 0;
6006 i = (i ? i - 1 : hostdata->event_size -1), --count) {
6007/*
6008 * By copying the event we're currently examining with interrupts
6009 * disabled, we can do multiple printk(), etc. operations and
6010 * still be guaranteed that they're happening on the same
6011 * event structure.
6012 */
6013 local_irq_save(flags);
6014#if 0
6015 event = hostdata->events[i];
6016#else
6017 memcpy ((void *) &event, (void *) &(hostdata->events[i]),
6018 sizeof(event));
6019#endif
6020
6021 local_irq_restore(flags);
6022 printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
6023 host->host_no, event_name (event.event), count,
6024 (long) event.time.tv_sec, (long) event.time.tv_usec,
6025 event.target, event.lun);
6026 if (event.dsa)
6027 printk (" event for dsa 0x%lx (virt 0x%p)\n",
6028 virt_to_bus(event.dsa), event.dsa);
6029 if (event.pid != -1) {
6030 printk (" event for pid %ld ", event.pid);
6031 __scsi_print_command (event.cmnd);
6032 }
6033 }
6034 }
6035}
6036
6037/*
6038 * Function: check_address
6039 *
6040 * Purpose: Check to see if a possibly corrupt pointer will fault the
6041 * kernel.
6042 *
6043 * Inputs: addr - address; size - size of area
6044 *
6045 * Returns: 0 if area is OK, -1 on error.
6046 *
6047 * NOTES: should be implemented in terms of vverify on kernels
6048 * that have it.
6049 */
6050
6051static int
6052check_address (unsigned long addr, int size) {
 6053 return ((virt_to_phys((void *)addr) < PAGE_SIZE ||
 virt_to_phys((void *)(addr + size)) > virt_to_phys(high_memory)) ? -1 : 0);
6054}
6055
6056#ifdef MODULE
6057int
6058NCR53c7x0_release(struct Scsi_Host *host) {
6059 struct NCR53c7x0_hostdata *hostdata =
6060 (struct NCR53c7x0_hostdata *) host->hostdata[0];
6061 struct NCR53c7x0_cmd *cmd, *tmp;
6062 shutdown (host);
6063 if (host->irq != SCSI_IRQ_NONE)
6064 {
6065 int irq_count;
6066 struct Scsi_Host *tmp;
6067 for (irq_count = 0, tmp = first_host; tmp; tmp = tmp->next)
6068 if (tmp->hostt == the_template && tmp->irq == host->irq)
6069 ++irq_count;
6070 if (irq_count == 1)
6071 free_irq(host->irq, NULL);
6072 }
6073 if (host->dma_channel != DMA_NONE)
6074 free_dma(host->dma_channel);
6075 if (host->io_port)
6076 release_region(host->io_port, host->n_io_port);
6077
6078 for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd; cmd = tmp,
6079 --hostdata->num_cmds) {
6080 tmp = (struct NCR53c7x0_cmd *) cmd->next;
6081 /*
6082 * If we're going to loop, try to stop it to get a more accurate
6083 * count of the leaked commands.
6084 */
6085 cmd->next = NULL;
6086 if (cmd->free)
6087 cmd->free ((void *) cmd->real, cmd->size);
6088 }
6089 if (hostdata->num_cmds)
6090 printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
6091 host->host_no, hostdata->num_cmds);
6092
6093 vfree(hostdata->events);
6094
6095 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
6096 * XXX may be invalid (CONFIG_060_WRITETHROUGH)
6097 */
6098 kernel_set_cachemode((void *)hostdata, 8192, IOMAP_FULL_CACHING);
6099 free_pages ((u32)hostdata, 1);
6100 return 1;
6101}
6102#endif /* def MODULE */
diff --git a/drivers/scsi/53c7xx.h b/drivers/scsi/53c7xx.h
deleted file mode 100644
index 218f3b901537..000000000000
--- a/drivers/scsi/53c7xx.h
+++ /dev/null
@@ -1,1608 +0,0 @@
1/*
 2 * 53c710 driver. Modified from Drew Eckhardt's driver
3 * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
4 *
5 * I have left the code for the 53c8xx family in here, because it didn't
6 * seem worth removing it. The possibility of IO_MAPPED chips rather
7 * than MEMORY_MAPPED remains, in case someone wants to add support for
8 * 53c710 chips on Intel PCs (some older machines have them on the
9 * motherboard).
10 *
11 * NOTE THERE MAY BE PROBLEMS WITH CASTS IN read8 AND Co.
12 */
13
14/*
15 * NCR 53c{7,8}0x0 driver, header file
16 *
17 * Sponsored by
18 * iX Multiuser Multitasking Magazine
19 * Hannover, Germany
20 * hm@ix.de
21 *
22 * Copyright 1993, 1994, 1995 Drew Eckhardt
23 * Visionary Computing
24 * (Unix and Linux consulting and custom programming)
25 * drew@PoohSticks.ORG
26 * +1 (303) 786-7975
27 *
28 * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
29 *
30 * PRE-ALPHA
31 *
32 * For more information, please consult
33 *
34 * NCR 53C700/53C700-66
35 * SCSI I/O Processor
36 * Data Manual
37 *
38 * NCR 53C810
39 * PCI-SCSI I/O Processor
40 * Data Manual
41 *
42 * NCR Microelectronics
43 * 1635 Aeroplaza Drive
44 * Colorado Springs, CO 80916
45 * +1 (719) 578-3400
46 *
47 * Toll free literature number
48 * +1 (800) 334-5454
49 *
50 */
51
52#ifndef NCR53c710_H
53#define NCR53c710_H
54
55#ifndef HOSTS_C
56
57/* SCSI control 0 rw, default = 0xc0 */
58#define SCNTL0_REG 0x00
59#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
60#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
61#define SCNTL0_STRT 0x20 /* Start Sequence */
62#define SCNTL0_WATN 0x10 /* Select with ATN */
63#define SCNTL0_EPC 0x08 /* Enable parity checking */
64/* Bit 2 is reserved on 800 series chips */
65#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
66#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
67#define SCNTL0_TRG 0x01 /* Target mode */
68
69/* SCSI control 1 rw, default = 0x00 */
70
71#define SCNTL1_REG 0x01
72#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
73#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
74#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
75 and reselection */
76#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
77 target mode only */
78#define SCNTL1_CON 0x10 /* Connected */
79#define SCNTL1_RST 0x08 /* SCSI RST/ */
80#define SCNTL1_AESP 0x04 /* Force bad parity */
81#define SCNTL1_SND_700 0x02 /* Start SCSI send */
82#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
83 arbitration immediately after
84 busfree is detected */
85#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
86#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
87
88/* SCSI control 2 rw, */
89
90#define SCNTL2_REG_800 0x02
91#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
92
93/* SCSI control 3 rw */
94
95#define SCNTL3_REG_800 0x03
96#define SCNTL3_800_SCF_SHIFT 4
97#define SCNTL3_800_SCF_MASK 0x70
98#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
99#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
100#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
101 /* 0x20 = SCLK/1.5
102 0x30 = SCLK/2
103 0x40 = SCLK/3 */
104
105#define SCNTL3_800_CCF_SHIFT 0
106#define SCNTL3_800_CCF_MASK 0x07
107#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
108#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
109#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
110 0x03 37.51 - 50
111 0x04 50.01 - 66 */
112
113/*
114 * SCSI destination ID rw - the appropriate bit is set for the selected
115 * target ID. This is written by the SCSI SCRIPTS processor.
116 * default = 0x00
117 */
118#define SDID_REG_700 0x02
119#define SDID_REG_800 0x06
120
121#define GP_REG_800 0x07 /* General purpose IO */
122#define GP_800_IO1 0x02
123#define GP_800_IO2 0x01
124
125/* SCSI interrupt enable rw, default = 0x00 */
126#define SIEN_REG_700 0x03
127#define SIEN0_REG_800 0x40
128#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
129#define SIEN_FC 0x40 /* Function complete */
130#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
131#define SIEN_800_SEL 0x20 /* Selected */
132#define SIEN_700_SEL 0x10 /* Selected or reselected */
133#define SIEN_800_RESEL 0x10 /* Reselected */
134#define SIEN_SGE 0x08 /* SCSI gross error */
135#define SIEN_UDC 0x04 /* Unexpected disconnect */
136#define SIEN_RST 0x02 /* SCSI RST/ received */
137#define SIEN_PAR 0x01 /* Parity error */
138
139/*
140 * SCSI chip ID rw
141 * NCR53c700 :
142 * When arbitrating, the highest bit is used, when reselection or selection
143 * occurs, the chip responds to all IDs for which a bit is set.
144 * default = 0x00
145 * NCR53c810 :
146 * Uses bit mapping
147 */
148#define SCID_REG 0x04
149/* Bit 7 is reserved on 800 series chips */
150#define SCID_800_RRE 0x40 /* Enable response to reselection */
151#define SCID_800_SRE 0x20 /* Enable response to selection */
152/* Bits four and three are reserved on 800 series chips */
153#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
154
155/* SCSI transfer rw, default = 0x00 */
156#define SXFER_REG 0x05
157#define SXFER_DHP 0x80 /* Disable halt on parity */
158
159#define SXFER_TP2 0x40 /* Transfer period msb */
160#define SXFER_TP1 0x20
161#define SXFER_TP0 0x10 /* lsb */
162#define SXFER_TP_MASK 0x70
163/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
164#define SXFER_TP_SHIFT 5
165#define SXFER_TP_4 0x00 /* Divisors */
166#define SXFER_TP_5 0x10<<1
167#define SXFER_TP_6 0x20<<1
168#define SXFER_TP_7 0x30<<1
169#define SXFER_TP_8 0x40<<1
170#define SXFER_TP_9 0x50<<1
171#define SXFER_TP_10 0x60<<1
172#define SXFER_TP_11 0x70<<1
173
174#define SXFER_MO3 0x08 /* Max offset msb */
175#define SXFER_MO2 0x04
176#define SXFER_MO1 0x02
177#define SXFER_MO0 0x01 /* lsb */
178#define SXFER_MO_MASK 0x0f
179#define SXFER_MO_SHIFT 0
180
181/*
182 * SCSI output data latch rw
183 * The contents of this register are driven onto the SCSI bus when
184 * the Assert Data Bus bit of the SCNTL1 register is set and
185 * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
186 */
187#define SODL_REG_700 0x06
188#define SODL_REG_800 0x54
189
190
191/*
192 * SCSI output control latch rw, default = 0
193 * Note that when the chip is being manually programmed as an initiator,
194 * the MSG, CD, and IO bits must be set correctly for the phase the target
195 * is driving the bus in. Otherwise no data transfer will occur due to
196 * phase mismatch.
197 */
198
199#define SOCL_REG 0x07
200#define SOCL_REQ 0x80 /* REQ */
201#define SOCL_ACK 0x40 /* ACK */
202#define SOCL_BSY 0x20 /* BSY */
203#define SOCL_SEL 0x10 /* SEL */
204#define SOCL_ATN 0x08 /* ATN */
205#define SOCL_MSG 0x04 /* MSG */
206#define SOCL_CD 0x02 /* C/D */
207#define SOCL_IO 0x01 /* I/O */
208
209/*
210 * SCSI first byte received latch ro
211 * This register contains the first byte received during a block MOVE
212 * SCSI SCRIPTS instruction, including
213 *
214 * Initiator mode Target mode
215 * Message in Command
216 * Status Message out
217 * Data in Data out
218 *
219 * It also contains the selecting or reselecting device's ID and our
220 * ID.
221 *
222 * Note that this is the register the various IF conditionals can
223 * operate on.
224 */
225#define SFBR_REG 0x08
226
227/*
228 * SCSI input data latch ro
229 * In initiator mode, data is latched into this register on the rising
230 * edge of REQ/. In target mode, data is latched on the rising edge of
231 * ACK/
232 */
233#define SIDL_REG_700 0x09
234#define SIDL_REG_800 0x50
235
236/*
237 * SCSI bus data lines ro
238 * This register reflects the instantaneous status of the SCSI data
239 * lines. Note that SCNTL0 must be set to disable parity checking,
240 * otherwise reading this register will latch new parity.
241 */
242#define SBDL_REG_700 0x0a
243#define SBDL_REG_800 0x58
244
245#define SSID_REG_800 0x0a
246#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
247#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
248
249
250/*
251 * SCSI bus control lines rw,
252 * instantaneous readout of control lines
253 */
254#define SBCL_REG 0x0b
255#define SBCL_REQ 0x80 /* REQ ro */
256#define SBCL_ACK 0x40 /* ACK ro */
257#define SBCL_BSY 0x20 /* BSY ro */
258#define SBCL_SEL 0x10 /* SEL ro */
259#define SBCL_ATN 0x08 /* ATN ro */
260#define SBCL_MSG 0x04 /* MSG ro */
261#define SBCL_CD 0x02 /* C/D ro */
262#define SBCL_IO 0x01 /* I/O ro */
263#define SBCL_PHASE_CMDOUT SBCL_CD
264#define SBCL_PHASE_DATAIN SBCL_IO
265#define SBCL_PHASE_DATAOUT 0
266#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
267#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
268#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
269#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
270/*
271 * Synchronous SCSI Clock Control bits
272 * 0 - set by DCNTL
273 * 1 - SCLK / 1.0
274 * 2 - SCLK / 1.5
275 * 3 - SCLK / 2.0
276 */
277#define SBCL_SSCF1 0x02 /* wo, -66 only */
278#define SBCL_SSCF0 0x01 /* wo, -66 only */
279#define SBCL_SSCF_MASK 0x03
280
281/*
282 * XXX note : when reading the DSTAT and STAT registers to clear interrupts,
 283 * ensure that 10 clocks elapse between the two
284 */
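/*
 * A minimal sketch of honoring the note above when clearing both
 * interrupt sources (accessors as used in 53c7xx.c; 1us comfortably
 * exceeds 10 CLK periods at any supported clock rate):
 */
#if 0
	unsigned char dstat, sstat0;
	dstat = NCR53c7x0_read8 (DSTAT_REG);
	udelay (1);
	sstat0 = NCR53c7x0_read8 (SSTAT0_REG);
#endif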
285/* DMA status ro */
286#define DSTAT_REG 0x0c
287#define DSTAT_DFE 0x80 /* DMA FIFO empty */
288#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
289#define DSTAT_800_BF 0x20 /* Bus Fault */
290#define DSTAT_ABRT 0x10 /* Aborted - set on error */
291#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
292#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
293 set when INT instruction is
294 executed */
295#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
296#define DSTAT_OPC 0x01 /* Illegal instruction */
297#define DSTAT_800_IID 0x01 /* Same thing, different name */
298
299
300/* NCR53c800 moves this stuff into SIST0 */
301#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
302#define SIST0_REG_800 0x42
303#define SSTAT0_MA 0x80 /* ini : phase mismatch,
304 * tgt : ATN/ asserted
305 */
306#define SSTAT0_CMP 0x40 /* function complete */
307#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
308#define SIST0_800_SEL 0x20 /* Selected */
309#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
310#define SIST0_800_RSL 0x10 /* Reselected */
311#define SSTAT0_SGE 0x08 /* SCSI gross error */
312#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
313#define SSTAT0_RST 0x02 /* SCSI RST/ received */
314#define SSTAT0_PAR 0x01 /* Parity error */
315
316/* And uses SSTAT0 for what was SSTAT1 */
317
318#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
319#define SSTAT1_ILF 0x80 /* SIDL full */
320#define SSTAT1_ORF 0x40 /* SODR full */
321#define SSTAT1_OLF 0x20 /* SODL full */
322#define SSTAT1_AIP 0x10 /* Arbitration in progress */
323#define SSTAT1_LOA 0x08 /* Lost arbitration */
324#define SSTAT1_WOA 0x04 /* Won arbitration */
325#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
326#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
327
328#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
329#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
330#define SSTAT2_FF2 0x40 /* data FIFO */
331#define SSTAT2_FF1 0x20
332#define SSTAT2_FF0 0x10
333#define SSTAT2_FF_MASK 0xf0
334#define SSTAT2_FF_SHIFT 4
335
336/*
337 * Latched signals, latched on the leading edge of REQ/ for initiators,
338 * ACK/ for targets.
339 */
340#define SSTAT2_SDP 0x08 /* SDP */
341#define SSTAT2_MSG 0x04 /* MSG */
342#define SSTAT2_CD 0x02 /* C/D */
343#define SSTAT2_IO 0x01 /* I/O */
344#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
345#define SSTAT2_PHASE_DATAIN SSTAT2_IO
346#define SSTAT2_PHASE_DATAOUT 0
347#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
348#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
349#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
350#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
351
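/*
 * A minimal sketch of decoding the latched phase (compare
 * sstat2_to_phase() in 53c7xx.c, which maps these to printable names):
 */
#if 0
	switch (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_PHASE_MASK) {
	case SSTAT2_PHASE_DATAOUT:	/* DATA OUT */
	case SSTAT2_PHASE_DATAIN:	/* DATA IN */
	case SSTAT2_PHASE_CMDOUT:	/* COMMAND */
	case SSTAT2_PHASE_STATIN:	/* STATUS */
	case SSTAT2_PHASE_MSGOUT:	/* MESSAGE OUT */
	case SSTAT2_PHASE_MSGIN:	/* MESSAGE IN */
	default:
		break;
	}
#endif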
352
353/* NCR53c700-66 only */
354#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
355/* NCR53c710 and higher */
356#define DSA_REG 0x10 /* DATA structure address */
357
358#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
359#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
360/* 0x80 - 0x04 are reserved */
361#define CTEST0_700_RTRG 0x02 /* Real target mode */
362#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
363 * SCSI bus to host, 0 =
364 * host to SCSI.
365 */
366
367#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
368#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
369#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
370#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
371#define CTEST1_FMT1 0x20
372#define CTEST1_FMT0 0x10
373
374#define CTEST1_FFL3 0x08 /* Identify which bytes lanes are full */
375#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
376#define CTEST1_FFL1 0x02
377#define CTEST1_FFL0 0x01
378
379#define CTEST2_REG_700 0x16 /* Chip test 2 ro */
380#define CTEST2_REG_800 0x1a /* Chip test 2 ro */
381
382#define CTEST2_800_DDIR 0x80 /* 1 = SCSI->host */
383#define CTEST2_800_SIGP 0x40 /* A copy of SIGP in ISTAT.
384 Reading this register clears */
 385#define CTEST2_800_CIO 0x20 /* Configured as IO */
386#define CTEST2_800_CM 0x10 /* Configured as memory */
387
388/* 0x80 - 0x40 are reserved on 700 series chips */
389#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
390 * As an initiator, this bit is
391 * one when the synchronous offset
392 * is zero, as a target this bit
393 * is one when the synchronous
394 * offset is at the maximum
395 * defined in SXFER
396 */
397#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
398 * reading CTEST3 unloads a byte
399 * from the FIFO and sets this
400 */
401#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
402 * reading CTEST6 unloads a byte
403 * from the FIFO and sets this
404 */
405#define CTEST2_TEOP 0x04 /* SCSI true end of process,
406 * indicates a totally finished
407 * transfer
408 */
409#define CTEST2_DREQ 0x02 /* Data request signal */
410/* 0x01 is reserved on 700 series chips */
411#define CTEST2_800_DACK 0x01
412
413/*
414 * Chip test 3 ro
415 * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
416 * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
417 * error results if a read is attempted on this register. Also note
418 * that 16 and 32 bit reads of this register will cause corruption.
419 */
420#define CTEST3_REG_700 0x17
421/* Chip test 3 rw */
422#define CTEST3_REG_800 0x1b
423#define CTEST3_800_V3 0x80 /* Chip revision */
424#define CTEST3_800_V2 0x40
425#define CTEST3_800_V1 0x20
426#define CTEST3_800_V0 0x10
427#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
428#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
429#define CTEST3_800_FM 0x02 /* Fetch mode pin */
430/* bit 0 is reserved on 800 series chips */
431
432#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
433#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
434/* 0x80 is reserved on 700 series chips */
435#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
436#define CTEST4_ZMOD 0x40 /* High impedance mode */
437#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
438#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
439#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
440#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
441 * redirects writes from SODL
442 * to the SCSI FIFO.
443 */
444#define CTEST4_800_MPEE 0x08 /* Enable parity checking
445 during master cycles on PCI
446 bus */
447
448/*
449 * These bits send the contents of the CTEST6 register to the appropriate
450 * byte lane of the 32 bit DMA FIFO. Normal operation is zero, otherwise
451 * the high bit means the low two bits select the byte lane.
452 */
453#define CTEST4_FBL2 0x04
454#define CTEST4_FBL1 0x02
455#define CTEST4_FBL0 0x01
456#define CTEST4_FBL_MASK 0x07
457#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
458#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
459#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
460#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
461#define CTEST4_800_SAVE (CTEST4_800_BDIS)
462
463
464#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
465#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
466/*
467 * Clock Address Incrementor. When set, it increments the
468 * DNAD register to the next bus size boundary. It automatically
469 * resets itself when the operation is complete.
470 */
471#define CTEST5_ADCK 0x80
472/*
473 * Clock Byte Counter. When set, it decrements the DBC register to
474 * the next bus size boundary.
475 */
476#define CTEST5_BBCK 0x40
477/*
478 * Reset SCSI Offset. Setting this bit to 1 clears the current offset
479 * pointer in the SCSI synchronous offset counter (SSTAT). This bit
480 * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
481 * be cleared when a synchronous transfer fails. When written, it is
482 * automatically cleared after the SCSI synchronous offset counter is
483 * reset.
484 */
485/* Bit 5 is reserved on 800 series chips */
486#define CTEST5_700_ROFF 0x20
487/*
488 * Master Control for Set or Reset pulses. When 1, causes the low
489 * four bits of register to set when set, 0 causes the low bits to
490 * clear when set.
491 */
492#define CTEST5_MASR 0x10
493#define CTEST5_DDIR 0x08 /* DMA direction */
494/*
495 * Bits 2-0 are reserved on 800 series chips
496 */
497#define CTEST5_700_EOP 0x04 /* End of process */
498#define CTEST5_700_DREQ 0x02 /* Data request */
499#define CTEST5_700_DACK 0x01 /* Data acknowledge */
500
501/*
502 * Chip test 6 rw - writing to this register writes to the byte
503 * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
504 * register.
505 */
506#define CTEST6_REG_700 0x1a
507#define CTEST6_REG_800 0x23
508
509#define CTEST7_REG 0x1b /* Chip test 7 rw */
510/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
511#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
512#define CTEST7_10_SC1 0x40 /* Snoop control bits */
513#define CTEST7_10_SC0 0x20
514#define CTEST7_10_SC_MASK 0x60
515/* 0x20 is reserved on the NCR53c700 */
516#define CTEST7_0060_FM 0x20 /* Fetch mode */
517#define CTEST7_STD 0x10 /* Selection timeout disable */
518#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
519#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
520#define CTEST7_10_TT1 0x02 /* Transfer type */
521#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
522 fetch */
523#define CTEST7_DIFF 0x01 /* Differential mode */
524
525#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
526
527
528#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
529
530#define DFIFO_REG 0x20 /* DMA FIFO rw */
531/*
532 * 0x80 is reserved on the NCR53c710, the CLF and FLF bits have been
533 * moved into the CTEST8 register.
534 */
535#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
536#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
537#define DFIFO_BO6 0x40
538#define DFIFO_BO5 0x20
539#define DFIFO_BO4 0x10
540#define DFIFO_BO3 0x08
541#define DFIFO_BO2 0x04
542#define DFIFO_BO1 0x02
543#define DFIFO_BO0 0x01
544#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
545#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
546
547/*
548 * Interrupt status rw
549 * Note that this is the only register which can be read while SCSI
550 * SCRIPTS are being executed.
551 */
552#define ISTAT_REG_700 0x21
553#define ISTAT_REG_800 0x14
554#define ISTAT_ABRT 0x80 /* Software abort, write
 555 * 1 to abort, wait for interrupt. */
556/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
557#define ISTAT_10_SRST 0x40 /* software reset */
558#define ISTAT_10_SIGP 0x20 /* signal script */
559/* 0x10 is reserved on NCR53c700 series chips */
560#define ISTAT_800_SEM 0x10 /* semaphore */
561#define ISTAT_CON 0x08 /* 1 when connected */
562#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
563#define ISTAT_700_PRE 0x04 /* Pointer register empty.
564 * Set to 1 when DSPS and DSP
565 * registers are empty in pipeline
566 * mode, always set otherwise.
567 */
568#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
569 * SCSI portion of SIOP see
570 * SSTAT0
571 */
572#define ISTAT_DIP 0x01 /* DMA interrupt pending
573 * see DSTAT
574 */
575
576/* NCR53c700-66 and NCR53c710 only */
577#define CTEST8_REG 0x22 /* Chip test 8 rw */
578#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
579 * ie read from SCLK/ rather than CLK/
580 */
581#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
582#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
 583 * pass through. This ensures that
584 * bad parity won't reach the host
585 * bus.
586 */
587#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
588 * active negation, should only
589 * be used for slow SCSI
590 * non-differential.
591 */
592#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
593#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
594 * must be set for fast SCSI-II
595 * speeds.
596 */
597#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
598 * switching.
599 */
600#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
601 * The status of pending
602 * disconnect is maintained by
603 * the core, eliminating
604 * the possibility of missing a
605 * selection or reselection
606 * while waiting to fetch a
607 * WAIT DISCONNECT opcode.
608 */
609
610#define CTEST8_10_V3 0x80 /* Chip revision */
611#define CTEST8_10_V2 0x40
612#define CTEST8_10_V1 0x20
613#define CTEST8_10_V0 0x10
614#define CTEST8_10_V_MASK 0xf0
615#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
616#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
617#define CTEST8_10_FM 0x02 /* Fetch pin mode */
618#define CTEST8_10_SM 0x01 /* Snoop pin mode */
619
620
621/*
622 * The CTEST9 register may be used to differentiate between a
623 * NCR53c700 and a NCR53c710.
624 *
625 * Write 0xff to this register.
626 * Read it.
627 * If the contents are 0xff, it is a NCR53c700
628 * If the contents are 0x00, it is a NCR53c700-66 first revision
629 * If the contents are some other value, it is some other NCR53c700-66
630 */
631#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
632#define LCRC_REG_10 0x23
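/*
 * A minimal sketch of the detection recipe above (register accessors as
 * in 53c7xx.c; illustrative only):
 */
#if 0
	unsigned char rev;
	NCR53c7x0_write8 (CTEST9_REG_00, 0xff);
	rev = NCR53c7x0_read8 (CTEST9_REG_00);
	if (rev == 0xff)
		/* NCR53c700 */ ;
	else if (rev == 0x00)
		/* NCR53c700-66, first revision */ ;
	else
		/* some other NCR53c700-66 */ ;
#endif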
633
634/*
635 * 0x24 through 0x27 are the DMA byte counter register. Instructions
636 * write their high 8 bits into the DCMD register, the low 24 bits into
637 * the DBC register.
638 *
639 * Function is dependent on the command type being executed.
640 */
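/*
 * A 32 bit read starting at the DBC register (0x24) therefore returns
 * DCMD in the top byte and DBC in the low 24 bits; a minimal sketch of
 * splitting them, as print_lots() in 53c7xx.c does:
 */
#if 0
	u32 dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
	unsigned char dcmd = (dbc_dcmd & 0xff000000) >> 24;
	u32 dbc = dbc_dcmd & 0x00ffffff;
#endif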
641
642
643#define DBC_REG 0x24
644/*
645 * For Block Move Instructions, DBC is a 24 bit quantity representing
646 * the number of bytes to transfer.
647 * For Transfer Control Instructions, DBC is bit fielded as follows :
648 */
649/* Bits 20 - 23 should be clear */
650#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
651#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
652#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
653#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
654/* Bits 8 - 15 are reserved on some implementations ? */
655#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
656#define DBC_TCI_MASK_SHIFT 8
657#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
658#define DBC_TCI_DATA_SHIFT 0
659
660#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
661#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
662#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
663#define DBC_RWRI_ADDRESS_SHIFT 16
664
665
666/*
667 * DMA command r/w
668 */
669#define DCMD_REG 0x27
670#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
671#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
672#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
673#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
674#define DCMD_BMI_MSG 0x04 /* instruction */
675
676#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
677#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
678#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
679
680#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
681
682#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
683 instruction */
684#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
685#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
686#define DCMD_TCI_MSG 0x04 /* instruction */
687#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
688#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
689#define DCMD_TCI_OP_CALL 0x08 /* CALL */
690#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
691#define DCMD_TCI_OP_INT 0x18 /* INT */
692
693#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
694 instruction */
695#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
696#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
697#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
698#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
699
700#define DCMD_RWRI_OP_MASK 0x07
701#define DCMD_RWRI_OP_MOVE 0x00
702#define DCMD_RWRI_OP_SHL 0x01
703#define DCMD_RWRI_OP_OR 0x02
704#define DCMD_RWRI_OP_XOR 0x03
705#define DCMD_RWRI_OP_AND 0x04
706#define DCMD_RWRI_OP_SHR 0x05
707#define DCMD_RWRI_OP_ADD 0x06
708#define DCMD_RWRI_OP_ADDC 0x07
709
710#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
711 (three words) */
712
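/*
 * A minimal sketch of assembling the first word of a read/modify/write
 * instruction from the fields above, ORing SCNTL1_RST into SCNTL1
 * (illustrative only; the real scripts are generated from 53c7xx.scr):
 */
#if 0
	u32 insn0 = ((u32) (DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
			DCMD_RWRI_OP_OR) << 24) |
		(SCNTL1_REG << DBC_RWRI_ADDRESS_SHIFT) |
		(SCNTL1_RST << DBC_RWRI_IMMEDIATE_SHIFT);
#endif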
713
714#define DNAD_REG 0x28 /* through 0x2b DMA next address for
715 data */
716#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
717#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
718 save rw */
719#define DMODE_REG_00 0x34 /* DMA mode rw */
720#define DMODE_00_BL1 0x80 /* Burst length bits */
721#define DMODE_00_BL0 0x40
722#define DMODE_BL_MASK 0xc0
723/* Burst lengths (800) */
724#define DMODE_BL_2 0x00 /* 2 transfer */
725#define DMODE_BL_4 0x40 /* 4 transfers */
726#define DMODE_BL_8 0x80 /* 8 transfers */
727#define DMODE_BL_16 0xc0 /* 16 transfers */
728
729#define DMODE_10_BL_1 0x00 /* 1 transfer */
730#define DMODE_10_BL_2 0x40 /* 2 transfers */
731#define DMODE_10_BL_4 0x80 /* 4 transfers */
732#define DMODE_10_BL_8 0xc0 /* 8 transfers */
733#define DMODE_10_FC2 0x20 /* Driven to FC2 pin */
734#define DMODE_10_FC1 0x10 /* Driven to FC1 pin */
735#define DMODE_710_PD 0x08 /* Program/data on FC0 pin */
736#define DMODE_710_UO 0x02 /* User prog. output */
737
738#define DMODE_700_BW16 0x20 /* Host buswidth = 16 */
739#define DMODE_700_286 0x10 /* 286 mode */
740#define DMODE_700_IOM 0x08 /* Transfer to IO port */
741#define DMODE_700_FAM 0x04 /* Fixed address mode */
742#define DMODE_700_PIPE 0x02 /* Pipeline mode disables
743 * automatic fetch / exec
744 */
745#define DMODE_MAN 0x01 /* Manual start mode,
746 * requires a 1 to be written
747 * to the start DMA bit in the DCNTL
748 * register to run scripts
749 */
750
 751#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
752
753/* NCR53c800 series only */
754#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
755/* NCR53c710 only */
756#define SCRATCHB_REG_10 0x34 /* through 0x37 scratch B rw */
757
758#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
759#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
760#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
761#define DMODE_800_ERL 0x08 /* Enable Read Line */
762
763/* 35-38 are reserved on 700 and 700-66 series chips */
764#define DIEN_REG 0x39 /* DMA interrupt enable rw */
765/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
766#define DIEN_800_MDPE 0x40 /* Master data parity error */
767#define DIEN_800_BF 0x20 /* BUS fault */
768#define DIEN_700_BF 0x20 /* BUS fault */
769#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
770#define DIEN_SSI 0x08 /* Enable single step interrupt */
771#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
772 * interrupt
773 */
774/* 0x02 is reserved on 800 series chips */
775#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
776#define DIEN_700_OPC 0x01 /* Enable illegal instruction
777 * interrupt
778 */
779#define DIEN_800_IID 0x01 /* Same meaning, different name */
780
781/*
782 * DMA watchdog timer rw
783 * set in 16 CLK input periods.
784 */
785#define DWT_REG 0x3a
786
787/* DMA control rw */
788#define DCNTL_REG 0x3b
789#define DCNTL_700_CF1 0x80 /* Clock divisor bits */
790#define DCNTL_700_CF0 0x40
791#define DCNTL_700_CF_MASK 0xc0
792/* Clock divisors Divisor SCLK range (MHZ) */
793#define DCNTL_700_CF_2 0x00 /* 2.0 37.51-50.00 */
794#define DCNTL_700_CF_1_5 0x40 /* 1.5 25.01-37.50 */
795#define DCNTL_700_CF_1 0x80 /* 1.0 16.67-25.00 */
796#define DCNTL_700_CF_3 0xc0 /* 3.0 50.01-66.67 (53c700-66) */
797
798#define DCNTL_700_S16 0x20 /* Load scripts 16 bits at a time */
799#define DCNTL_SSM 0x10 /* Single step mode */
800#define DCNTL_700_LLM 0x08 /* Low level mode, can only be set
801 * after selection */
802#define DCNTL_800_IRQM 0x08 /* Totem pole IRQ pin */
803#define DCNTL_STD 0x04 /* Start DMA / SCRIPTS */
804/* 0x02 is reserved */
805#define DCNTL_00_RST 0x01 /* Software reset, resets everything
806 * but 286 mode bit in DMODE. On the
807 * NCR53c710, this bit moved to CTEST8
808 */
809#define DCNTL_10_COM 0x01 /* 700 software compatibility mode */
810#define DCNTL_10_EA 0x20 /* Enable Ack - needed for MVME16x */
811
 812#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16 )
813
814
815/* NCR53c700-66 only */
816#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
817#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
818/* NCR53c710 only */
819#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
820
821#define SIEN1_REG_800 0x41
822#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
823#define SIEN1_800_GEN 0x02 /* general purpose timer */
824#define SIEN1_800_HTH 0x01 /* handshake to handshake */
825
826#define SIST1_REG_800 0x43
827#define SIST1_800_STO 0x04 /* selection/reselection timeout */
828#define SIST1_800_GEN 0x02 /* general purpose timer */
829#define SIST1_800_HTH 0x01 /* handshake to handshake */
830
831#define SLPAR_REG_800 0x44 /* Parity */
832
833#define MACNTL_REG_800 0x46 /* Memory access control */
834#define MACNTL_800_TYP3 0x80
835#define MACNTL_800_TYP2 0x40
836#define MACNTL_800_TYP1 0x20
837#define MACNTL_800_TYP0 0x10
838#define MACNTL_800_DWR 0x08
839#define MACNTL_800_DRD 0x04
840#define MACNTL_800_PSCPT 0x02
841#define MACNTL_800_SCPTS 0x01
842
843#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
844
845/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
846#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
847#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
848#define STIME0_800_HTH_SHIFT 4
849#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
850#define STIME0_800_SEL_SHIFT 0
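/*
 * Worked example of the encoding above: a nonzero field value n selects
 * a timeout of 100us * 2^(n-1), so n = 1 gives 100us, n = 2 gives
 * 200us, n = 3 gives 400us, and so on; n = 0 disables the timer.
 */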
851
852#define STIME1_REG_800 0x49
853#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
854
855#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
856 bits on narrow chips, 16 on WIDE */
857
858#define STEST0_REG_800 0x4c
859#define STEST0_800_SLT 0x08 /* Selection response logic test */
860#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
861#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
862#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
863
864#define STEST1_REG_800 0x4d
865#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
866
867#define STEST2_REG_800 0x4e
868#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
869#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
870#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
871#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
872#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
873#define STEST2_800_LOW 0x01 /* SCSI low level mode */
874
875#define STEST3_REG_800 0x4f
876#define STEST3_800_TE 0x80 /* Enable active negation */
877#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
878#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
879#define STEST3_800_DSI 0x10 /* Disable single initiator response */
880#define STEST3_800_TTM 0x04 /* Time test mode */
881#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
882#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
883
884#define OPTION_PARITY 0x1 /* Enable parity checking */
885#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
886#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
887#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
888#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
889#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
890 simple test code, return
891 DID_NO_CONNECT if any SCSI
892 commands are attempted. */
893#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
894 SCSI write is attempted */
895#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
896 each address and instruction
897 executed to debug buffer. */
898#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
899 instruction */
900#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
901#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
902 memory mapping */
903#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
904 I/O mapping */
905#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
906#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
907#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
908#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
909#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
910#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
911#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
912#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
913#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
914#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
915#define OPTION_DEBUG_DSA 0x800000
916#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
917#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
918#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
919#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
920#define OPTION_DEBUG_DISCONNECT 0x10000000
921#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
922 on power up */
923#define OPTION_DEBUG_QUEUES 0x80000000
924#define OPTION_DEBUG_ALLOCATION 0x100000000LL
925#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
926 SCNTL3 registers */
927#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
928 SDTR for async transfers when
929 we haven't been told to do
930 a synchronous transfer. */
931#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
932 the reselect/WAIT DISCONNECT
933 race condition hits */
934#if !defined(PERM_OPTIONS)
935#define PERM_OPTIONS 0
936#endif
937
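/*
 * A board-specific wrapper would normally define PERM_OPTIONS before
 * including this header; a hypothetical example enabling disconnects
 * and synchronous negotiation:
 */
#if 0
#define PERM_OPTIONS (OPTION_DISCONNECT | OPTION_SYNCHRONOUS)
#endif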
938/*
939 * Some data which is accessed by the NCR chip must be 4-byte aligned.
940 * For some hosts the default is less than that (eg. 68K uses 2-byte).
941 * Alignment has only been forced where it is important; also if one
942 * 32 bit structure field is aligned then it is assumed that following
943 * 32 bit fields are also aligned. Take care when adding fields
944 * which are other than 32 bit.
945 */
946
947struct NCR53c7x0_synchronous {
948 u32 select_indirect /* Value used for indirect selection */
949 __attribute__ ((aligned (4)));
950 u32 sscf_710; /* Used to set SSCF bits for 710 */
951 u32 script[8]; /* Size ?? Script used when target is
952 reselected */
953 unsigned char synchronous_want[5]; /* Per target desired SDTR */
954/*
955 * Set_synchronous programs these, select_indirect and current settings after
956 * int_debug_should show a match.
957 */
958 unsigned char sxfer_sanity, scntl3_sanity;
959};
960
961#define CMD_FLAG_SDTR 1 /* Initiating synchronous
962 transfer negotiation */
963#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
964 negotiation */
965#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
966#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
967
968struct NCR53c7x0_table_indirect {
969 u32 count;
970 void *address;
971};
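/*
 * A sketch of how one table indirect entry might describe a single
 * scatter/gather segment (seg_len and seg_virt are hypothetical; the
 * driver fills these from the command's scatterlist):
 */
#if 0
	struct NCR53c7x0_table_indirect entry = {
		.count = seg_len,
		.address = (void *) virt_to_bus (seg_virt),
	};
#endif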
972
973enum ncr_event {
974 EVENT_NONE = 0,
975/*
976 * Order is IMPORTANT, since these must correspond to the event interrupts
977 * in 53c7,8xx.scr
978 */
979
980 EVENT_ISSUE_QUEUE = 0x5000000, /* 0 Command was added to issue queue */
981 EVENT_START_QUEUE, /* 1 Command moved to start queue */
982 EVENT_SELECT, /* 2 Command completed selection */
983 EVENT_DISCONNECT, /* 3 Command disconnected */
984 EVENT_RESELECT, /* 4 Command reselected */
985 EVENT_COMPLETE, /* 5 Command completed */
986 EVENT_IDLE, /* 6 */
987 EVENT_SELECT_FAILED, /* 7 */
988 EVENT_BEFORE_SELECT, /* 8 */
989 EVENT_RESELECT_FAILED /* 9 */
990};
991
992struct NCR53c7x0_event {
993 enum ncr_event event; /* What type of event */
994 unsigned char target;
995 unsigned char lun;
996 struct timeval time;
997 u32 *dsa; /* What's in the DSA register now (virt) */
998/*
 999 * A few things from the SCSI command, keyed by its pid, so we know
 1000 * what happened even after the Scsi_Cmnd structure in question has disappeared.
1001 */
1002 unsigned long pid; /* The SCSI PID which caused this
1003 event */
1004 unsigned char cmnd[12];
1005};
1006
1007/*
1008 * Things in the NCR53c7x0_cmd structure are split into two parts :
1009 *
1010 * 1. A fixed portion, for things which are not accessed directly by static NCR
1011 * code (ie, are referenced only by the Linux side of the driver,
1012 * or only by dynamically generated code).
1013 *
1014 * 2. The DSA portion, for things which are accessed directly by static NCR
1015 * code.
1016 *
1017 * This is a little ugly, but it
1018 * 1. Avoids conflicts between the NCR code's picture of the structure, and
1019 * Linux code's idea of what it looks like.
1020 *
1021 * 2. Minimizes the pain in the Linux side of the code needed
1022 * to calculate real dsa locations for things, etc.
1023 *
1024 */
1025
1026struct NCR53c7x0_cmd {
1027 void *real; /* Real, unaligned address for
1028 free function */
1029 void (* free)(void *, int); /* Command to deallocate; NULL
1030 for structures allocated with
1031 scsi_register, etc. */
1032 Scsi_Cmnd *cmd; /* Associated Scsi_Cmnd
1033 structure, Scsi_Cmnd points
1034 at NCR53c7x0_cmd using
1035 host_scribble structure */
1036
1037 int size; /* scsi_malloc'd size of this
1038 structure */
1039
1040 int flags; /* CMD_* flags */
1041
1042 unsigned char cmnd[12]; /* CDB, copied from Scsi_Cmnd */
1043 int result; /* Copy to Scsi_Cmnd when done */
1044
1045 struct { /* Private non-cached bounce buffer */
1046 unsigned char buf[256];
1047 u32 addr;
1048 u32 len;
1049 } bounce;
1050
1051/*
1052 * SDTR and WIDE messages are an either/or affair
1053 * in this message, since we will go into message out and send
1054 * _the whole mess_ without dropping out of message out to
1055 * let the target go into message in after sending the first
1056 * message.
1057 */
1058
1059 unsigned char select[11]; /* Select message, includes
1060 IDENTIFY
1061 (optional) QUEUE TAG
1062 (optional) SDTR or WDTR
1063 */
1064
1065
1066 volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
1067 running, eventually finished */
1068
1069
1070 u32 *data_transfer_start; /* Start of data transfer routines */
 1071 u32 *data_transfer_end; /* Address after end of data transfer
 1072 routines */
1073/*
1074 * The following three fields were moved from the DSA proper to here
1075 * since only dynamically generated NCR code refers to them, meaning
1076 * we don't need dsa_* absolutes, and it is simpler to let the
1077 * host code refer to them directly.
1078 */
1079
1080/*
1081 * HARD CODED : residual and saved_residual need to agree with the sizes
1082 * used in NCR53c7,8xx.scr.
1083 *
1084 * FIXME: we want to consider the case where we have odd-length
1085 * scatter/gather buffers and a WIDE transfer, in which case
1086 * we'll need to use the CHAIN MOVE instruction. Ick.
1087 */
1088 u32 residual[6] __attribute__ ((aligned (4)));
1089 /* Residual data transfer which
1090 allows pointer code to work
1091 right.
1092
1093 [0-1] : Conditional call to
1094 appropriate other transfer
1095 routine.
1096 [2-3] : Residual block transfer
1097 instruction.
1098 [4-5] : Jump to instruction
1099 after splice.
1100 */
1101 u32 saved_residual[6]; /* Copy of old residual, so we
1102 can get another partial
1103 transfer and still recover
1104 */
1105
1106 u32 saved_data_pointer; /* Saved data pointer */
1107
1108 u32 dsa_next_addr; /* _Address_ of dsa_next field
1109 in this dsa for RISCy
1110 style constant. */
1111
1112 u32 dsa_addr; /* Address of dsa; RISCy style
1113 constant */
1114
1115 u32 dsa[0]; /* Variable length (depending
1116 on host type, number of scatter /
1117 gather buffers, etc). */
1118};
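/*
 * Minimal sketch (a hypothetical helper, not in the original source) of
 * the host_scribble linkage noted above : the Scsi_Cmnd carries a
 * pointer to our per-command state, which we cast back on the way in.
 */
static inline struct NCR53c7x0_cmd *NCR53c7x0_cmd_of (Scsi_Cmnd *cmd) {
    return (struct NCR53c7x0_cmd *) cmd->host_scribble;
}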
1119
1120struct NCR53c7x0_break {
1121 u32 *address, old_instruction[2];
1122 struct NCR53c7x0_break *next;
1123 unsigned char old_size; /* Size of old instruction */
1124};
1125
1126/* Indicates that the NCR is not executing code */
1127#define STATE_HALTED 0
1128/*
1129 * Indicates that the NCR is executing the wait for select / reselect
1130 * script. Only used when running NCR53c700-compatible scripts; the only
1131 * state during which an ABORT is _not_ considered an error condition.
1132 */
1133#define STATE_WAITING 1
1134/* Indicates that the NCR is executing other code. */
1135#define STATE_RUNNING 2
1136/*
1137 * Indicates that the NCR was being aborted.
1138 */
1139#define STATE_ABORTING 3
1140/* Indicates that the NCR was successfully aborted. */
1141#define STATE_ABORTED 4
1142/* Indicates that the NCR has been disabled due to a fatal error */
1143#define STATE_DISABLED 5
1144
1145/*
1146 * Where knowledge of SCSI SCRIPTS(tm)-specified values is needed
1147 * in an interrupt handler, a separate handler exists for each
1148 * different SCSI script so we don't have name space problems.
1149 *
1150 * Return values of these handlers are as follows :
1151 */
1152#define SPECIFIC_INT_NOTHING 0 /* don't even restart */
1153#define SPECIFIC_INT_RESTART 1 /* restart at the next instruction */
1154#define SPECIFIC_INT_ABORT 2 /* recoverable error, abort cmd */
1155#define SPECIFIC_INT_PANIC 3 /* unrecoverable error, panic */
1156#define SPECIFIC_INT_DONE 4 /* normal command completion */
1157#define SPECIFIC_INT_BREAK 5 /* break point encountered */
1158
1159struct NCR53c7x0_hostdata {
1160 int size; /* Size of entire Scsi_Host
1161 structure */
1162 int board; /* set to board type, useful if
1163 we have host specific things,
1164 ie, a general purpose I/O
1165 bit is being used to enable
1166 termination, etc. */
1167
1168 int chip; /* set to chip type; 700-66 is
1169 700-66, rest are last three
1170 digits of part number */
1171
1172 char valid_ids[8]; /* Valid SCSI ID's for adapter */
1173
1174 u32 *dsp; /* dsp to restart with after
1175 all stacked interrupts are
1176 handled. */
1177
1178 unsigned dsp_changed:1; /* Has dsp changed within this
1179 set of stacked interrupts ? */
1180
1181 unsigned char dstat; /* Most recent value of dstat */
1182 unsigned dstat_valid:1;
1183
1184 unsigned expecting_iid:1; /* Expect IID interrupt */
1185 unsigned expecting_sto:1; /* Expect STO interrupt */
1186
1187 /*
1188 * The code stays cleaner if we use variables with function
1189 * pointers and offsets that are unique for the different
1190 * scripts rather than having a slew of switch(hostdata->chip)
1191 * statements.
1192 *
1193 * It also means that the #defines from the SCSI SCRIPTS(tm)
1194 * don't have to be visible outside of the script-specific
1195 * instructions, preventing name space pollution.
1196 */
1197
1198 void (* init_fixup)(struct Scsi_Host *host);
1199 void (* init_save_regs)(struct Scsi_Host *host);
1200 void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
1201 void (* soft_reset)(struct Scsi_Host *host);
1202 int (* run_tests)(struct Scsi_Host *host);
1203
1204 /*
1205 * Called when DSTAT_SIR is set, indicating an interrupt generated
1206 * by the INT instruction, where values are unique for each SCSI
1207 * script. Should return one of the SPECIFIC_INT_* values.
1208 */
1209
1210 int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
1211
1212 int dsa_len; /* Size of DSA structure */
1213
1214 /*
1215 * Location of DSA fields for the SCSI SCRIPT corresponding to this
1216 * chip.
1217 */
1218
1219 s32 dsa_start;
1220 s32 dsa_end;
1221 s32 dsa_next;
1222 s32 dsa_prev;
1223 s32 dsa_cmnd;
1224 s32 dsa_select;
1225 s32 dsa_msgout;
1226 s32 dsa_cmdout;
1227 s32 dsa_dataout;
1228 s32 dsa_datain;
1229 s32 dsa_msgin;
1230 s32 dsa_msgout_other;
1231 s32 dsa_write_sync;
1232 s32 dsa_write_resume;
1233 s32 dsa_check_reselect;
1234 s32 dsa_status;
1235 s32 dsa_saved_pointer;
1236 s32 dsa_jump_dest;
1237
1238 /*
1239 * Important entry points that generic fixup code needs
1240 * to know about, fixed up.
1241 */
1242
1243 s32 E_accept_message;
1244 s32 E_command_complete;
1245 s32 E_data_transfer;
1246 s32 E_dsa_code_template;
1247 s32 E_dsa_code_template_end;
1248 s32 E_end_data_transfer;
1249 s32 E_msg_in;
1250 s32 E_initiator_abort;
1251 s32 E_other_transfer;
1252 s32 E_other_in;
1253 s32 E_other_out;
1254 s32 E_target_abort;
1255 s32 E_debug_break;
1256 s32 E_reject_message;
1257 s32 E_respond_message;
1258 s32 E_select;
1259 s32 E_select_msgout;
1260 s32 E_test_0;
1261 s32 E_test_1;
1262 s32 E_test_2;
1263 s32 E_test_3;
1264 s32 E_dsa_zero;
1265 s32 E_cmdout_cmdout;
1266 s32 E_wait_reselect;
1267 s32 E_dsa_code_begin;
1268
1269 long long options; /* Bitfielded set of options enabled */
1270 volatile u32 test_completed; /* Test completed */
1271 int test_running; /* Test currently running */
1272 s32 test_source
1273 __attribute__ ((aligned (4)));
1274 volatile s32 test_dest;
1275
1276 volatile int state; /* state of driver, only used for
1277 OPTION_700 */
1278
1279 unsigned char dmode; /*
1280 * set to the offset of the DMODE
1281 * register for this chip.
1282 */
1283 unsigned char istat; /*
1284 * set to the offset of the ISTAT
1285 * register for this chip.
1286 */
1287
1288 int scsi_clock; /*
1289 * SCSI clock in Hz. 0 may be used
1290 * for unknown, although this will
1291 * disable synchronous negotiation.
1292 */
1293
1294 volatile int intrs; /* Number of interrupts */
1295 volatile int resets; /* Number of SCSI resets */
1296 unsigned char saved_dmode;
1297 unsigned char saved_ctest4;
1298 unsigned char saved_ctest7;
1299 unsigned char saved_dcntl;
1300 unsigned char saved_scntl3;
1301
1302 unsigned char this_id_mask;
1303
1304 /* Debugger information */
1305 struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
1306 *breakpoint_current; /* Current breakpoint being stepped
1307 through, NULL if we are running
1308 normally. */
1309#ifdef NCR_DEBUG
1310 int debug_size; /* Size of debug buffer */
1311 volatile int debug_count; /* Current data count */
1312 volatile char *debug_buf; /* Output ring buffer */
1313 volatile char *debug_write; /* Current write pointer */
1314 volatile char *debug_read; /* Current read pointer */
1315#endif /* def NCR_DEBUG */
1316
1317 /* XXX - primitive debugging junk, remove when working ? */
1318 int debug_print_limit; /* Number of commands to print
1319 out exhaustive debugging
1320 information for if
1321 OPTION_DEBUG_DUMP is set */
1322
1323 unsigned char debug_lun_limit[16]; /* If OPTION_DEBUG_TARGET_LIMIT
1324 set, puke if commands are sent
1325 to other target/lun combinations */
1326
1327 int debug_count_limit; /* Number of commands to execute
1328 before puking to limit debugging
1329 output */
1330
1331
1332 volatile unsigned idle:1; /* set to 1 if idle */
1333
1334 /*
1335 * Table of synchronous+wide transfer parameters set on a per-target
1336 * basis.
1337 */
1338
1339 volatile struct NCR53c7x0_synchronous sync[16]
1340 __attribute__ ((aligned (4)));
1341
1342 volatile Scsi_Cmnd *issue_queue
1343 __attribute__ ((aligned (4)));
1344 /* waiting to be issued by
1345 Linux driver */
1346 volatile struct NCR53c7x0_cmd *running_list;
1347 /* commands running, maintained
1348 by Linux driver */
1349
1350 volatile struct NCR53c7x0_cmd *ncrcurrent; /* currently connected
1351 nexus, ONLY valid for
1352 NCR53c700/NCR53c700-66
1353 */
1354
1355 volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
1356 allocated at probe time,
1357 which we can use for
1358 initialization */
1359 volatile struct NCR53c7x0_cmd *free;
1360 int max_cmd_size; /* Maximum size of NCR53c7x0_cmd
1361 based on number of
1362 scatter/gather segments, etc.
1363 */
1364 volatile int num_cmds; /* Number of commands
1365 allocated */
1366 volatile int extra_allocate;
1367 volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
1368 for this target yet? If not,
1369 do so ASAP */
1370 volatile unsigned char busy[16][8]; /* number of commands
1371 executing on each target
1372 */
1373 /*
1374 * Eventually, I'll switch to a coroutine for calling
1375 * cmd->done(cmd), etc. so that we can overlap interrupt
1376 * processing with this code for maximum performance.
1377 */
1378
1379 volatile struct NCR53c7x0_cmd *finished_queue;
1380
1381 /* Shared variables between SCRIPT and host driver */
1382 volatile u32 *schedule
1383 __attribute__ ((aligned (4))); /* Array of JUMPs to dsa_begin
1384 routines of various DSAs.
1385 When not in use, replace
1386 with jump to next slot */
1387
1388
1389 volatile unsigned char msg_buf[16]; /* buffer for messages
1390 other than the command
1391 complete message */
1392
1393 /* Per-target default synchronous and WIDE messages */
1394 volatile unsigned char synchronous_want[16][5];
1395 volatile unsigned char wide_want[16][4];
1396
1397 /* Bit fielded set of targets we want to speak synchronously with */
1398 volatile u16 initiate_sdtr;
1399 /* Bit fielded set of targets we want to speak wide with */
1400 volatile u16 initiate_wdtr;
1401 /* Bit fielded list of targets we've talked to. */
1402 volatile u16 talked_to;
1403
1404 /* Array of bit-fielded lun lists that we need to request_sense */
1405 volatile unsigned char request_sense[16];
1406
1407 u32 addr_reconnect_dsa_head
1408 __attribute__ ((aligned (4))); /* RISCy style constant,
1409 address of following */
1410 volatile u32 reconnect_dsa_head;
1411 /* Data identifying nexus we are trying to match during reselection */
1412 volatile unsigned char reselected_identify; /* IDENTIFY message */
1413 volatile unsigned char reselected_tag; /* second byte of queue tag
1414 message or 0 */
1415
1416 /* These were static variables before we moved them */
1417
1418 s32 NCR53c7xx_zero
1419 __attribute__ ((aligned (4)));
1420 s32 NCR53c7xx_sink;
1421 u32 NOP_insn;
1422 char NCR53c7xx_msg_reject;
1423 char NCR53c7xx_msg_abort;
1424 char NCR53c7xx_msg_nop;
1425
1426 /*
1427 * Following item introduced by RGH to support the NCR53c710, which is
1428 * VERY brain-dead when it comes to memory moves
1429 */
1430
1431 /* DSA save area used only by the NCR chip */
1432 volatile unsigned long saved2_dsa
1433 __attribute__ ((aligned (4)));
1434
1435 volatile unsigned long emulated_intfly
1436 __attribute__ ((aligned (4)));
1437
1438 volatile int event_size, event_index;
1439 volatile struct NCR53c7x0_event *events;
1440
1441 /* If we need to generate code to kill off the currently connected
1442 command, this is where we do it. Should have a BMI instruction
1443 to source or sink the current data, followed by a JUMP
1444 to abort_connected */
1445
1446 u32 *abort_script;
1447
1448 int script_count; /* Size of script in words */
1449 u32 script[0]; /* Relocated SCSI script */
1450
1451};
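/*
 * A sketch (ours; the real logic lives in 53c7xx.c) of how an interrupt
 * handler consumes the SPECIFIC_INT_* codes returned by the
 * script-specific dstat_sir_intr hook above. Error paths are elided.
 */
static inline void NCR53c7x0_handle_sir (struct Scsi_Host *host,
	struct NCR53c7x0_cmd *cmd) {
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    switch (hostdata->dstat_sir_intr (host, cmd)) {
    case SPECIFIC_INT_NOTHING:	/* don't even restart */
	break;
    case SPECIFIC_INT_RESTART:	/* resume at the (possibly patched) dsp */
	hostdata->dsp_changed = 1;
	break;
    case SPECIFIC_INT_PANIC:	/* unrecoverable : disable the board */
	hostdata->state = STATE_DISABLED;
	break;
    default:			/* ABORT, DONE, BREAK handled elsewhere */
	break;
    }
}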
1452
1453#define SCSI_IRQ_NONE 255
1454#define DMA_NONE 255
1455#define IRQ_AUTO 254
1456#define DMA_AUTO 254
1457
1458#define BOARD_GENERIC 0
1459
1460#define NCR53c7x0_insn_size(insn) \
1461 (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
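/*
 * Sketch (ours) of how the debugger structures above pair with this
 * macro when planting a breakpoint : save the old instruction before
 * overwriting it, so it can be restored on continue. The breakpoint
 * INT instruction is two words, so saving two words suffices even when
 * old_size is 3.
 */
static inline void NCR53c7x0_save_insn (struct NCR53c7x0_break *bp,
	u32 *insn) {
    bp->address = insn;
    bp->old_size = NCR53c7x0_insn_size ((insn[0] >> 24) & 0xff);
    bp->old_instruction[0] = insn[0];
    bp->old_instruction[1] = insn[1];
}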
1462
1463
1464#define NCR53c7x0_local_declare() \
1465 volatile unsigned char *NCR53c7x0_address_memory; \
1466 unsigned int NCR53c7x0_address_io; \
1467 int NCR53c7x0_memory_mapped
1468
1469#define NCR53c7x0_local_setup(host) \
1470 NCR53c7x0_address_memory = (void *) (host)->base; \
1471 NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
1472 NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
1473 host->hostdata[0])->options & OPTION_MEMORY_MAPPED
1474
1475#ifdef BIG_ENDIAN
1476/* These could be more efficient, given that we are always memory mapped,
1477 * but they don't give the same problems as the write macros, so leave
1478 * them. */
1479#ifdef __mc68000__
1480#define NCR53c7x0_read8(address) \
1481 ((unsigned int)raw_inb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) )
1482
1483#define NCR53c7x0_read16(address) \
1484 ((unsigned int)raw_inw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)))
1485#else
1486#define NCR53c7x0_read8(address) \
1487 (NCR53c7x0_memory_mapped ? \
1488 (unsigned int)readb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) : \
1489 inb(NCR53c7x0_address_io + (address)))
1490
1491#define NCR53c7x0_read16(address) \
1492 (NCR53c7x0_memory_mapped ? \
1493 (unsigned int)readw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) : \
1494 inw(NCR53c7x0_address_io + (address)))
1495#endif /* mc68000 */
1496#else
1497#define NCR53c7x0_read8(address) \
1498 (NCR53c7x0_memory_mapped ? \
1499 (unsigned int)readb((u32)NCR53c7x0_address_memory + (u32)(address)) : \
1500 inb(NCR53c7x0_address_io + (address)))
1501
1502#define NCR53c7x0_read16(address) \
1503 (NCR53c7x0_memory_mapped ? \
1504 (unsigned int)readw((u32)NCR53c7x0_address_memory + (u32)(address)) : \
1505 inw(NCR53c7x0_address_io + (address)))
1506#endif
1507
1508#ifdef __mc68000__
1509#define NCR53c7x0_read32(address) \
1510 ((unsigned int) raw_inl((u32)NCR53c7x0_address_memory + (u32)(address)))
1511#else
1512#define NCR53c7x0_read32(address) \
1513 (NCR53c7x0_memory_mapped ? \
1514 (unsigned int) readl((u32)NCR53c7x0_address_memory + (u32)(address)) : \
1515 inl(NCR53c7x0_address_io + (address)))
1516#endif /* mc68000*/
1517
1518#ifdef BIG_ENDIAN
1519/* If we are big-endian, then we are not Intel, so probably don't have
1520 * an i/o map as well as a memory map. So, let's assume memory mapped.
1521 * Also, I am having terrible problems trying to persuade the compiler
1522 * not to lay down code which does a read after write for these macros.
1523 * If you remove 'volatile' from writeb() and friends it is ok....
1524 */
1525
1526#define NCR53c7x0_write8(address,value) \
1527 *(volatile unsigned char *) \
1528 ((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) = (value)
1529
1530#define NCR53c7x0_write16(address,value) \
1531 *(volatile unsigned short *) \
1532 ((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) = (value)
1533
1534#define NCR53c7x0_write32(address,value) \
1535 *(volatile unsigned long *) \
1536 ((u32)NCR53c7x0_address_memory + ((u32)(address))) = (value)
1537
1538#else
1539
1540#define NCR53c7x0_write8(address,value) \
1541 (NCR53c7x0_memory_mapped ? \
1542 ({writeb((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
1543 outb((value), NCR53c7x0_address_io + (address)))
1544
1545#define NCR53c7x0_write16(address,value) \
1546 (NCR53c7x0_memory_mapped ? \
1547 ({writew((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
1548 outw((value), NCR53c7x0_address_io + (address)))
1549
1550#define NCR53c7x0_write32(address,value) \
1551 (NCR53c7x0_memory_mapped ? \
1552 ({writel((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
1553 outl((value), NCR53c7x0_address_io + (address)))
1554
1555#endif
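/*
 * Usage sketch (ours) : the local_declare/local_setup pair caches the
 * chip addresses in locals, after which the accessors above select
 * memory- or io-space automatically. E.g., fetching ISTAT through the
 * per-chip register offset stored in the hostdata structure :
 */
static inline unsigned char NCR53c7x0_fetch_istat (struct Scsi_Host *host) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    NCR53c7x0_local_setup(host);
    return NCR53c7x0_read8 (hostdata->istat);
}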
1556
1557/* Patch arbitrary 32 bit words in the script */
1558#define patch_abs_32(script, offset, symbol, value) \
1559 for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
1560 (u32)); ++i) { \
1561 (script)[A_##symbol##_used[i] - (offset)] += (value); \
1562 if (hostdata->options & OPTION_DEBUG_FIXUP) \
1563 printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
1564 host->host_no, #symbol, i, A_##symbol##_used[i] - \
1565 (int)(offset), #script, (script)[A_##symbol##_used[i] - \
1566 (offset)]); \
1567 }
1568
1569/* Patch read/write instruction immediate field */
1570#define patch_abs_rwri_data(script, offset, symbol, value) \
1571 for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
1572 (u32)); ++i) \
1573 (script)[A_##symbol##_used[i] - (offset)] = \
1574 ((script)[A_##symbol##_used[i] - (offset)] & \
1575 ~DBC_RWRI_IMMEDIATE_MASK) | \
1576 (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
1577 DBC_RWRI_IMMEDIATE_MASK)
1578
1579/* Patch transfer control instruction data field */
1580#define patch_abs_tci_data(script, offset, symbol, value) \
1581 for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
1582 (u32)); ++i) \
1583 (script)[A_##symbol##_used[i] - (offset)] = \
1584 ((script)[A_##symbol##_used[i] - (offset)] & \
1585 ~DBC_TCI_DATA_MASK) | \
1586 (((value) << DBC_TCI_DATA_SHIFT) & \
1587 DBC_TCI_DATA_MASK)
1588
1589/* Patch field in dsa structure (assignment should be +=?) */
1590#define patch_dsa_32(dsa, symbol, word, value) \
1591 { \
1592 (dsa)[(hostdata->symbol - hostdata->dsa_start) / sizeof(u32) \
1593 + (word)] = (value); \
1594 if (hostdata->options & OPTION_DEBUG_DSA) \
1595 printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
1596 #dsa, #symbol, hostdata->symbol, \
1597 (word), (u32) (value)); \
1598 }
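/*
 * Usage sketch (ours, condensed from the sort of thing 53c7xx.c does) :
 * after the script is copied into hostdata->script, every absolute
 * reference is rewritten in place, e.g.
 *
 *	patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
 *	    virt_to_bus ((void *) &hostdata->reconnect_dsa_head));
 *
 * and per-command setup drops values into a freshly built DSA :
 *
 *	patch_dsa_32 (cmd->dsa, dsa_cmnd, 0, virt_to_bus (cmd->cmd));
 *
 * Both macros assume surrounding locals (`int i', `host', `hostdata')
 * are in scope.
 */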
1599
1600/* Paranoid people could use panic() here. */
1601#define FATAL(host) shutdown((host));
1602
1603extern int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
1604 unsigned long base, int io_port, int irq, int dma,
1605 long long options, int clock);
1606
1607#endif /* NCR53c710_C */
1608#endif /* NCR53c710_H */
diff --git a/drivers/scsi/53c7xx.scr b/drivers/scsi/53c7xx.scr
deleted file mode 100644
index 9c5694a2da8a..000000000000
--- a/drivers/scsi/53c7xx.scr
+++ /dev/null
@@ -1,1591 +0,0 @@
1#undef DEBUG
2#undef EVENTS
3#undef NO_SELECTION_TIMEOUT
4#define BIG_ENDIAN
5
6; 53c710 driver. Modified from Drew Eckhardt's driver
7; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
8;
9; I have left the script for the 53c8xx family in here, as it is likely
10; to be useful to see what I changed when bug hunting.
11
12; NCR 53c810 driver, main script
13; Sponsored by
14; iX Multiuser Multitasking Magazine
15; hm@ix.de
16;
17; Copyright 1993, 1994, 1995 Drew Eckhardt
18; Visionary Computing
19; (Unix and Linux consulting and custom programming)
20; drew@PoohSticks.ORG
21; +1 (303) 786-7975
22;
23; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
24;
25; PRE-ALPHA
26;
27; For more information, please consult
28;
29; NCR 53C810
30; PCI-SCSI I/O Processor
31; Data Manual
32;
33; NCR 53C710
34; SCSI I/O Processor
35; Programmer's Guide
36;
37; NCR Microelectronics
38; 1635 Aeroplaza Drive
39; Colorado Springs, CO 80916
40; 1+ (719) 578-3400
41;
42; Toll free literature number
43; +1 (800) 334-5454
44;
45; IMPORTANT : This code is self modifying due to the limitations of
46; the NCR53c7,8xx series chips. Persons debugging this code with
47; the remote debugger should take this into account, and NOT set
48; breakpoints in modified instructions.
49;
50; Design:
51; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
52; microcontroller using a simple instruction set.
53;
54; So, to minimize the effects of interrupt latency, and to maximize
55; throughput, this driver offloads the practical maximum amount
56; of processing to the SCSI chip while still maintaining a common
57; structure.
58;
59; Where tradeoffs were needed between efficiency on the older
60; chips and the newer NCR53c800 series, the NCR53c800 series
61; was chosen.
62;
63; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
64; automate SCSI transfers without host processor intervention, this
65; isn't the case with the NCR53c710 and newer chips which allow
66;
67; - reads and writes to the internal registers from within the SCSI
68; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
69; state so that multiple threads of execution are possible, and also
70; provide an ALU for loop control, etc.
71;
72; - table indirect addressing for some instructions. This allows
73; pointers to be located relative to the DSA (Data Structure
74; Address) register.
75;
76; These features make it possible to implement a mailbox style interface,
77; where the same piece of code is run to handle I/O for multiple threads
78; at once minimizing our need to relocate code. Since the NCR53c700/
79; NCR53c800 series have a unique combination of features, making
80; a standard ingoing/outgoing mailbox system costly, I've modified it.
81;
82; - Mailboxes are a mixture of code and data. This lets us greatly
83; simplify the NCR53c810 code and do things that would otherwise
84; not be possible.
85;
86; The saved data pointer is now implemented as follows :
87;
88; Control flow has been architected such that if control reaches
89; munge_save_data_pointer, on a restore pointers message or
90; reconnection, a jump to the address formerly in the TEMP register
91; will allow the SCSI command to resume execution.
92;
93
94;
95; Note : the DSA structures must be aligned on 32 bit boundaries,
96; since the source and destination of MOVE MEMORY instructions
97; must share the same alignment and this is the alignment of the
98; NCR registers.
99;
100
101; For some systems (MVME166, for example) dmode is always the same, so don't
102; waste time writing it
103
104#if 1
105#define DMODE_MEMORY_TO_NCR
106#define DMODE_MEMORY_TO_MEMORY
107#define DMODE_NCR_TO_MEMORY
108#else
109#define DMODE_MEMORY_TO_NCR MOVE dmode_memory_to_ncr TO DMODE
110#define DMODE_MEMORY_TO_MEMORY MOVE dmode_memory_to_memory TO DMODE
111#define DMODE_NCR_TO_MEMORY MOVE dmode_ncr_to_memory TO DMODE
112#endif
113
114ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
115ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
116ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
117 ; for current dsa
118ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
119 ; sync routine
120ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
121 ; sscf value (53c710)
122ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
123ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
124 ; saved data pointer
125ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
126 ; current residual code
127ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
128 ; saved residual code
129ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
130ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
131ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
132
133;
134; Once a device has initiated reselection, we need to compare it
135; against the singly linked list of commands which have disconnected
136; and are pending reselection. These commands are maintained in
137; an unordered singly linked list of DSA structures, through the
138; DSA pointers at their 'centers' headed by the reconnect_dsa_head
139; pointer.
140;
141; To avoid complications in removing commands from the list,
142; I minimize the amount of expensive (at eight operations per
143; addition @ 500-600ns each) pointer operations which must
144; be done in the NCR driver by precomputing them on the
145; host processor during dsa structure generation.
146;
147; The fixed-up per DSA code knows how to recognize the nexus
148; associated with the corresponding SCSI command, and modifies
149; the source and destination pointers for the MOVE MEMORY
150; instruction which is executed when reselected_ok is called
151; to remove the command from the list. Similarly, DSA is
152; loaded with the address of the next DSA structure and
153; reselected_check_next is called if a failure occurs.
154;
155; Perhaps more concisely, the net effect of the mess is
156;
157; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
158; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
159; src = &dsa->next;
160; if (target_id == dsa->id && target_lun == dsa->lun) {
161; *dest = *src;
162; break;
163; }
164; }
165;
166; if (!dsa)
167; error (int_err_unexpected_reselect);
168; else
169; longjmp (dsa->jump_resume, 0);
170;
171;
172
173#if (CHIP != 700) && (CHIP != 70066)
174; Define DSA structure used for mailboxes
175ENTRY dsa_code_template
176dsa_code_template:
177ENTRY dsa_code_begin
178dsa_code_begin:
179; RGH: Don't care about TEMP and DSA here
180 DMODE_MEMORY_TO_NCR
181 MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
182 DMODE_MEMORY_TO_MEMORY
183#if (CHIP == 710)
184 MOVE MEMORY 4, addr_scratch, saved_dsa
185 ; We are about to go and select the device, so must set SSCF bits
186 MOVE MEMORY 4, dsa_sscf_710, addr_scratch
187#ifdef BIG_ENDIAN
188 MOVE SCRATCH3 TO SFBR
189#else
190 MOVE SCRATCH0 TO SFBR
191#endif
192 MOVE SFBR TO SBCL
193 MOVE MEMORY 4, saved_dsa, addr_dsa
194#else
195 CALL scratch_to_dsa
196#endif
197 CALL select
198; Handle the phase mismatch which may have resulted from the
199; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
200; may or may not be necessary, and we should update script_asm.pl
201; to handle multiple pieces.
202 CLEAR ATN
203 CLEAR ACK
204
205; Replace second operand with address of JUMP instruction dest operand
206; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
207ENTRY dsa_code_fix_jump
208dsa_code_fix_jump:
209 MOVE MEMORY 4, NOP_insn, 0
210 JUMP select_done
211
212; wrong_dsa loads the DSA register with the value of the dsa_next
213; field.
214;
215wrong_dsa:
216#if (CHIP == 710)
217; NOTE DSA is corrupt when we arrive here!
218#endif
219; Patch the MOVE MEMORY INSTRUCTION such that
220; the destination address is the address of the OLD
221; next pointer.
222;
223 MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
224 DMODE_MEMORY_TO_NCR
225;
226; Move the _contents_ of the next pointer into the DSA register as
227; the next I_T_L or I_T_L_Q tuple to check against the established
228; nexus.
229;
230 MOVE MEMORY 4, dsa_temp_next, addr_scratch
231 DMODE_MEMORY_TO_MEMORY
232#if (CHIP == 710)
233 MOVE MEMORY 4, addr_scratch, saved_dsa
234 MOVE MEMORY 4, saved_dsa, addr_dsa
235#else
236 CALL scratch_to_dsa
237#endif
238 JUMP reselected_check_next
239
240ABSOLUTE dsa_save_data_pointer = 0
241ENTRY dsa_code_save_data_pointer
242dsa_code_save_data_pointer:
243#if (CHIP == 710)
244 ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
245 ; We MUST return with DSA correct
246 MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
247; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
248 MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
249 CLEAR ACK
250#ifdef DEBUG
251 INT int_debug_saved
252#endif
253 MOVE MEMORY 4, saved_dsa, addr_dsa
254 JUMP jump_temp
255#else
256 DMODE_NCR_TO_MEMORY
257 MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
258 DMODE_MEMORY_TO_MEMORY
259; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
260 MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
261 CLEAR ACK
262#ifdef DEBUG
263 INT int_debug_saved
264#endif
265 RETURN
266#endif
267ABSOLUTE dsa_restore_pointers = 0
268ENTRY dsa_code_restore_pointers
269dsa_code_restore_pointers:
270#if (CHIP == 710)
271 ; TEMP and DSA are corrupt when we get here, but who cares!
272 MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
273; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
274 MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
275 CLEAR ACK
276 ; Restore DSA, note we don't care about TEMP
277 MOVE MEMORY 4, saved_dsa, addr_dsa
278#ifdef DEBUG
279 INT int_debug_restored
280#endif
281 JUMP jump_temp
282#else
283 DMODE_MEMORY_TO_NCR
284 MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
285 DMODE_MEMORY_TO_MEMORY
286; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
287 MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
288 CLEAR ACK
289#ifdef DEBUG
290 INT int_debug_restored
291#endif
292 RETURN
293#endif
294
295ABSOLUTE dsa_check_reselect = 0
296; dsa_check_reselect determines whether or not the current target and
297; lun match the current DSA
298ENTRY dsa_code_check_reselect
299dsa_code_check_reselect:
300#if (CHIP == 710)
301 /* Arrives here with DSA correct */
302 /* Assumes we are always ID 7 */
303 MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
304 JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
305#else
306 MOVE SSID TO SFBR ; SSID contains 3 bit target ID
307; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
308 JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
309#endif
310;
311; Hack - move to scratch first, since SFBR is not writeable
312; via the CPU and hence a MOVE MEMORY instruction.
313;
314 DMODE_MEMORY_TO_NCR
315 MOVE MEMORY 1, reselected_identify, addr_scratch
316 DMODE_MEMORY_TO_MEMORY
317#ifdef BIG_ENDIAN
318 ; BIG ENDIAN ON MVME16x
319 MOVE SCRATCH3 TO SFBR
320#else
321 MOVE SCRATCH0 TO SFBR
322#endif
323; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
324; Are you sure about that? richard@sleepie.demon.co.uk
325 JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
326; Patch the MOVE MEMORY INSTRUCTION such that
327; the source address is the address of this dsa's
328; next pointer.
329 MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
330 CALL reselected_ok
331#if (CHIP == 710)
332; Restore DSA following memory moves in reselected_ok
333; dsa_temp_sync doesn't really care about DSA, but it has an
334; optional debug INT so a valid DSA is a good idea.
335 MOVE MEMORY 4, saved_dsa, addr_dsa
336#endif
337 CALL dsa_temp_sync
338; Release ACK on the IDENTIFY message _after_ we've set the synchronous
339; transfer parameters!
340 CLEAR ACK
341; Implicitly restore pointers on reselection, so a RETURN
342; will transfer control back to the right spot.
343 CALL REL (dsa_code_restore_pointers)
344 RETURN
345ENTRY dsa_zero
346dsa_zero:
347ENTRY dsa_code_template_end
348dsa_code_template_end:
349
350; Sanity check : if dsa_fields_start != dsa_code_template_end -
351; dsa_zero, puke.
352
353ABSOLUTE dsa_fields_start = 0 ; Sanity marker
354 ; pad 48 bytes (fix this RSN)
355ABSOLUTE dsa_next = 48 ; len 4 Next DSA
356 ; del 4 Previous DSA address
357ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
358ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
359 ; table indirect select
360ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
361 ; select message
362ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
363 ; command
364ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
365ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
366ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
367ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
368ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
369 ; (Synchronous transfer negotiation, etc).
370ABSOLUTE dsa_end = 112
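; A host-side mirror of this layout (a sketch only - the real driver
; reaches these fields through the dsa_* offsets in its hostdata
; structure, fixed up at init time, rather than through a C struct) :
;
;	struct dsa {
;		u8	pad[48];		/* dsa_fields_start */
;		u32	next;			/* 48 : next DSA */
;		u32	prev;			/* 52 : previous DSA */
;		u32	cmnd;			/* 56 : Scsi_Cmnd * */
;		u32	select;			/* 60 : ID, period, offset */
;		u32	msgout[2];		/* 64 : select message */
;		u32	cmdout[2];		/* 72 : command */
;		u32	dataout;		/* 80 : dataout code pointer */
;		u32	datain;			/* 84 : datain code pointer */
;		u32	msgin[2];		/* 88 : msgin */
;		u32	status[2];		/* 96 : status byte */
;		u32	msgout_other[2];	/* 104 : other msgout */
;	};					/* 112 == dsa_end */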
371
372ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
373 ; terminated by a JUMP to wait_reselect
374
375; Linked lists of DSA structures
376ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
377ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
378 ; address of reconnect_dsa_head
379
380; These select the source and destination of a MOVE MEMORY instruction
381ABSOLUTE dmode_memory_to_memory = 0x0
382ABSOLUTE dmode_memory_to_ncr = 0x0
383ABSOLUTE dmode_ncr_to_memory = 0x0
384
385ABSOLUTE addr_scratch = 0x0
386ABSOLUTE addr_temp = 0x0
387#if (CHIP == 710)
388ABSOLUTE saved_dsa = 0x0
389ABSOLUTE emulfly = 0x0
390ABSOLUTE addr_dsa = 0x0
391#endif
392#endif /* CHIP != 700 && CHIP != 70066 */
393
394; Interrupts -
395; MSB indicates type
396; 0 handle error condition
397; 1 handle message
398; 2 handle normal condition
399; 3 debugging interrupt
400; 4 testing interrupt
401; Next byte indicates specific error
402
403; XXX not yet implemented, I'm not sure if I want to -
404; Next byte indicates the routine the error occurred in
405; The LSB indicates the specific place the error occurred
406
407ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
408ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
409ABSOLUTE int_err_unexpected_reselect = 0x00020000
410ABSOLUTE int_err_check_condition = 0x00030000
411ABSOLUTE int_err_no_phase = 0x00040000
412ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
413ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
414ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
415 ; received
416
417ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
418 ; registers.
419ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
420ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
421ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
422ABSOLUTE int_norm_aborted = 0x02040000 ; Aborted *dsa
423ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
424ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
425ABSOLUTE int_debug_break = 0x03000000 ; Break point
426#ifdef DEBUG
427ABSOLUTE int_debug_scheduled = 0x03010000 ; new I/O scheduled
428ABSOLUTE int_debug_idle = 0x03020000 ; scheduler is idle
429ABSOLUTE int_debug_dsa_loaded = 0x03030000 ; dsa reloaded
430ABSOLUTE int_debug_reselected = 0x03040000 ; NCR reselected
431ABSOLUTE int_debug_head = 0x03050000 ; issue head overwritten
432ABSOLUTE int_debug_disconnected = 0x03060000 ; disconnected
433ABSOLUTE int_debug_disconnect_msg = 0x03070000 ; got message to disconnect
434ABSOLUTE int_debug_dsa_schedule = 0x03080000 ; in dsa_schedule
435ABSOLUTE int_debug_reselect_check = 0x03090000 ; Check for reselection of DSA
436ABSOLUTE int_debug_reselected_ok = 0x030a0000 ; Reselection accepted
437#endif
438ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
439#ifdef DEBUG
440ABSOLUTE int_debug_saved = 0x030c0000 ; save/restore pointers
441ABSOLUTE int_debug_restored = 0x030d0000
442ABSOLUTE int_debug_sync = 0x030e0000 ; Sanity check synchronous
443 ; parameters.
444ABSOLUTE int_debug_datain = 0x030f0000 ; going into data in phase
445 ; now.
446ABSOLUTE int_debug_check_dsa = 0x03100000 ; Sanity check DSA against
447 ; SDID.
448#endif
449
450ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
451ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
452ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
453
454
455; These should start with 0x05000000, with low bits incrementing for
456; each one.
457
458#ifdef EVENTS
459ABSOLUTE int_EVENT_SELECT = 0
460ABSOLUTE int_EVENT_DISCONNECT = 0
461ABSOLUTE int_EVENT_RESELECT = 0
462ABSOLUTE int_EVENT_COMPLETE = 0
463ABSOLUTE int_EVENT_IDLE = 0
464ABSOLUTE int_EVENT_SELECT_FAILED = 0
465ABSOLUTE int_EVENT_BEFORE_SELECT = 0
466ABSOLUTE int_EVENT_RESELECT_FAILED = 0
467#endif
468
469ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
470ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
471ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
472ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
473ABSOLUTE NOP_insn = 0 ; NOP instruction
474
475; Pointer to message, potentially multi-byte
476ABSOLUTE msg_buf = 0
477
478; Pointer to holding area for reselection information
479ABSOLUTE reselected_identify = 0
480ABSOLUTE reselected_tag = 0
481
482; Request sense command pointer, it's a 6 byte command, should
483; be constant for all commands since we always want 16 bytes of
484; sense and we don't need to change any fields as we did under
485; SCSI-I when we actually cared about the LUN field.
486;EXTERNAL NCR53c7xx_sense ; Request sense command
487
488#if (CHIP != 700) && (CHIP != 70066)
489; dsa_schedule
490; PURPOSE : after a DISCONNECT message has been received, and pointers
491; saved, insert the current DSA structure at the head of the
492; disconnected queue and fall through to the scheduler.
493;
494; CALLS : OK
495;
496; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
497; of disconnected commands
498;
499; MODIFIES : SCRATCH, reconnect_dsa_head
500;
501; EXITS : always passes control to schedule
502
503ENTRY dsa_schedule
504dsa_schedule:
505#ifdef DEBUG
506 INT int_debug_dsa_schedule
507#endif
508
509;
510; Calculate the address of the next pointer within the DSA
511; structure of the command that is currently disconnecting
512;
513#if (CHIP == 710)
514 ; Read what should be the current DSA from memory - actual DSA
515 ; register is probably corrupt
516 MOVE MEMORY 4, saved_dsa, addr_scratch
517#else
518 CALL dsa_to_scratch
519#endif
520 MOVE SCRATCH0 + dsa_next TO SCRATCH0
521 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
522 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
523 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
524
525; Point the next field of this DSA structure at the current disconnected
526; list
527 DMODE_NCR_TO_MEMORY
528 MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
529 DMODE_MEMORY_TO_MEMORY
530dsa_schedule_insert:
531 MOVE MEMORY 4, reconnect_dsa_head, 0
532
533; And update the head pointer.
534#if (CHIP == 710)
535 ; Read what should be the current DSA from memory - actual DSA
536 ; register is probably corrupt
537 MOVE MEMORY 4, saved_dsa, addr_scratch
538#else
539 CALL dsa_to_scratch
540#endif
541 DMODE_NCR_TO_MEMORY
542 MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
543 DMODE_MEMORY_TO_MEMORY
544/* Temporarily, see what happens. */
545#ifndef ORIGINAL
546#if (CHIP != 710)
547 MOVE SCNTL2 & 0x7f TO SCNTL2
548#endif
549 CLEAR ACK
550#endif
551#if (CHIP == 710)
552 ; Time to correct DSA following memory move
553 MOVE MEMORY 4, saved_dsa, addr_dsa
554#endif
555 WAIT DISCONNECT
556#ifdef EVENTS
557 INT int_EVENT_DISCONNECT;
558#endif
559#ifdef DEBUG
560 INT int_debug_disconnected
561#endif
562 JUMP schedule
563#endif
564
565;
566; select
567;
568; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
569; On success, the current DSA structure is removed from the issue
570; queue. Usually, this is entered as a fall-through from schedule,
571; although the contingent allegiance handling code will write
572; the select entry address to the DSP to restart a command as a
573; REQUEST SENSE. A message is sent (usually IDENTIFY, although
574; additional SDTR or WDTR messages may be sent). COMMAND OUT
575; is handled.
576;
577; INPUTS : DSA - SCSI command, issue_dsa_head
578;
579; CALLS : NOT OK
580;
581; MODIFIES : SCRATCH, issue_dsa_head
582;
583; EXITS : on reselection or selection, go to select_failed
584; otherwise, RETURN so control is passed back to
585; dsa_begin.
586;
587
588ENTRY select
589select:
590
591#ifdef EVENTS
592 INT int_EVENT_BEFORE_SELECT
593#endif
594
595#ifdef DEBUG
596 INT int_debug_scheduled
597#endif
598 CLEAR TARGET
599
600; XXX
601;
602; In effect, SELECTION operations are backgrounded, with execution
603; continuing until code which waits for REQ or a fatal interrupt is
604; encountered.
605;
606; So, for more performance, we could overlap the code which removes
607; the command from the NCRs issue queue with the selection, but
608; at this point I don't want to deal with the error recovery.
609;
610
611#if (CHIP != 700) && (CHIP != 70066)
612#if (CHIP == 710)
613 ; Enable selection timer
614#ifdef NO_SELECTION_TIMEOUT
615 MOVE CTEST7 & 0xff TO CTEST7
616#else
617 MOVE CTEST7 & 0xef TO CTEST7
618#endif
619#endif
620 SELECT ATN FROM dsa_select, select_failed
621 JUMP select_msgout, WHEN MSG_OUT
622ENTRY select_msgout
623select_msgout:
624#if (CHIP == 710)
625 ; Disable selection timer
626 MOVE CTEST7 | 0x10 TO CTEST7
627#endif
628 MOVE FROM dsa_msgout, WHEN MSG_OUT
629#else
630ENTRY select_msgout
631 SELECT ATN 0, select_failed
632select_msgout:
633 MOVE 0, 0, WHEN MSGOUT
634#endif
635
636#ifdef EVENTS
637 INT int_EVENT_SELECT
638#endif
639 RETURN
640
641;
642; select_done
643;
644; PURPOSE: continue on to normal data transfer; called as the exit
645; point from dsa_begin.
646;
647; INPUTS: dsa
648;
649; CALLS: OK
650;
651;
652
653select_done:
654#if (CHIP == 710)
655; NOTE DSA is corrupt when we arrive here!
656 MOVE MEMORY 4, saved_dsa, addr_dsa
657#endif
658
659#ifdef DEBUG
660ENTRY select_check_dsa
661select_check_dsa:
662 INT int_debug_check_dsa
663#endif
664
665; After a successful selection, we should get either a CMD phase or
666; some transfer request negotiation message.
667
668 JUMP cmdout, WHEN CMD
669 INT int_err_unexpected_phase, WHEN NOT MSG_IN
670
671select_msg_in:
672 CALL msg_in, WHEN MSG_IN
673 JUMP select_msg_in, WHEN MSG_IN
674
675cmdout:
676 INT int_err_unexpected_phase, WHEN NOT CMD
677#if (CHIP == 700)
678 INT int_norm_selected
679#endif
680ENTRY cmdout_cmdout
681cmdout_cmdout:
682#if (CHIP != 700) && (CHIP != 70066)
683 MOVE FROM dsa_cmdout, WHEN CMD
684#else
685 MOVE 0, 0, WHEN CMD
686#endif /* (CHIP != 700) && (CHIP != 70066) */
687
688;
689; data_transfer
690; other_out
691; other_in
692; other_transfer
693;
694; PURPOSE : handle the main data transfer for a SCSI command in
695; several parts. In the first part, data_transfer, DATA_IN
696; and DATA_OUT phases are allowed, with the user provided
697; code (usually dynamically generated based on the scatter/gather
698; list associated with a SCSI command) called to handle these
699; phases.
700;
701; After control has passed to one of the user provided
702; DATA_IN or DATA_OUT routines, back calls are made to
703; other_in or other_out to handle non-DATA IN
704; and non-DATA OUT phases respectively, with the state of the active
705; data pointer being preserved in TEMP.
706;
707; On completion, the user code passes control to other_transfer
708; which causes DATA_IN and DATA_OUT to result in unexpected_phase
709; interrupts so that data overruns may be trapped.
710;
711; INPUTS : DSA - SCSI command
712;
713; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
714; other_transfer
715;
716; MODIFIES : SCRATCH
717;
718; EXITS : if STATUS IN is detected, signifying command completion,
719; the NCR jumps to command_complete. If MSG IN occurs, a
720; CALL is made to msg_in. Otherwise, other_transfer runs in
721; an infinite loop.
722;
723
724ENTRY data_transfer
725data_transfer:
726 JUMP cmdout_cmdout, WHEN CMD
727 CALL msg_in, WHEN MSG_IN
728 INT int_err_unexpected_phase, WHEN MSG_OUT
729 JUMP do_dataout, WHEN DATA_OUT
730 JUMP do_datain, WHEN DATA_IN
731 JUMP command_complete, WHEN STATUS
732 JUMP data_transfer
733ENTRY end_data_transfer
734end_data_transfer:
735
736;
737; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
738; should be fixed up whenever the nexus changes so it can point to the
739; correct routine for that command.
740;
741
742#if (CHIP != 700) && (CHIP != 70066)
743; Nasty jump to dsa->dataout
744do_dataout:
745#if (CHIP == 710)
746 MOVE MEMORY 4, saved_dsa, addr_scratch
747#else
748 CALL dsa_to_scratch
749#endif
750 MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
751 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
752 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
753 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
754 DMODE_NCR_TO_MEMORY
755 MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
756 DMODE_MEMORY_TO_MEMORY
757dataout_to_jump:
758 MOVE MEMORY 4, 0, dataout_jump + 4
759#if (CHIP == 710)
760 ; Time to correct DSA following memory move
761 MOVE MEMORY 4, saved_dsa, addr_dsa
762#endif
763dataout_jump:
764 JUMP 0
765
766; Nasty jump to dsa->dsain
767do_datain:
768#if (CHIP == 710)
769 MOVE MEMORY 4, saved_dsa, addr_scratch
770#else
771 CALL dsa_to_scratch
772#endif
773 MOVE SCRATCH0 + dsa_datain TO SCRATCH0
774 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
775 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
776 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
777 DMODE_NCR_TO_MEMORY
778 MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
779 DMODE_MEMORY_TO_MEMORY
780ENTRY datain_to_jump
781datain_to_jump:
782 MOVE MEMORY 4, 0, datain_jump + 4
783#if (CHIP == 710)
784 ; Time to correct DSA following memory move
785 MOVE MEMORY 4, saved_dsa, addr_dsa
786#endif
787#ifdef DEBUG
788 INT int_debug_datain
789#endif
790datain_jump:
791 JUMP 0
792#endif /* (CHIP != 700) && (CHIP != 70066) */
793
794
795; Note that other_out and other_in loop until a non-data phase
796; is discovered, so we only execute return statements when we
797; can go on to the next data phase block move statement.
798
799ENTRY other_out
800other_out:
801#if 0
802 INT 0x03ffdead
803#endif
804 INT int_err_unexpected_phase, WHEN CMD
805 JUMP msg_in_restart, WHEN MSG_IN
806 INT int_err_unexpected_phase, WHEN MSG_OUT
807 INT int_err_unexpected_phase, WHEN DATA_IN
808 JUMP command_complete, WHEN STATUS
809 JUMP other_out, WHEN NOT DATA_OUT
810#if (CHIP == 710)
811; TEMP should be OK, as we got here from a call in the user dataout code.
812#endif
813 RETURN
814
815ENTRY other_in
816other_in:
817#if 0
818 INT 0x03ffdead
819#endif
820 INT int_err_unexpected_phase, WHEN CMD
821 JUMP msg_in_restart, WHEN MSG_IN
822 INT int_err_unexpected_phase, WHEN MSG_OUT
823 INT int_err_unexpected_phase, WHEN DATA_OUT
824 JUMP command_complete, WHEN STATUS
825 JUMP other_in, WHEN NOT DATA_IN
826#if (CHIP == 710)
827; TEMP should be OK, as we got here from a call in the user datain code.
828#endif
829 RETURN
830
831
832ENTRY other_transfer
833other_transfer:
834 INT int_err_unexpected_phase, WHEN CMD
835 CALL msg_in, WHEN MSG_IN
836 INT int_err_unexpected_phase, WHEN MSG_OUT
837 INT int_err_unexpected_phase, WHEN DATA_OUT
838 INT int_err_unexpected_phase, WHEN DATA_IN
839 JUMP command_complete, WHEN STATUS
840 JUMP other_transfer
841
842;
843; msg_in_restart
844; msg_in
845; munge_msg
846;
847; PURPOSE : process messages from a target. msg_in is called when the
848; caller hasn't read the first byte of the message. munge_msg
849; is called when the caller has read the first byte of the message,
850; and left it in SFBR. msg_in_restart is called when the caller
851; hasn't read the first byte of the message, and wishes RETURN
852; to transfer control back to the address of the conditional
853; CALL instruction rather than to the instruction after it.
854;
855; Various int_* interrupts are generated when the host system
856; needs to intervene, as is the case with SDTR, WDTR, and
857; INITIATE RECOVERY messages.
858;
859; When the host system handles one of these interrupts,
860; it can respond by reentering at reject_message,
861; which rejects the message and returns control to
862; the caller of msg_in or munge_msg, accept_message
863; which clears ACK and returns control, or reply_message
864; which sends the message pointed to by the DSA
865; msgout_other table indirect field.
866;
867; DISCONNECT messages are handled by moving the command
868; to the reconnect_dsa_queue.
869#if (CHIP == 710)
870; NOTE: DSA should be valid when we get here - we cannot save both it
871; and TEMP in this routine.
872#endif
873;
874; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
875; only)
876;
877; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
878;
879; MODIFIES : SCRATCH, DSA on DISCONNECT
880;
881; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
882; and normal return from message handlers running under
883; Linux, control is returned to the caller. Receipt
884; of DISCONNECT messages pass control to dsa_schedule.
885;
886ENTRY msg_in_restart
887msg_in_restart:
888; XXX - hackish
889;
890; Since it's easier to debug changes to the statically
891; compiled code, rather than the dynamically generated
892; stuff, such as
893;
894; MOVE x, y, WHEN data_phase
895; CALL other_z, WHEN NOT data_phase
896; MOVE x, y, WHEN data_phase
897;
898; I'd like to have certain routines (notably the message handler)
899; restart on the conditional call rather than the next instruction.
900;
901; So, subtract 8 from the return address
902
903 MOVE TEMP0 + 0xf8 TO TEMP0
904 MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
905 MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
906 MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
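; (Worked example : the ALU is only eight bits wide, so the 32 bit TEMP
; register is adjusted one byte at a time. Adding 0xf8 to TEMP0 and
; 0xff with carry to TEMP1..TEMP3 is two's complement addition of
; 0xfffffff8, i.e. TEMP = TEMP - 8, the size of one two-word SCRIPTS
; instruction.)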
907
908ENTRY msg_in
909msg_in:
910 MOVE 1, msg_buf, WHEN MSG_IN
911
912munge_msg:
913 JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
914 JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
915;
916; XXX - I've seen a handful of broken SCSI devices which fail to issue
917; a SAVE POINTERS message before disconnecting in the middle of
918; a transfer, assuming that the DATA POINTER will be implicitly
919; restored.
920;
921; Historically, I've often done an implicit save when the DISCONNECT
922; message is processed. We may want to consider having the option of
923; doing that here.
924;
925 JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
926 JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
927 JUMP munge_disconnect, IF 0x04 ; DISCONNECT
928 INT int_msg_1, IF 0x07 ; MESSAGE REJECT
929 INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
930#ifdef EVENTS
931 INT int_EVENT_SELECT_FAILED
932#endif
933 JUMP reject_message
934
935munge_2:
936 JUMP reject_message
937;
938; The SCSI standard allows targets to recover from transient
939; error conditions by backing up the data pointer with a
940; RESTORE POINTERS message.
941;
942; So, we must save and restore the _residual_ code as well as
943; the current instruction pointer. Because of this messiness,
944; it is simpler to put dynamic code in the dsa for this and to
945; just do a simple jump down there.
946;
947
948munge_save_data_pointer:
949#if (CHIP == 710)
950 ; We have something in TEMP here, so first we must save that
951 MOVE TEMP0 TO SFBR
952 MOVE SFBR TO SCRATCH0
953 MOVE TEMP1 TO SFBR
954 MOVE SFBR TO SCRATCH1
955 MOVE TEMP2 TO SFBR
956 MOVE SFBR TO SCRATCH2
957 MOVE TEMP3 TO SFBR
958 MOVE SFBR TO SCRATCH3
959 MOVE MEMORY 4, addr_scratch, jump_temp + 4
960 ; Now restore DSA
961 MOVE MEMORY 4, saved_dsa, addr_dsa
962#endif
963 MOVE DSA0 + dsa_save_data_pointer TO SFBR
964 MOVE SFBR TO SCRATCH0
965 MOVE DSA1 + 0xff TO SFBR WITH CARRY
966 MOVE SFBR TO SCRATCH1
967 MOVE DSA2 + 0xff TO SFBR WITH CARRY
968 MOVE SFBR TO SCRATCH2
969 MOVE DSA3 + 0xff TO SFBR WITH CARRY
970 MOVE SFBR TO SCRATCH3
971
972 DMODE_NCR_TO_MEMORY
973 MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
974 DMODE_MEMORY_TO_MEMORY
975jump_dsa_save:
976 JUMP 0
977
978munge_restore_pointers:
979#if (CHIP == 710)
980 ; The code at dsa_restore_pointers will RETURN, but we don't care
981 ; about TEMP here, as it will overwrite it anyway.
982#endif
983 MOVE DSA0 + dsa_restore_pointers TO SFBR
984 MOVE SFBR TO SCRATCH0
985 MOVE DSA1 + 0xff TO SFBR WITH CARRY
986 MOVE SFBR TO SCRATCH1
987 MOVE DSA2 + 0xff TO SFBR WITH CARRY
988 MOVE SFBR TO SCRATCH2
989 MOVE DSA3 + 0xff TO SFBR WITH CARRY
990 MOVE SFBR TO SCRATCH3
991
992 DMODE_NCR_TO_MEMORY
993 MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
994 DMODE_MEMORY_TO_MEMORY
995jump_dsa_restore:
996 JUMP 0
997
998
999munge_disconnect:
1000#ifdef DEBUG
1001 INT int_debug_disconnect_msg
1002#endif
1003
1004/*
1005 * Before, we overlapped processing with waiting for disconnect, but
1006 * debugging was beginning to appear messy. Temporarily move things
1007 * to just before the WAIT DISCONNECT.
1008 */
1009
1010#ifdef ORIGINAL
1011#if (CHIP == 710)
1012; Following clears Unexpected Disconnect bit. What do we do?
1013#else
1014 MOVE SCNTL2 & 0x7f TO SCNTL2
1015#endif
1016 CLEAR ACK
1017#endif
1018
1019#if (CHIP != 700) && (CHIP != 70066)
1020 JUMP dsa_schedule
1021#else
1022 WAIT DISCONNECT
1023 INT int_norm_disconnected
1024#endif
1025
1026munge_extended:
1027 CLEAR ACK
1028 INT int_err_unexpected_phase, WHEN NOT MSG_IN
1029 MOVE 1, msg_buf + 1, WHEN MSG_IN
1030 JUMP munge_extended_2, IF 0x02
1031 JUMP munge_extended_3, IF 0x03
1032 JUMP reject_message
1033
1034munge_extended_2:
1035 CLEAR ACK
1036 MOVE 1, msg_buf + 2, WHEN MSG_IN
1037 JUMP reject_message, IF NOT 0x03 ; Must be WDTR
1038 CLEAR ACK
1039 MOVE 1, msg_buf + 3, WHEN MSG_IN
1040 INT int_msg_wdtr
1041
1042munge_extended_3:
1043 CLEAR ACK
1044 MOVE 1, msg_buf + 2, WHEN MSG_IN
1045 JUMP reject_message, IF NOT 0x01 ; Must be SDTR
1046 CLEAR ACK
1047 MOVE 2, msg_buf + 3, WHEN MSG_IN
1048 INT int_msg_sdtr
1049
1050ENTRY reject_message
1051reject_message:
1052 SET ATN
1053 CLEAR ACK
1054 MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
1055 RETURN
1056
1057ENTRY accept_message
1058accept_message:
1059 CLEAR ATN
1060 CLEAR ACK
1061 RETURN
1062
1063ENTRY respond_message
1064respond_message:
1065 SET ATN
1066 CLEAR ACK
1067 MOVE FROM dsa_msgout_other, WHEN MSG_OUT
1068 RETURN
1069
1070;
1071; command_complete
1072;
1073; PURPOSE : handle command termination when STATUS IN is detected by reading
1074; a status byte followed by a command termination message.
1075;
1076; Normal termination results in an INTFLY instruction, and
1077; the host system can pick out which command terminated by
1078; examining the MESSAGE and STATUS buffers of all currently
1079; executing commands;
1080;
1081; Abnormal (CHECK_CONDITION) termination results in an
1082; int_err_check_condition interrupt so that a REQUEST SENSE
1083; command can be issued out-of-order so that no other command
1084; clears the contingent allegiance condition.
1085;
1086;
1087; INPUTS : DSA - command
1088;
1089; CALLS : OK
1090;
1091; EXITS : On successful termination, control is passed to schedule.
1092; On abnormal termination, the user will usually modify the
1093; DSA fields and corresponding buffers and return control
1094; to select.
1095;
1096
1097ENTRY command_complete
1098command_complete:
1099 MOVE FROM dsa_status, WHEN STATUS
1100#if (CHIP != 700) && (CHIP != 70066)
1101 MOVE SFBR TO SCRATCH0 ; Save status
1102#endif /* (CHIP != 700) && (CHIP != 70066) */
1103ENTRY command_complete_msgin
1104command_complete_msgin:
1105 MOVE FROM dsa_msgin, WHEN MSG_IN
1106; Indicate that we should be expecting a disconnect
1107#if (CHIP != 710)
1108 MOVE SCNTL2 & 0x7f TO SCNTL2
1109#else
1110 ; Above code cleared the Unexpected Disconnect bit, what do we do?
1111#endif
1112 CLEAR ACK
1113#if (CHIP != 700) && (CHIP != 70066)
1114 WAIT DISCONNECT
1115
1116;
1117; The SCSI specification states that when a UNIT ATTENTION condition
1118; is pending, as indicated by a CHECK CONDITION status message,
1119; the target shall revert to asynchronous transfers. Since
1120; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
1121; basis, and returning control to our scheduler could work on a command
1122; running on another lun on that target using the old parameters, we must
1123; interrupt the host processor to get them changed, or change them ourselves.
1124;
1125; Once SCSI-II tagged queueing is implemented, things will be even more
1126; hairy, since contingent allegiance conditions exist on a per-target/lun
1127; basis, and issuing a new command with a different tag would clear it.
1128; In these cases, we must interrupt the host processor to get a request
1129; added to the HEAD of the queue with the request sense command, or we
1130; must automatically issue the request sense command.
1131
1132#if 0
1133 MOVE SCRATCH0 TO SFBR
1134 JUMP command_failed, IF 0x02
1135#endif
1136#if (CHIP == 710)
1137#if defined(MVME16x_INTFLY)
1138; For MVME16x (ie CHIP=710) we will force an INTFLY by triggering a software
1139; interrupt (SW7). We can use SCRATCH, as we are about to jump to
1140; schedule, which corrupts it anyway. Will probably remove this later,
1141; but want to check performance effects first.
1142
1143#define INTFLY_ADDR 0xfff40070
1144
1145 MOVE 0 TO SCRATCH0
1146 MOVE 0x80 TO SCRATCH1
1147 MOVE 0 TO SCRATCH2
1148 MOVE 0 TO SCRATCH3
1149 MOVE MEMORY 4, addr_scratch, INTFLY_ADDR
1150#else
1151 INT int_norm_emulateintfly
1152#endif
1153#else
1154 INTFLY
1155#endif
1156#endif /* (CHIP != 700) && (CHIP != 70066) */
1157#if (CHIP == 710)
1158 ; Time to correct DSA following memory move
1159 MOVE MEMORY 4, saved_dsa, addr_dsa
1160#endif
1161#ifdef EVENTS
1162 INT int_EVENT_COMPLETE
1163#endif
1164#if (CHIP != 700) && (CHIP != 70066)
1165 JUMP schedule
1166command_failed:
1167 INT int_err_check_condition
1168#else
1169 INT int_norm_command_complete
1170#endif
1171
1172;
1173; wait_reselect
1174;
1175; PURPOSE : This is essentially the idle routine, where control lands
1176; when there are no new processes to schedule. wait_reselect
1177; waits for reselection, selection, and new commands.
1178;
1179; When a successful reselection occurs, with the aid
1180; of fixed-up code in each DSA, wait_reselect walks the
1181; reconnect_dsa_queue, asking each DSA whether the target ID
1182; and LUN match its own.
1183;
1184; If a match is found, a call is made back to reselected_ok,
1185; which, through the miracles of self-modifying code, extracts
1186; the found DSA from the reconnect_dsa_queue and then
1187; returns control to the DSA's thread of execution.
1188;
1189; INPUTS : NONE
1190;
1191; CALLS : OK
1192;
1193; MODIFIES : DSA,
1194;
1195; EXITS : On successful reselection, control is returned to the
1196; DSA which called reselected_ok. If the WAIT RESELECT
1197; was interrupted by a new command's arrival signaled by
1198; SIG_P, control is passed to schedule. If the NCR is
1199; selected, the host system is interrupted with an
1200; int_err_selected which is usually responded to by
1201; setting DSP to the target_abort address.
1202
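;
; As a rough C model of the walk described above (a sketch only -
; dsa_entry and its field names are assumptions standing in for the
; patched per-command DSA structures):
;
;	struct dsa_entry {
;		struct dsa_entry *next;
;		unsigned char target, lun;
;	};
;
;	static struct dsa_entry *
;	find_nexus(struct dsa_entry **head, unsigned char target,
;		   unsigned char lun)
;	{
;		struct dsa_entry **prev, *dsa;
;
;		for (prev = head, dsa = *head; dsa != NULL;
;		     prev = &dsa->next, dsa = dsa->next) {
;			if (dsa->target == target && dsa->lun == lun) {
;				*prev = dsa->next;	/* unlink the hit */
;				return dsa;
;			}
;		}
;		return NULL;	/* int_err_unexpected_reselect */
;	}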
1203ENTRY wait_reselect
1204wait_reselect:
1205#ifdef EVENTS
1206 int int_EVENT_IDLE
1207#endif
1208#ifdef DEBUG
1209 int int_debug_idle
1210#endif
1211 WAIT RESELECT wait_reselect_failed
1212
1213reselected:
1214#ifdef EVENTS
1215 int int_EVENT_RESELECT
1216#endif
1217 CLEAR TARGET
1218 DMODE_MEMORY_TO_MEMORY
1219 ; Read all data needed to reestablish the nexus -
1220 MOVE 1, reselected_identify, WHEN MSG_IN
1221 ; We used to CLEAR ACK here.
1222#if (CHIP != 700) && (CHIP != 70066)
1223#ifdef DEBUG
1224 int int_debug_reselected
1225#endif
1226
1227 ; Point DSA at the current head of the disconnected queue.
1228 DMODE_MEMORY_TO_NCR
1229 MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
1230 DMODE_MEMORY_TO_MEMORY
1231#if (CHIP == 710)
1232 MOVE MEMORY 4, addr_scratch, saved_dsa
1233#else
1234 CALL scratch_to_dsa
1235#endif
1236
1237 ; Fix the update-next pointer so that the reconnect_dsa_head
1238 ; pointer is the one that will be updated if this DSA is a hit
1239 ; and we remove it from the queue.
1240
1241 MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
1242#if (CHIP == 710)
1243 ; Time to correct DSA following memory move
1244 MOVE MEMORY 4, saved_dsa, addr_dsa
1245#endif
1246
1247ENTRY reselected_check_next
1248reselected_check_next:
1249#ifdef DEBUG
1250 INT int_debug_reselect_check
1251#endif
1252 ; Check for a NULL pointer.
1253 MOVE DSA0 TO SFBR
1254 JUMP reselected_not_end, IF NOT 0
1255 MOVE DSA1 TO SFBR
1256 JUMP reselected_not_end, IF NOT 0
1257 MOVE DSA2 TO SFBR
1258 JUMP reselected_not_end, IF NOT 0
1259 MOVE DSA3 TO SFBR
1260 JUMP reselected_not_end, IF NOT 0
1261 INT int_err_unexpected_reselect
1262
1263reselected_not_end:
1264 ;
1265 ; XXX the ALU is only eight bits wide, and the assembler
1266	; won't do the dirty work for us. As long as dsa_check_reselect
1267	; is negative, we need to sign-extend with 1 bits to the full
1268	; 32 bit width of the address.
1269	;
1270	; A potential workaround would be to have a known alignment
1271 ; of the DSA structure such that the base address plus
1272 ; dsa_check_reselect doesn't require carrying from bytes
1273 ; higher than the LSB.
1274 ;
1275
1276 MOVE DSA0 TO SFBR
1277 MOVE SFBR + dsa_check_reselect TO SCRATCH0
1278 MOVE DSA1 TO SFBR
1279 MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
1280 MOVE DSA2 TO SFBR
1281 MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
1282 MOVE DSA3 TO SFBR
1283 MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
1284
1285 DMODE_NCR_TO_MEMORY
1286 MOVE MEMORY 4, addr_scratch, reselected_check + 4
1287 DMODE_MEMORY_TO_MEMORY
1288#if (CHIP == 710)
1289 ; Time to correct DSA following memory move
1290 MOVE MEMORY 4, saved_dsa, addr_dsa
1291#endif
1292reselected_check:
1293 JUMP 0
1294
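;
; The four WITH CARRY moves above implement, one byte at a time, the
; 32-bit addition that the 8-bit ALU cannot do directly. In C (a
; sketch of the arithmetic; the chip routes each byte through SFBR,
; it does not loop):
;
;	static unsigned int add_signed_offset(unsigned int dsa,
;					      signed char offset)
;	{
;		/* sign-extend the 8-bit offset, then add byte by byte,
;		 * propagating the carry exactly as the script does */
;		unsigned int ext = (unsigned int)(int)offset;
;		unsigned int result = 0, carry = 0;
;		int i;
;
;		for (i = 0; i < 4; i++) {
;			unsigned int a = (dsa >> (8 * i)) & 0xff;
;			unsigned int b = (ext >> (8 * i)) & 0xff;
;			unsigned int sum = a + b + carry;
;
;			carry = sum >> 8;
;			result |= (sum & 0xff) << (8 * i);
;		}
;		return result;	/* == dsa + offset */
;	}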
1295
1296;
1297;
1298#if (CHIP == 710)
1299; We have problems here - the memory move corrupts TEMP and DSA. This
1300; routine is called from DSA code, and patched from many places. Scratch
1301; is probably free when it is called.
1302; We have to:
1303; copy temp to scratch, one byte at a time
1304; write scratch to patch a jump in place of the return
1305; do the move memory
1306; jump to the patched in return address
1307; DSA is corrupt when we get here, and can be left corrupt
1308
1309ENTRY reselected_ok
1310reselected_ok:
1311 MOVE TEMP0 TO SFBR
1312 MOVE SFBR TO SCRATCH0
1313 MOVE TEMP1 TO SFBR
1314 MOVE SFBR TO SCRATCH1
1315 MOVE TEMP2 TO SFBR
1316 MOVE SFBR TO SCRATCH2
1317 MOVE TEMP3 TO SFBR
1318 MOVE SFBR TO SCRATCH3
1319 MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
1320reselected_ok_patch:
1321 MOVE MEMORY 4, 0, 0
1322reselected_ok_jump:
1323 JUMP 0
1324#else
1325ENTRY reselected_ok
1326reselected_ok:
1327reselected_ok_patch:
1328 MOVE MEMORY 4, 0, 0 ; Patched : first word
1329 ; is address of
1330 ; successful dsa_next
1331 ; Second word is last
1332 ; unsuccessful dsa_next,
1333 ; starting with
1334 ; dsa_reconnect_head
1335 ; We used to CLEAR ACK here.
1336#ifdef DEBUG
1337 INT int_debug_reselected_ok
1338#endif
1339#ifdef DEBUG
1340 INT int_debug_check_dsa
1341#endif
1342 RETURN ; Return control to where
1343#endif
1344#else
1345 INT int_norm_reselected
1346#endif /* (CHIP != 700) && (CHIP != 70066) */
1347
1348selected:
1349 INT int_err_selected;
1350
1351;
1352; A select or reselect failure can be caused by one of three conditions :
1353; 1. SIG_P was set. This will be the case if the user has written
1354; a new value to a previously NULL head of the issue queue.
1355;
1356; 2. The NCR53c810 was selected or reselected by another device.
1357;
1358; 3. The bus was already busy since we were selected or reselected
1359; before starting the command.
1360
1361wait_reselect_failed:
1362#ifdef EVENTS
1363 INT int_EVENT_RESELECT_FAILED
1364#endif
1365; Check selected bit.
1366#if (CHIP == 710)
1367 ; Must work out how to tell if we are selected....
1368#else
1369 MOVE SIST0 & 0x20 TO SFBR
1370 JUMP selected, IF 0x20
1371#endif
1372; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
1373 MOVE CTEST2 & 0x40 TO SFBR
1374 JUMP schedule, IF 0x40
1375; Check connected bit.
1376; FIXME: this needs to change if we support target mode
1377 MOVE ISTAT & 0x08 TO SFBR
1378 JUMP reselected, IF 0x08
1379; FIXME : Something bogus happened, and we shouldn't fail silently.
1380#if 0
1381 JUMP schedule
1382#else
1383 INT int_debug_panic
1384#endif
1385
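;
; The triage above, mirroring the non-710 path, as host-side C (a
; sketch; the register values are assumed to have been latched by the
; caller, and the enum names are illustrative):
;
;	enum idle_exit { EXIT_SELECTED, EXIT_SCHEDULE,
;			 EXIT_RESELECTED, EXIT_PANIC };
;
;	static enum idle_exit triage(unsigned char sist0,
;				     unsigned char ctest2,
;				     unsigned char istat)
;	{
;		if (sist0 & 0x20)	/* selected as a target */
;			return EXIT_SELECTED;
;		if (ctest2 & 0x40)	/* SIG_P: a new command was queued */
;			return EXIT_SCHEDULE;
;		if (istat & 0x08)	/* connected: a reselection won */
;			return EXIT_RESELECTED;
;		return EXIT_PANIC;	/* don't fail silently */
;	}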
1386
1387select_failed:
1388#if (CHIP == 710)
1389 ; Disable selection timer
1390 MOVE CTEST7 | 0x10 TO CTEST7
1391#endif
1392#ifdef EVENTS
1393 int int_EVENT_SELECT_FAILED
1394#endif
1395; Otherwise, mask the selected and reselected bits off SIST0
1396#if (CHIP == 710)
1397 ; Let's assume we don't get selected for now
1398 MOVE SSTAT0 & 0x10 TO SFBR
1399#else
1400 MOVE SIST0 & 0x30 TO SFBR
1401 JUMP selected, IF 0x20
1402#endif
1403 JUMP reselected, IF 0x10
1404; If SIGP is set, the user just gave us another command, and
1405; we should restart or return to the scheduler.
1406; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
1407 MOVE CTEST2 & 0x40 TO SFBR
1408 JUMP select, IF 0x40
1409; Check connected bit.
1410; FIXME: this needs to change if we support target mode
1411; FIXME: is this really necessary?
1412 MOVE ISTAT & 0x08 TO SFBR
1413 JUMP reselected, IF 0x08
1414; FIXME : Something bogus happened, and we shouldn't fail silently.
1415#if 0
1416 JUMP schedule
1417#else
1418 INT int_debug_panic
1419#endif
1420
1421;
1422; test_1
1423; test_2
1424;
1425; PURPOSE : run some verification tests on the NCR. test_1
1426; copies test_src to test_dest and interrupts the host
1427; processor, testing for cache coherency and interrupt
1428; problems in the process.
1429;
1430; test_2 runs a command with offsets relative to the
1431; DSA on entry, and is useful for miscellaneous experimentation.
1432;
1433
1434; Verify that interrupts are working correctly and that we don't
1435; have a cache invalidation problem.
1436
1437ABSOLUTE test_src = 0, test_dest = 0
1438ENTRY test_1
1439test_1:
1440 MOVE MEMORY 4, test_src, test_dest
1441 INT int_test_1
1442
1443;
1444; Run arbitrary commands, with test code establishing a DSA
1445;
1446
1447ENTRY test_2
1448test_2:
1449 CLEAR TARGET
1450#if (CHIP == 710)
1451 ; Enable selection timer
1452#ifdef NO_SELECTION_TIMEOUT
1453 MOVE CTEST7 & 0xff TO CTEST7
1454#else
1455 MOVE CTEST7 & 0xef TO CTEST7
1456#endif
1457#endif
1458 SELECT ATN FROM 0, test_2_fail
1459 JUMP test_2_msgout, WHEN MSG_OUT
1460ENTRY test_2_msgout
1461test_2_msgout:
1462#if (CHIP == 710)
1463 ; Disable selection timer
1464 MOVE CTEST7 | 0x10 TO CTEST7
1465#endif
1466 MOVE FROM 8, WHEN MSG_OUT
1467 MOVE FROM 16, WHEN CMD
1468 MOVE FROM 24, WHEN DATA_IN
1469 MOVE FROM 32, WHEN STATUS
1470 MOVE FROM 40, WHEN MSG_IN
1471#if (CHIP != 710)
1472 MOVE SCNTL2 & 0x7f TO SCNTL2
1473#endif
1474 CLEAR ACK
1475 WAIT DISCONNECT
1476test_2_fail:
1477#if (CHIP == 710)
1478 ; Disable selection timer
1479 MOVE CTEST7 | 0x10 TO CTEST7
1480#endif
1481 INT int_test_2
1482
1483ENTRY debug_break
1484debug_break:
1485 INT int_debug_break
1486
1487;
1488; initiator_abort
1489; target_abort
1490;
1491; PURPOSE : Abort the currently established nexus from within initiator
1492; or target mode.
1493;
1494;
1495
1496ENTRY target_abort
1497target_abort:
1498 SET TARGET
1499 DISCONNECT
1500 CLEAR TARGET
1501 JUMP schedule
1502
1503ENTRY initiator_abort
1504initiator_abort:
1505 SET ATN
1506;
1507; The SCSI-I specification says that targets may go into MSG out at
1508; their leisure upon receipt of the ATN signal. On all versions of the
1509; specification, we can't change phases until REQ transitions true->false,
1510; so we need to sink/source one byte of data to allow the transition.
1511;
1512; For the sake of safety, we'll only source one byte of data in all
1513; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
1514; arbitrary number of bytes.
1515 JUMP spew_cmd, WHEN CMD
1516 JUMP eat_msgin, WHEN MSG_IN
1517 JUMP eat_datain, WHEN DATA_IN
1518 JUMP eat_status, WHEN STATUS
1519 JUMP spew_dataout, WHEN DATA_OUT
1520 JUMP sated
1521spew_cmd:
1522 MOVE 1, NCR53c7xx_zero, WHEN CMD
1523 JUMP sated
1524eat_msgin:
1525 MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
1526 JUMP eat_msgin, WHEN MSG_IN
1527 JUMP sated
1528eat_status:
1529 MOVE 1, NCR53c7xx_sink, WHEN STATUS
1530 JUMP eat_status, WHEN STATUS
1531 JUMP sated
1532eat_datain:
1533 MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
1534 JUMP eat_datain, WHEN DATA_IN
1535 JUMP sated
1536spew_dataout:
1537 MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
1538sated:
1539#if (CHIP != 710)
1540 MOVE SCNTL2 & 0x7f TO SCNTL2
1541#endif
1542 MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
1543 WAIT DISCONNECT
1544 INT int_norm_aborted
1545
1546#if (CHIP != 710)
1547;
1548; dsa_to_scratch
1549; scratch_to_dsa
1550;
1551; PURPOSE :
1552; The NCR chips cannot do a move memory instruction with the DSA register
1553; as the source or destination. So, we provide a couple of subroutines
1554; that let us switch between the DSA register and scratch register.
1555;
1556; Memory moves to/from the DSPS register also don't work, but we
1557; don't use them.
1558;
1559;
1560
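;
; Both helpers below do a 32-bit register copy as four one-byte hops
; through SFBR, the chip's only register-to-register path. A C sketch
; of the same idea (copy_reg32 is a hypothetical name):
;
;	static void copy_reg32(volatile unsigned char dst[4],
;			       const volatile unsigned char src[4])
;	{
;		int i;
;
;		for (i = 0; i < 4; i++) {
;			unsigned char sfbr = src[i];	/* MOVE SRCn TO SFBR */
;			dst[i] = sfbr;			/* MOVE SFBR TO DSTn */
;		}
;	}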
1561
1562dsa_to_scratch:
1563 MOVE DSA0 TO SFBR
1564 MOVE SFBR TO SCRATCH0
1565 MOVE DSA1 TO SFBR
1566 MOVE SFBR TO SCRATCH1
1567 MOVE DSA2 TO SFBR
1568 MOVE SFBR TO SCRATCH2
1569 MOVE DSA3 TO SFBR
1570 MOVE SFBR TO SCRATCH3
1571 RETURN
1572
1573scratch_to_dsa:
1574 MOVE SCRATCH0 TO SFBR
1575 MOVE SFBR TO DSA0
1576 MOVE SCRATCH1 TO SFBR
1577 MOVE SFBR TO DSA1
1578 MOVE SCRATCH2 TO SFBR
1579 MOVE SFBR TO DSA2
1580 MOVE SCRATCH3 TO SFBR
1581 MOVE SFBR TO DSA3
1582 RETURN
1583#endif
1584
1585#if (CHIP == 710)
1586; Little patched jump, used to overcome problems with TEMP getting
1587; corrupted on memory moves.
1588
1589jump_temp:
1590 JUMP 0
1591#endif
diff --git a/drivers/scsi/53c7xx_d.h_shipped b/drivers/scsi/53c7xx_d.h_shipped
deleted file mode 100644
index 21d31b08ec31..000000000000
--- a/drivers/scsi/53c7xx_d.h_shipped
+++ /dev/null
@@ -1,2874 +0,0 @@
1/* DO NOT EDIT - Generated automatically by script_asm.pl */
2static u32 SCRIPT[] = {
3/*
4
5
6
7
8
9; 53c710 driver. Modified from Drew Eckhardt's driver
10; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
11;
12; I have left the script for the 53c8xx family in here, as it is likely
13; to be useful to see what I changed when bug hunting.
14
15; NCR 53c810 driver, main script
16; Sponsored by
17; iX Multiuser Multitasking Magazine
18; hm@ix.de
19;
20; Copyright 1993, 1994, 1995 Drew Eckhardt
21; Visionary Computing
22; (Unix and Linux consulting and custom programming)
23; drew@PoohSticks.ORG
24; +1 (303) 786-7975
25;
26; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
27;
28; PRE-ALPHA
29;
30; For more information, please consult
31;
32; NCR 53C810
33; PCI-SCSI I/O Processor
34; Data Manual
35;
36; NCR 53C710
37; SCSI I/O Processor
38; Programmers Guide
39;
40; NCR Microelectronics
41; 1635 Aeroplaza Drive
42; Colorado Springs, CO 80916
43; 1+ (719) 578-3400
44;
45; Toll free literature number
46; +1 (800) 334-5454
47;
48; IMPORTANT : This code is self modifying due to the limitations of
49; the NCR53c7,8xx series chips. Persons debugging this code with
50; the remote debugger should take this into account, and NOT set
51; breakpoints in modified instructions.
52;
53; Design:
54; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
55; microcontroller using a simple instruction set.
56;
57; So, to minimize the effects of interrupt latency, and to maximize
58; throughput, this driver offloads the practical maximum amount
59; of processing to the SCSI chip while still maintaining a common
60; structure.
61;
62; Where tradeoffs were needed between efficiency on the older
63; chips and the newer NCR53c800 series, the NCR53c800 series
64; was chosen.
65;
66; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
67; automate SCSI transfers without host processor intervention, this
68; isn't the case with the NCR53c710 and newer chips which allow
69;
70; - reads and writes to the internal registers from within the SCSI
71; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
72; state so that multiple threads of execution are possible, and also
73; provide an ALU for loop control, etc.
74;
75; - table indirect addressing for some instructions. This allows
76; pointers to be located relative to the DSA (Data Structure
77; Address) register.
78;
79; These features make it possible to implement a mailbox style interface,
80; where the same piece of code is run to handle I/O for multiple threads
81; at once, minimizing our need to relocate code. Since the NCR53c700/
82; NCR53c800 series have a unique combination of features, making
83; a standard ingoing/outgoing mailbox system costly, I've modified it.
84;
85; - Mailboxes are a mixture of code and data. This lets us greatly
86; simplify the NCR53c810 code and do things that would otherwise
87; not be possible.
88;
89; The saved data pointer is now implemented as follows :
90;
91; Control flow has been architected such that if control reaches
92; munge_save_data_pointer, on a restore pointers message or
93; reconnection, a jump to the address formerly in the TEMP register
94; will allow the SCSI command to resume execution.
95;
96
97;
98; Note : the DSA structures must be aligned on 32 bit boundaries,
99; since the source and destination of MOVE MEMORY instructions
100; must share the same alignment and this is the alignment of the
101; NCR registers.
102;
103
104; For some systems (MVME166, for example) dmode is always the same, so don't
105; waste time writing it
106
107
108
109
110
111
112
113
114
115
116
117ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
118ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
119ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
120 ; for current dsa
121ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
122 ; sync routine
123ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
124 ; sscf value (53c710)
125ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
126ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
127 ; saved data pointer
128ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
129 ; current residual code
130ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
131 ; saved residual code
132ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
133ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
134ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
135
136;
137; Once a device has initiated reselection, we need to compare it
138; against the singly linked list of commands which have disconnected
139; and are pending reselection. These commands are maintained in
140; an unordered singly linked list of DSA structures, through the
141; DSA pointers at their 'centers' headed by the reconnect_dsa_head
142; pointer.
143;
144; To avoid complications in removing commands from the list,
145; I minimize the amount of expensive (at eight operations per
146; addition @ 500-600ns each) pointer operations which must
147; be done in the NCR driver by precomputing them on the
148; host processor during dsa structure generation.
149;
150; The fixed-up per DSA code knows how to recognize the nexus
151; associated with the corresponding SCSI command, and modifies
152; the source and destination pointers for the MOVE MEMORY
153; instruction which is executed when reselected_ok is called
154; to remove the command from the list. Similarly, DSA is
155; loaded with the address of the next DSA structure and
156; reselected_check_next is called if a failure occurs.
157;
158; Perhaps more concisely, the net effect of the mess is
159;
160; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
161; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
162; src = &dsa->next;
163; if (target_id == dsa->id && target_lun == dsa->lun) {
164; *dest = *src;
165; break;
166; }
167; }
168;
169; if (!dsa)
170; error (int_err_unexpected_reselect);
171; else
172; longjmp (dsa->jump_resume, 0);
173;
174;
175
176
177; Define DSA structure used for mailboxes
178ENTRY dsa_code_template
179dsa_code_template:
180ENTRY dsa_code_begin
181dsa_code_begin:
182; RGH: Don't care about TEMP and DSA here
183
184 MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
185
186at 0x00000000 : */ 0xc0000004,0x00000000,0x00000000,
187/*
188
189
190 MOVE MEMORY 4, addr_scratch, saved_dsa
191
192at 0x00000003 : */ 0xc0000004,0x00000000,0x00000000,
193/*
194 ; We are about to go and select the device, so must set SSCF bits
195 MOVE MEMORY 4, dsa_sscf_710, addr_scratch
196
197at 0x00000006 : */ 0xc0000004,0x00000000,0x00000000,
198/*
199
200 MOVE SCRATCH3 TO SFBR
201
202at 0x00000009 : */ 0x72370000,0x00000000,
203/*
204
205
206
207 MOVE SFBR TO SBCL
208
209at 0x0000000b : */ 0x6a0b0000,0x00000000,
210/*
211 MOVE MEMORY 4, saved_dsa, addr_dsa
212
213at 0x0000000d : */ 0xc0000004,0x00000000,0x00000000,
214/*
215
216
217
218 CALL select
219
220at 0x00000010 : */ 0x88080000,0x000001f8,
221/*
222; Handle the phase mismatch which may have resulted from the
223; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
224; may or may not be necessary, and we should update script_asm.pl
225; to handle multiple pieces.
226 CLEAR ATN
227
228at 0x00000012 : */ 0x60000008,0x00000000,
229/*
230 CLEAR ACK
231
232at 0x00000014 : */ 0x60000040,0x00000000,
233/*
234
235; Replace second operand with address of JUMP instruction dest operand
236; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
237ENTRY dsa_code_fix_jump
238dsa_code_fix_jump:
239 MOVE MEMORY 4, NOP_insn, 0
240
241at 0x00000016 : */ 0xc0000004,0x00000000,0x00000000,
242/*
243 JUMP select_done
244
245at 0x00000019 : */ 0x80080000,0x00000230,
246/*
247
248; wrong_dsa loads the DSA register with the value of the dsa_next
249; field.
250;
251wrong_dsa:
252
253; NOTE DSA is corrupt when we arrive here!
254
255; Patch the MOVE MEMORY INSTRUCTION such that
256; the destination address is the address of the OLD
257; next pointer.
258;
259 MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
260
261at 0x0000001b : */ 0xc0000004,0x00000000,0x000007ec,
262/*
263
264;
265; Move the _contents_ of the next pointer into the DSA register as
266; the next I_T_L or I_T_L_Q tuple to check against the established
267; nexus.
268;
269 MOVE MEMORY 4, dsa_temp_next, addr_scratch
270
271at 0x0000001e : */ 0xc0000004,0x00000000,0x00000000,
272/*
273
274
275 MOVE MEMORY 4, addr_scratch, saved_dsa
276
277at 0x00000021 : */ 0xc0000004,0x00000000,0x00000000,
278/*
279 MOVE MEMORY 4, saved_dsa, addr_dsa
280
281at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
282/*
283
284
285
286 JUMP reselected_check_next
287
288at 0x00000027 : */ 0x80080000,0x000006f0,
289/*
290
291ABSOLUTE dsa_save_data_pointer = 0
292ENTRY dsa_code_save_data_pointer
293dsa_code_save_data_pointer:
294
295 ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
296 ; We MUST return with DSA correct
297 MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
298
299at 0x00000029 : */ 0xc0000004,0x000009c8,0x00000000,
300/*
301; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
302 MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
303
304at 0x0000002c : */ 0xc0000018,0x00000000,0x00000000,
305/*
306 CLEAR ACK
307
308at 0x0000002f : */ 0x60000040,0x00000000,
309/*
310
311
312
313 MOVE MEMORY 4, saved_dsa, addr_dsa
314
315at 0x00000031 : */ 0xc0000004,0x00000000,0x00000000,
316/*
317 JUMP jump_temp
318
319at 0x00000034 : */ 0x80080000,0x000009c4,
320/*
321
322ABSOLUTE dsa_restore_pointers = 0
323ENTRY dsa_code_restore_pointers
324dsa_code_restore_pointers:
325
326 ; TEMP and DSA are corrupt when we get here, but who cares!
327 MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
328
329at 0x00000036 : */ 0xc0000004,0x00000000,0x000009c8,
330/*
331; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
332 MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
333
334at 0x00000039 : */ 0xc0000018,0x00000000,0x00000000,
335/*
336 CLEAR ACK
337
338at 0x0000003c : */ 0x60000040,0x00000000,
339/*
340 ; Restore DSA, note we don't care about TEMP
341 MOVE MEMORY 4, saved_dsa, addr_dsa
342
343at 0x0000003e : */ 0xc0000004,0x00000000,0x00000000,
344/*
345
346
347
348 JUMP jump_temp
349
350at 0x00000041 : */ 0x80080000,0x000009c4,
351/*
352
353
354ABSOLUTE dsa_check_reselect = 0
355; dsa_check_reselect determines whether or not the current target and
356; lun match the current DSA
357ENTRY dsa_code_check_reselect
358dsa_code_check_reselect:
359
360
361
362 MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
363
364at 0x00000043 : */ 0x72230000,0x00000000,
365/*
366 JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
367
368at 0x00000045 : */ 0x80848000,0x00ffff50,
369/*
370
371
372
373
374
375;
376; Hack - move to scratch first, since SFBR is not writeable
377; via the CPU and hence a MOVE MEMORY instruction.
378;
379
380 MOVE MEMORY 1, reselected_identify, addr_scratch
381
382at 0x00000047 : */ 0xc0000001,0x00000000,0x00000000,
383/*
384
385
386 ; BIG ENDIAN ON MVME16x
387 MOVE SCRATCH3 TO SFBR
388
389at 0x0000004a : */ 0x72370000,0x00000000,
390/*
391
392
393
394; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
395; Are you sure about that? richard@sleepie.demon.co.uk
396 JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
397
398at 0x0000004c : */ 0x8084f800,0x00ffff34,
399/*
400; Patch the MOVE MEMORY INSTRUCTION such that
401; the source address is the address of this dsa's
402; next pointer.
403 MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
404
405at 0x0000004e : */ 0xc0000004,0x00000000,0x000007e8,
406/*
407 CALL reselected_ok
408
409at 0x00000051 : */ 0x88080000,0x00000798,
410/*
411
412; Restore DSA following memory moves in reselected_ok
413; dsa_temp_sync doesn't really care about DSA, but it has an
414; optional debug INT so a valid DSA is a good idea.
415 MOVE MEMORY 4, saved_dsa, addr_dsa
416
417at 0x00000053 : */ 0xc0000004,0x00000000,0x00000000,
418/*
419
420 CALL dsa_temp_sync
421
422at 0x00000056 : */ 0x88080000,0x00000000,
423/*
424; Release ACK on the IDENTIFY message _after_ we've set the synchronous
425; transfer parameters!
426 CLEAR ACK
427
428at 0x00000058 : */ 0x60000040,0x00000000,
429/*
430; Implicitly restore pointers on reselection, so a RETURN
431; will transfer control back to the right spot.
432 CALL REL (dsa_code_restore_pointers)
433
434at 0x0000005a : */ 0x88880000,0x00ffff68,
435/*
436 RETURN
437
438at 0x0000005c : */ 0x90080000,0x00000000,
439/*
440ENTRY dsa_zero
441dsa_zero:
442ENTRY dsa_code_template_end
443dsa_code_template_end:
444
445; Perform a sanity check: if dsa_fields_start != dsa_code_template_end -
446; dsa_zero, puke.
447
448ABSOLUTE dsa_fields_start = 0 ; Sanity marker
449 ; pad 48 bytes (fix this RSN)
450ABSOLUTE dsa_next = 48 ; len 4 Next DSA
451 ; del 4 Previous DSA address
452ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
453ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
454 ; table indirect select
455ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
456 ; select message
457ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
458 ; command
459ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
460ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
461ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
462ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
463ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
464 ; (Synchronous transfer negotiation, etc).
465ABSOLUTE dsa_end = 112
466
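;
; An illustrative C mirror of the offsets above (a sketch only - the
; real driver computes these in its header; field widths follow the
; "len" annotations, and the names are assumptions):
;
;	struct dsa_mirror {
;		unsigned char fixup_code[48];	/* patched code, padding */
;		unsigned int  next;		/* 48: dsa_next */
;		unsigned int  deleted_prev;	/* 52: old previous-DSA slot */
;		unsigned int  cmnd;		/* 56: Scsi_Cmnd pointer */
;		unsigned int  select;		/* 60: ID, period, offset */
;		unsigned int  msgout[2];	/* 64: select message */
;		unsigned int  cmdout[2];	/* 72: command */
;		unsigned int  dataout;		/* 80: dataout code pointer */
;		unsigned int  datain;		/* 84: datain code pointer */
;		unsigned int  msgin[2];		/* 88: msgin */
;		unsigned int  status[2];	/* 96: status byte */
;		unsigned int  msgout_other[2];	/* 104: other message out */
;	};				/* sizeof == 112 == dsa_end */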
467ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
468 ; terminated by a call to JUMP wait_reselect
469
470; Linked lists of DSA structures
471ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
472ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
473 ; address of reconnect_dsa_head
474
475; These select the source and destination of a MOVE MEMORY instruction
476ABSOLUTE dmode_memory_to_memory = 0x0
477ABSOLUTE dmode_memory_to_ncr = 0x0
478ABSOLUTE dmode_ncr_to_memory = 0x0
479
480ABSOLUTE addr_scratch = 0x0
481ABSOLUTE addr_temp = 0x0
482
483ABSOLUTE saved_dsa = 0x0
484ABSOLUTE emulfly = 0x0
485ABSOLUTE addr_dsa = 0x0
486
487
488
489; Interrupts -
490; MSB indicates type
491; 0 handle error condition
492; 1 handle message
493; 2 handle normal condition
494; 3 debugging interrupt
495; 4 testing interrupt
496; Next byte indicates specific error
497
498; XXX not yet implemented, I'm not sure if I want to -
499; Next byte indicates the routine the error occurred in
500; The LSB indicates the specific place the error occurred
501
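;
; A sketch of how a host interrupt handler can classify these values
; (delivered in DSPS by the INT instruction) using the scheme above;
; the function name is hypothetical:
;
;	static const char *int_class(unsigned int dsps)
;	{
;		switch (dsps >> 24) {
;		case 0:  return "error";
;		case 1:  return "message";
;		case 2:  return "normal";
;		case 3:  return "debug";
;		case 4:  return "test";
;		default: return "unknown";
;		}
;	}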
502ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
503ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
504ABSOLUTE int_err_unexpected_reselect = 0x00020000
505ABSOLUTE int_err_check_condition = 0x00030000
506ABSOLUTE int_err_no_phase = 0x00040000
507ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
508ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
509ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
510 ; received
511
512ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
513 ; registers.
514ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
515ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
516ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
517ABSOLUTE int_norm_aborted = 0x02040000 ; Aborted *dsa
518ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
519ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
520ABSOLUTE int_debug_break = 0x03000000 ; Break point
521
522ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
523
524
525ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
526ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
527ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
528
529
530; These should start with 0x05000000, with low bits incrementing for
531; each one.
532
533
534
535ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
536ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
537ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
538ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
539ABSOLUTE NOP_insn = 0 ; NOP instruction
540
541; Pointer to message, potentially multi-byte
542ABSOLUTE msg_buf = 0
543
544; Pointer to holding area for reselection information
545ABSOLUTE reselected_identify = 0
546ABSOLUTE reselected_tag = 0
547
548; Request sense command pointer. It's a 6 byte command, and should
549; be constant for all commands since we always want 16 bytes of
550; sense and we don't need to change any fields as we did under
551; SCSI-I when we actually cared about the LUN field.
552;EXTERNAL NCR53c7xx_sense ; Request sense command
553
554
555; dsa_schedule
556; PURPOSE : after a DISCONNECT message has been received, and pointers
557; saved, insert the current DSA structure at the head of the
558; disconnected queue and fall through to the scheduler.
559;
560; CALLS : OK
561;
562; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
563; of disconnected commands
564;
565; MODIFIES : SCRATCH, reconnect_dsa_head
566;
567; EXITS : always passes control to schedule
568
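;
; The patched MOVE MEMORY pair below amounts to a plain list-head
; insertion; in C (a sketch, with assumed field names):
;
;	dsa->next = reconnect_dsa_head;
;	reconnect_dsa_head = dsa;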
569ENTRY dsa_schedule
570dsa_schedule:
571
572
573
574
575;
576; Calculate the address of the next pointer within the DSA
577; structure of the command that is currently disconnecting
578;
579
580 ; Read what should be the current DSA from memory - actual DSA
581 ; register is probably corrupt
582 MOVE MEMORY 4, saved_dsa, addr_scratch
583
584at 0x0000005e : */ 0xc0000004,0x00000000,0x00000000,
585/*
586
587
588
589 MOVE SCRATCH0 + dsa_next TO SCRATCH0
590
591at 0x00000061 : */ 0x7e343000,0x00000000,
592/*
593 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
594
595at 0x00000063 : */ 0x7f350000,0x00000000,
596/*
597 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
598
599at 0x00000065 : */ 0x7f360000,0x00000000,
600/*
601 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
602
603at 0x00000067 : */ 0x7f370000,0x00000000,
604/*
605
606; Point the next field of this DSA structure at the current disconnected
607; list
608
609 MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
610
611at 0x00000069 : */ 0xc0000004,0x00000000,0x000001b8,
612/*
613
614dsa_schedule_insert:
615 MOVE MEMORY 4, reconnect_dsa_head, 0
616
617at 0x0000006c : */ 0xc0000004,0x00000000,0x00000000,
618/*
619
620; And update the head pointer.
621
622 ; Read what should be the current DSA from memory - actual DSA
623 ; register is probably corrupt
624 MOVE MEMORY 4, saved_dsa, addr_scratch
625
626at 0x0000006f : */ 0xc0000004,0x00000000,0x00000000,
627/*
628
629
630
631
632 MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
633
634at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
635/*
636
637
638
639
640
641
642 CLEAR ACK
643
644at 0x00000075 : */ 0x60000040,0x00000000,
645/*
646
647
648 ; Time to correct DSA following memory move
649 MOVE MEMORY 4, saved_dsa, addr_dsa
650
651at 0x00000077 : */ 0xc0000004,0x00000000,0x00000000,
652/*
653
654 WAIT DISCONNECT
655
656at 0x0000007a : */ 0x48000000,0x00000000,
657/*
658
659
660
661
662
663
664 JUMP schedule
665
666at 0x0000007c : */ 0x80080000,0x00000000,
667/*
668
669
670;
671; select
672;
673; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
674; On success, the current DSA structure is removed from the issue
675; queue. Usually, this is entered as a fall-through from schedule,
676; although the contingent allegiance handling code will write
677; the select entry address to the DSP to restart a command as a
678; REQUEST SENSE. A message is sent (usually IDENTIFY, although
679; additional SDTR or WDTR messages may be sent). COMMAND OUT
680; is handled.
681;
682; INPUTS : DSA - SCSI command, issue_dsa_head
683;
684; CALLS : NOT OK
685;
686; MODIFIES : SCRATCH, issue_dsa_head
687;
688; EXITS : on reselection or selection, go to select_failed
689; otherwise, RETURN so control is passed back to
690; dsa_begin.
691;
692
693ENTRY select
694select:
695
696
697
698
699
700
701
702
703 CLEAR TARGET
704
705at 0x0000007e : */ 0x60000200,0x00000000,
706/*
707
708; XXX
709;
710; In effect, SELECTION operations are backgrounded, with execution
711; continuing until code which waits for REQ or a fatal interrupt is
712; encountered.
713;
714; So, for more performance, we could overlap the code which removes
715; the command from the NCR's issue queue with the selection, but
716; at this point I don't want to deal with the error recovery.
717;
718
719
720
721 ; Enable selection timer
722
723
724
725 MOVE CTEST7 & 0xef TO CTEST7
726
727at 0x00000080 : */ 0x7c1bef00,0x00000000,
728/*
729
730
731 SELECT ATN FROM dsa_select, select_failed
732
733at 0x00000082 : */ 0x4300003c,0x00000828,
734/*
735 JUMP select_msgout, WHEN MSG_OUT
736
737at 0x00000084 : */ 0x860b0000,0x00000218,
738/*
739ENTRY select_msgout
740select_msgout:
741
742 ; Disable selection timer
743 MOVE CTEST7 | 0x10 TO CTEST7
744
745at 0x00000086 : */ 0x7a1b1000,0x00000000,
746/*
747
748 MOVE FROM dsa_msgout, WHEN MSG_OUT
749
750at 0x00000088 : */ 0x1e000000,0x00000040,
751/*
752
753
754
755
756
757
758
759
760
761
762 RETURN
763
764at 0x0000008a : */ 0x90080000,0x00000000,
765/*
766
767;
768; select_done
769;
770; PURPOSE: continue on to normal data transfer; called as the exit
771; point from dsa_begin.
772;
773; INPUTS: dsa
774;
775; CALLS: OK
776;
777;
778
779select_done:
780
781; NOTE DSA is corrupt when we arrive here!
782 MOVE MEMORY 4, saved_dsa, addr_dsa
783
784at 0x0000008c : */ 0xc0000004,0x00000000,0x00000000,
785/*
786
787
788
789
790
791
792
793
794; After a successful selection, we should get either a CMD phase or
795; some transfer request negotiation message.
796
797 JUMP cmdout, WHEN CMD
798
799at 0x0000008f : */ 0x820b0000,0x0000025c,
800/*
801 INT int_err_unexpected_phase, WHEN NOT MSG_IN
802
803at 0x00000091 : */ 0x9f030000,0x00000000,
804/*
805
806select_msg_in:
807 CALL msg_in, WHEN MSG_IN
808
809at 0x00000093 : */ 0x8f0b0000,0x0000041c,
810/*
811 JUMP select_msg_in, WHEN MSG_IN
812
813at 0x00000095 : */ 0x870b0000,0x0000024c,
814/*
815
816cmdout:
817 INT int_err_unexpected_phase, WHEN NOT CMD
818
819at 0x00000097 : */ 0x9a030000,0x00000000,
820/*
821
822
823
824ENTRY cmdout_cmdout
825cmdout_cmdout:
826
827 MOVE FROM dsa_cmdout, WHEN CMD
828
829at 0x00000099 : */ 0x1a000000,0x00000048,
830/*
831
832
833
834
835;
836; data_transfer
837; other_out
838; other_in
839; other_transfer
840;
841; PURPOSE : handle the main data transfer for a SCSI command in
842; several parts. In the first part, data_transfer, DATA_IN
843; and DATA_OUT phases are allowed, with the user provided
844; code (usually dynamically generated based on the scatter/gather
845; list associated with a SCSI command) called to handle these
846; phases.
847;
848; After control has passed to one of the user provided
849; DATA_IN or DATA_OUT routines, back calls are made to
850; other_in or other_out to handle non-DATA_IN
851; and non-DATA_OUT phases respectively, with the state of the active
852; data pointer being preserved in TEMP.
853;
854; On completion, the user code passes control to other_transfer
855; which causes DATA_IN and DATA_OUT to result in unexpected_phase
856; interrupts so that data overruns may be trapped.
857;
858; INPUTS : DSA - SCSI command
859;
860; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
861; other_transfer
862;
863; MODIFIES : SCRATCH
864;
865; EXITS : if STATUS IN is detected, signifying command completion,
866; the NCR jumps to command_complete. If MSG IN occurs, a
867; CALL is made to msg_in. Otherwise, other_transfer runs in
868; an infinite loop.
869;
870
871ENTRY data_transfer
872data_transfer:
873 JUMP cmdout_cmdout, WHEN CMD
874
875at 0x0000009b : */ 0x820b0000,0x00000264,
876/*
877 CALL msg_in, WHEN MSG_IN
878
879at 0x0000009d : */ 0x8f0b0000,0x0000041c,
880/*
881 INT int_err_unexpected_phase, WHEN MSG_OUT
882
883at 0x0000009f : */ 0x9e0b0000,0x00000000,
884/*
885 JUMP do_dataout, WHEN DATA_OUT
886
887at 0x000000a1 : */ 0x800b0000,0x000002a4,
888/*
889 JUMP do_datain, WHEN DATA_IN
890
891at 0x000000a3 : */ 0x810b0000,0x000002fc,
892/*
893 JUMP command_complete, WHEN STATUS
894
895at 0x000000a5 : */ 0x830b0000,0x0000065c,
896/*
897 JUMP data_transfer
898
899at 0x000000a7 : */ 0x80080000,0x0000026c,
900/*
901ENTRY end_data_transfer
902end_data_transfer:
903
904;
905; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
906; should be fixed up whenever the nexus changes so it can point to the
907; correct routine for that command.
908;
909
910
911; Nasty jump to dsa->dataout
912do_dataout:
913
914 MOVE MEMORY 4, saved_dsa, addr_scratch
915
916at 0x000000a9 : */ 0xc0000004,0x00000000,0x00000000,
917/*
918
919
920
921 MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
922
923at 0x000000ac : */ 0x7e345000,0x00000000,
924/*
925 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
926
927at 0x000000ae : */ 0x7f350000,0x00000000,
928/*
929 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
930
931at 0x000000b0 : */ 0x7f360000,0x00000000,
932/*
933 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
934
935at 0x000000b2 : */ 0x7f370000,0x00000000,
936/*
937
938 MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
939
940at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
941/*
942
943dataout_to_jump:
944 MOVE MEMORY 4, 0, dataout_jump + 4
945
946at 0x000000b7 : */ 0xc0000004,0x00000000,0x000002f8,
947/*
948
949 ; Time to correct DSA following memory move
950 MOVE MEMORY 4, saved_dsa, addr_dsa
951
952at 0x000000ba : */ 0xc0000004,0x00000000,0x00000000,
953/*
954
955dataout_jump:
956 JUMP 0
957
958at 0x000000bd : */ 0x80080000,0x00000000,
959/*
960
961; Nasty jump to dsa->datain
962do_datain:
963
964 MOVE MEMORY 4, saved_dsa, addr_scratch
965
966at 0x000000bf : */ 0xc0000004,0x00000000,0x00000000,
967/*
968
969
970
971 MOVE SCRATCH0 + dsa_datain TO SCRATCH0
972
973at 0x000000c2 : */ 0x7e345400,0x00000000,
974/*
975 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
976
977at 0x000000c4 : */ 0x7f350000,0x00000000,
978/*
979 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
980
981at 0x000000c6 : */ 0x7f360000,0x00000000,
982/*
983 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
984
985at 0x000000c8 : */ 0x7f370000,0x00000000,
986/*
987
988 MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
989
990at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
991/*
992
993ENTRY datain_to_jump
994datain_to_jump:
995 MOVE MEMORY 4, 0, datain_jump + 4
996
997at 0x000000cd : */ 0xc0000004,0x00000000,0x00000350,
998/*
999
1000 ; Time to correct DSA following memory move
1001 MOVE MEMORY 4, saved_dsa, addr_dsa
1002
1003at 0x000000d0 : */ 0xc0000004,0x00000000,0x00000000,
1004/*
1005
1006
1007
1008
1009datain_jump:
1010 JUMP 0
1011
1012at 0x000000d3 : */ 0x80080000,0x00000000,
1013/*
1014
1015
1016
1017; Note that other_out and other_in loop until a non-data phase
1018; is discovered, so we only execute return statements when we
1019; can go on to the next data phase block move statement.
1020
1021ENTRY other_out
1022other_out:
1023
1024
1025
1026 INT int_err_unexpected_phase, WHEN CMD
1027
1028at 0x000000d5 : */ 0x9a0b0000,0x00000000,
1029/*
1030 JUMP msg_in_restart, WHEN MSG_IN
1031
1032at 0x000000d7 : */ 0x870b0000,0x000003fc,
1033/*
1034 INT int_err_unexpected_phase, WHEN MSG_OUT
1035
1036at 0x000000d9 : */ 0x9e0b0000,0x00000000,
1037/*
1038 INT int_err_unexpected_phase, WHEN DATA_IN
1039
1040at 0x000000db : */ 0x990b0000,0x00000000,
1041/*
1042 JUMP command_complete, WHEN STATUS
1043
1044at 0x000000dd : */ 0x830b0000,0x0000065c,
1045/*
1046 JUMP other_out, WHEN NOT DATA_OUT
1047
1048at 0x000000df : */ 0x80030000,0x00000354,
1049/*
1050
1051; TEMP should be OK, as we got here from a call in the user dataout code.
1052
1053 RETURN
1054
1055at 0x000000e1 : */ 0x90080000,0x00000000,
1056/*
1057
1058ENTRY other_in
1059other_in:
1060
1061
1062
1063 INT int_err_unexpected_phase, WHEN CMD
1064
1065at 0x000000e3 : */ 0x9a0b0000,0x00000000,
1066/*
1067 JUMP msg_in_restart, WHEN MSG_IN
1068
1069at 0x000000e5 : */ 0x870b0000,0x000003fc,
1070/*
1071 INT int_err_unexpected_phase, WHEN MSG_OUT
1072
1073at 0x000000e7 : */ 0x9e0b0000,0x00000000,
1074/*
1075 INT int_err_unexpected_phase, WHEN DATA_OUT
1076
1077at 0x000000e9 : */ 0x980b0000,0x00000000,
1078/*
1079 JUMP command_complete, WHEN STATUS
1080
1081at 0x000000eb : */ 0x830b0000,0x0000065c,
1082/*
1083 JUMP other_in, WHEN NOT DATA_IN
1084
1085at 0x000000ed : */ 0x81030000,0x0000038c,
1086/*
1087
1088; TEMP should be OK, as we got here from a call in the user datain code.
1089
1090 RETURN
1091
1092at 0x000000ef : */ 0x90080000,0x00000000,
1093/*
1094
1095
1096ENTRY other_transfer
1097other_transfer:
1098 INT int_err_unexpected_phase, WHEN CMD
1099
1100at 0x000000f1 : */ 0x9a0b0000,0x00000000,
1101/*
1102 CALL msg_in, WHEN MSG_IN
1103
1104at 0x000000f3 : */ 0x8f0b0000,0x0000041c,
1105/*
1106 INT int_err_unexpected_phase, WHEN MSG_OUT
1107
1108at 0x000000f5 : */ 0x9e0b0000,0x00000000,
1109/*
1110 INT int_err_unexpected_phase, WHEN DATA_OUT
1111
1112at 0x000000f7 : */ 0x980b0000,0x00000000,
1113/*
1114 INT int_err_unexpected_phase, WHEN DATA_IN
1115
1116at 0x000000f9 : */ 0x990b0000,0x00000000,
1117/*
1118 JUMP command_complete, WHEN STATUS
1119
1120at 0x000000fb : */ 0x830b0000,0x0000065c,
1121/*
1122 JUMP other_transfer
1123
1124at 0x000000fd : */ 0x80080000,0x000003c4,
1125/*
1126
1127;
1128; msg_in_restart
1129; msg_in
1130; munge_msg
1131;
1132; PURPOSE : process messages from a target. msg_in is called when the
1133; caller hasn't read the first byte of the message. munge_msg
1134; is called when the caller has read the first byte of the message,
1135; and left it in SFBR. msg_in_restart is called when the caller
1136; hasn't read the first byte of the message, and wishes RETURN
1137; to transfer control back to the address of the conditional
1138; CALL instruction rather than to the instruction after it.
1139;
1140; Various int_* interrupts are generated when the host system
1141; needs to intervene, as is the case with SDTR, WDTR, and
1142; INITIATE RECOVERY messages.
1143;
1144; When the host system handles one of these interrupts,
1145; it can respond by reentering at reject_message,
1146; which rejects the message and returns control to
1147; the caller of msg_in or munge_msg, accept_message
1148; which clears ACK and returns control, or reply_message
1149; which sends the message pointed to by the DSA
1150; msgout_other table indirect field.
1151;
1152; DISCONNECT messages are handled by moving the command
1153; to the reconnect_dsa_queue.
1154
1155; NOTE: DSA should be valid when we get here - we cannot save both it
1156; and TEMP in this routine.
1157
1158;
1159; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
1160; only)
1161;
1162; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
1163;
1164; MODIFIES : SCRATCH, DSA on DISCONNECT
1165;
1166; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
1167; and normal return from message handlers running under
1168; Linux, control is returned to the caller. Receipt
1169; of a DISCONNECT message passes control to dsa_schedule.
1170;
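;
; A sketch of the host's three ways of resuming the script after an
; int_msg_* interrupt; resume_at() and the address variables are
; assumed names for "write DSP and restart", not real driver API:
;
;	extern void resume_at(unsigned int dsp);
;	extern unsigned int addr_reject_message, addr_accept_message,
;			    addr_respond_message;
;
;	static void answer_message(int reply)
;	{
;		if (reply < 0)		/* refuse: send MESSAGE REJECT */
;			resume_at(addr_reject_message);
;		else if (reply == 0)	/* accept: just clear ACK */
;			resume_at(addr_accept_message);
;		else			/* respond via dsa_msgout_other */
;			resume_at(addr_respond_message);
;	}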
1171ENTRY msg_in_restart
1172msg_in_restart:
1173; XXX - hackish
1174;
1175; Since it's easier to debug changes to the statically
1176; compiled code than to the dynamically generated
1177; stuff, such as
1178;
1179; MOVE x, y, WHEN data_phase
1180; CALL other_z, WHEN NOT data_phase
1181; MOVE x, y, WHEN data_phase
1182;
1183; I'd like to have certain routines (notably the message handler)
1184; restart on the conditional call rather than the next instruction.
1185;
1186; So, subtract 8 from the return address
1187
1188 MOVE TEMP0 + 0xf8 TO TEMP0
1189
1190at 0x000000ff : */ 0x7e1cf800,0x00000000,
1191/*
1192 MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
1193
1194at 0x00000101 : */ 0x7f1dff00,0x00000000,
1195/*
1196 MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
1197
1198at 0x00000103 : */ 0x7f1eff00,0x00000000,
1199/*
1200 MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
1201
1202at 0x00000105 : */ 0x7f1fff00,0x00000000,
1203/*
1204
1205ENTRY msg_in
1206msg_in:
1207 MOVE 1, msg_buf, WHEN MSG_IN
1208
1209at 0x00000107 : */ 0x0f000001,0x00000000,
1210/*
1211
1212munge_msg:
1213 JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
1214
1215at 0x00000109 : */ 0x800c0001,0x00000574,
1216/*
1217 JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
1218
1219at 0x0000010b : */ 0x800cdf20,0x00000464,
1220/*
1221;
1222; XXX - I've seen a handful of broken SCSI devices which fail to issue
1223; a SAVE POINTERS message before disconnecting in the middle of
1224; a transfer, assuming that the DATA POINTER will be implicitly
1225; restored.
1226;
1227; Historically, I've often done an implicit save when the DISCONNECT
1228; message is processed. We may want to consider having the option of
1229; doing that here.
1230;
1231 JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
1232
1233at 0x0000010d : */ 0x800c0002,0x0000046c,
1234/*
1235 JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
1236
1237at 0x0000010f : */ 0x800c0003,0x00000518,
1238/*
1239 JUMP munge_disconnect, IF 0x04 ; DISCONNECT
1240
1241at 0x00000111 : */ 0x800c0004,0x0000056c,
1242/*
1243 INT int_msg_1, IF 0x07 ; MESSAGE REJECT
1244
1245at 0x00000113 : */ 0x980c0007,0x01020000,
1246/*
1247 INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
1248
1249at 0x00000115 : */ 0x980c000f,0x01020000,
1250/*
1251
1252
1253
1254 JUMP reject_message
1255
1256at 0x00000117 : */ 0x80080000,0x00000604,
1257/*
1258
1259munge_2:
1260 JUMP reject_message
1261
1262at 0x00000119 : */ 0x80080000,0x00000604,
1263/*
1264;
1265; The SCSI standard allows targets to recover from transient
1266; error conditions by backing up the data pointer with a
1267; RESTORE POINTERS message.
1268;
1269; So, we must save and restore the _residual_ code as well as
1270; the current instruction pointer. Because of this messiness,
1271; it is simpler to put dynamic code in the dsa for this and to
1272; just do a simple jump down there.
1273;
1274
1275munge_save_data_pointer:
1276
1277 ; We have something in TEMP here, so first we must save that
1278 MOVE TEMP0 TO SFBR
1279
1280at 0x0000011b : */ 0x721c0000,0x00000000,
1281/*
1282 MOVE SFBR TO SCRATCH0
1283
1284at 0x0000011d : */ 0x6a340000,0x00000000,
1285/*
1286 MOVE TEMP1 TO SFBR
1287
1288at 0x0000011f : */ 0x721d0000,0x00000000,
1289/*
1290 MOVE SFBR TO SCRATCH1
1291
1292at 0x00000121 : */ 0x6a350000,0x00000000,
1293/*
1294 MOVE TEMP2 TO SFBR
1295
1296at 0x00000123 : */ 0x721e0000,0x00000000,
1297/*
1298 MOVE SFBR TO SCRATCH2
1299
1300at 0x00000125 : */ 0x6a360000,0x00000000,
1301/*
1302 MOVE TEMP3 TO SFBR
1303
1304at 0x00000127 : */ 0x721f0000,0x00000000,
1305/*
1306 MOVE SFBR TO SCRATCH3
1307
1308at 0x00000129 : */ 0x6a370000,0x00000000,
1309/*
1310 MOVE MEMORY 4, addr_scratch, jump_temp + 4
1311
1312at 0x0000012b : */ 0xc0000004,0x00000000,0x000009c8,
1313/*
1314 ; Now restore DSA
1315 MOVE MEMORY 4, saved_dsa, addr_dsa
1316
1317at 0x0000012e : */ 0xc0000004,0x00000000,0x00000000,
1318/*
1319
1320 MOVE DSA0 + dsa_save_data_pointer TO SFBR
1321
1322at 0x00000131 : */ 0x76100000,0x00000000,
1323/*
1324 MOVE SFBR TO SCRATCH0
1325
1326at 0x00000133 : */ 0x6a340000,0x00000000,
1327/*
1328 MOVE DSA1 + 0xff TO SFBR WITH CARRY
1329
1330at 0x00000135 : */ 0x7711ff00,0x00000000,
1331/*
1332 MOVE SFBR TO SCRATCH1
1333
1334at 0x00000137 : */ 0x6a350000,0x00000000,
1335/*
1336 MOVE DSA2 + 0xff TO SFBR WITH CARRY
1337
1338at 0x00000139 : */ 0x7712ff00,0x00000000,
1339/*
1340 MOVE SFBR TO SCRATCH2
1341
1342at 0x0000013b : */ 0x6a360000,0x00000000,
1343/*
1344 MOVE DSA3 + 0xff TO SFBR WITH CARRY
1345
1346at 0x0000013d : */ 0x7713ff00,0x00000000,
1347/*
1348 MOVE SFBR TO SCRATCH3
1349
1350at 0x0000013f : */ 0x6a370000,0x00000000,
1351/*
1352
1353
1354 MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
1355
1356at 0x00000141 : */ 0xc0000004,0x00000000,0x00000514,
1357/*
1358
1359jump_dsa_save:
1360 JUMP 0
1361
1362at 0x00000144 : */ 0x80080000,0x00000000,
1363/*
1364
1365munge_restore_pointers:
1366
1367 ; The code at dsa_restore_pointers will RETURN, but we don't care
1368 ; about TEMP here, as it will overwrite it anyway.
1369
1370 MOVE DSA0 + dsa_restore_pointers TO SFBR
1371
1372at 0x00000146 : */ 0x76100000,0x00000000,
1373/*
1374 MOVE SFBR TO SCRATCH0
1375
1376at 0x00000148 : */ 0x6a340000,0x00000000,
1377/*
1378 MOVE DSA1 + 0xff TO SFBR WITH CARRY
1379
1380at 0x0000014a : */ 0x7711ff00,0x00000000,
1381/*
1382 MOVE SFBR TO SCRATCH1
1383
1384at 0x0000014c : */ 0x6a350000,0x00000000,
1385/*
1386 MOVE DSA2 + 0xff TO SFBR WITH CARRY
1387
1388at 0x0000014e : */ 0x7712ff00,0x00000000,
1389/*
1390 MOVE SFBR TO SCRATCH2
1391
1392at 0x00000150 : */ 0x6a360000,0x00000000,
1393/*
1394 MOVE DSA3 + 0xff TO SFBR WITH CARRY
1395
1396at 0x00000152 : */ 0x7713ff00,0x00000000,
1397/*
1398 MOVE SFBR TO SCRATCH3
1399
1400at 0x00000154 : */ 0x6a370000,0x00000000,
1401/*
1402
1403
1404 MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
1405
1406at 0x00000156 : */ 0xc0000004,0x00000000,0x00000568,
1407/*
1408
1409jump_dsa_restore:
1410 JUMP 0
1411
1412at 0x00000159 : */ 0x80080000,0x00000000,
1413/*
1414
1415
1416munge_disconnect:
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437 JUMP dsa_schedule
1438
1439at 0x0000015b : */ 0x80080000,0x00000178,
1440/*
1441
1442
1443
1444
1445
1446munge_extended:
1447 CLEAR ACK
1448
1449at 0x0000015d : */ 0x60000040,0x00000000,
1450/*
1451 INT int_err_unexpected_phase, WHEN NOT MSG_IN
1452
1453at 0x0000015f : */ 0x9f030000,0x00000000,
1454/*
1455 MOVE 1, msg_buf + 1, WHEN MSG_IN
1456
1457at 0x00000161 : */ 0x0f000001,0x00000001,
1458/*
1459 JUMP munge_extended_2, IF 0x02
1460
1461at 0x00000163 : */ 0x800c0002,0x000005a4,
1462/*
1463 JUMP munge_extended_3, IF 0x03
1464
1465at 0x00000165 : */ 0x800c0003,0x000005d4,
1466/*
1467 JUMP reject_message
1468
1469at 0x00000167 : */ 0x80080000,0x00000604,
1470/*
1471
1472munge_extended_2:
1473 CLEAR ACK
1474
1475at 0x00000169 : */ 0x60000040,0x00000000,
1476/*
1477 MOVE 1, msg_buf + 2, WHEN MSG_IN
1478
1479at 0x0000016b : */ 0x0f000001,0x00000002,
1480/*
1481 JUMP reject_message, IF NOT 0x02 ; Must be WDTR
1482
1483at 0x0000016d : */ 0x80040002,0x00000604,
1484/*
1485 CLEAR ACK
1486
1487at 0x0000016f : */ 0x60000040,0x00000000,
1488/*
1489 MOVE 1, msg_buf + 3, WHEN MSG_IN
1490
1491at 0x00000171 : */ 0x0f000001,0x00000003,
1492/*
1493 INT int_msg_wdtr
1494
1495at 0x00000173 : */ 0x98080000,0x01000000,
1496/*
1497
1498munge_extended_3:
1499 CLEAR ACK
1500
1501at 0x00000175 : */ 0x60000040,0x00000000,
1502/*
1503 MOVE 1, msg_buf + 2, WHEN MSG_IN
1504
1505at 0x00000177 : */ 0x0f000001,0x00000002,
1506/*
1507 JUMP reject_message, IF NOT 0x01 ; Must be SDTR
1508
1509at 0x00000179 : */ 0x80040001,0x00000604,
1510/*
1511 CLEAR ACK
1512
1513at 0x0000017b : */ 0x60000040,0x00000000,
1514/*
1515 MOVE 2, msg_buf + 3, WHEN MSG_IN
1516
1517at 0x0000017d : */ 0x0f000002,0x00000003,
1518/*
1519 INT int_msg_sdtr
1520
1521at 0x0000017f : */ 0x98080000,0x01010000,
1522/*
1523
1524ENTRY reject_message
1525reject_message:
1526 SET ATN
1527
1528at 0x00000181 : */ 0x58000008,0x00000000,
1529/*
1530 CLEAR ACK
1531
1532at 0x00000183 : */ 0x60000040,0x00000000,
1533/*
1534 MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
1535
1536at 0x00000185 : */ 0x0e000001,0x00000000,
1537/*
1538 RETURN
1539
1540at 0x00000187 : */ 0x90080000,0x00000000,
1541/*
1542
1543ENTRY accept_message
1544accept_message:
1545 CLEAR ATN
1546
1547at 0x00000189 : */ 0x60000008,0x00000000,
1548/*
1549 CLEAR ACK
1550
1551at 0x0000018b : */ 0x60000040,0x00000000,
1552/*
1553 RETURN
1554
1555at 0x0000018d : */ 0x90080000,0x00000000,
1556/*
1557
1558ENTRY respond_message
1559respond_message:
1560 SET ATN
1561
1562at 0x0000018f : */ 0x58000008,0x00000000,
1563/*
1564 CLEAR ACK
1565
1566at 0x00000191 : */ 0x60000040,0x00000000,
1567/*
1568 MOVE FROM dsa_msgout_other, WHEN MSG_OUT
1569
1570at 0x00000193 : */ 0x1e000000,0x00000068,
1571/*
1572 RETURN
1573
1574at 0x00000195 : */ 0x90080000,0x00000000,
1575/*
1576
1577;
1578; command_complete
1579;
1580; PURPOSE : handle command termination when STATUS IN is detected by reading
1581; a status byte followed by a command termination message.
1582;
1583; Normal termination results in an INTFLY instruction, and
1584; the host system can pick out which command terminated by
1585; examining the MESSAGE and STATUS buffers of all currently
1586; executing commands;
1587;
1588; Abnormal (CHECK_CONDITION) termination results in an
1589; int_err_check_condition interrupt so that a REQUEST SENSE
1590; command can be issued out-of-order, ensuring that no other command
1591; clears the contingent allegiance condition.
1592;
1593;
1594; INPUTS : DSA - command
1595;
1596; CALLS : OK
1597;
1598; EXITS : On successful termination, control is passed to schedule.
1599; On abnormal termination, the user will usually modify the
1600; DSA fields and corresponding buffers and return control
1601; to select.
1602;
1603
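;
; A sketch of the INTFLY-side scan described above. The slot layout
; and names are assumptions, as is presetting msgin to 0xff at issue
; time so a COMMAND COMPLETE byte is distinguishable:
;
;	struct cmd_slot {
;		int running;
;		unsigned char status;	/* filled via dsa_status */
;		unsigned char msgin;	/* filled via dsa_msgin */
;	};
;
;	static void intfly_scan(struct cmd_slot *slot, int n)
;	{
;		int i;
;
;		for (i = 0; i < n; i++) {
;			/* COMMAND COMPLETE (0x00) marks a done nexus */
;			if (slot[i].running && slot[i].msgin == 0x00) {
;				slot[i].running = 0;
;				/* report slot[i].status to the midlayer */
;			}
;		}
;	}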
1604ENTRY command_complete
1605command_complete:
1606 MOVE FROM dsa_status, WHEN STATUS
1607
1608at 0x00000197 : */ 0x1b000000,0x00000060,
1609/*
1610
1611 MOVE SFBR TO SCRATCH0 ; Save status
1612
1613at 0x00000199 : */ 0x6a340000,0x00000000,
1614/*
1615
1616ENTRY command_complete_msgin
1617command_complete_msgin:
1618 MOVE FROM dsa_msgin, WHEN MSG_IN
1619
1620at 0x0000019b : */ 0x1f000000,0x00000058,
1621/*
1622; Indicate that we should be expecting a disconnect
1623
1624
1625
1626 ; Above code cleared the Unexpected Disconnect bit, what do we do?
1627
1628 CLEAR ACK
1629
1630at 0x0000019d : */ 0x60000040,0x00000000,
1631/*
1632
1633 WAIT DISCONNECT
1634
1635at 0x0000019f : */ 0x48000000,0x00000000,
1636/*
1637
1638;
1639; The SCSI specification states that when a UNIT ATTENTION condition
1640; is pending, as indicated by a CHECK CONDITION status,
1641; the target shall revert to asynchronous transfers. Since
1642; synchronous transfer parameters are maintained on a per INITIATOR/TARGET
1643; basis, and returning control to our scheduler could start a command
1644; on another LUN of that target using the old parameters, we must
1645; interrupt the host processor to get them changed, or change them ourselves.
1646;
1647; Once SCSI-II tagged queueing is implemented, things will be even more
1648; hairy, since contingent allegiance conditions exist on a per-target/lun
1649; basis, and issuing a new command with a different tag would clear it.
1650; In these cases, we must interrupt the host processor to get a request
1651; added to the HEAD of the queue with the request sense command, or we
1652; must automatically issue the request sense command.
1653
1654
1655
1656
1657
1658
1659
1660 INT int_norm_emulateintfly
1661
1662at 0x000001a1 : */ 0x98080000,0x02060000,
1663/*
1664
1665
1666
1667
1668
1669
1670 ; Time to correct DSA following memory move
1671 MOVE MEMORY 4, saved_dsa, addr_dsa
1672
1673at 0x000001a3 : */ 0xc0000004,0x00000000,0x00000000,
1674/*
1675
1676
1677
1678
1679
1680 JUMP schedule
1681
1682at 0x000001a6 : */ 0x80080000,0x00000000,
1683/*
1684command_failed:
1685 INT int_err_check_condition
1686
1687at 0x000001a8 : */ 0x98080000,0x00030000,
1688/*
1689
1690
1691
1692
1693;
1694; wait_reselect
1695;
1696; PURPOSE : This is essentially the idle routine, where control lands
1697; when there are no new processes to schedule. wait_reselect
1698; waits for reselection, selection, and new commands.
1699;
1700; When a successful reselection occurs, with the aid
1701; of fixed-up code in each DSA, wait_reselect walks the
1702; reconnect_dsa_queue, asking each DSA whether the target ID
1703; and LUN match its own.
1704;
1705; If a match is found, a call is made back to reselected_ok,
1706; which, through the miracles of self-modifying code, extracts
1707; the found DSA from the reconnect_dsa_queue and then
1708; returns control to the DSA's thread of execution.
1709;
1710; INPUTS : NONE
1711;
1712; CALLS : OK
1713;
1714; MODIFIES : DSA,
1715;
1716; EXITS : On successful reselection, control is returned to the
1717; DSA which called reselected_ok. If the WAIT RESELECT
1718; was interrupted by a new command's arrival signaled by
1719; SIG_P, control is passed to schedule. If the NCR is
1720; selected, the host system is interrupted with an
1721; int_err_selected which is usually responded to by
1722; setting DSP to the target_abort address.
1723
1724ENTRY wait_reselect
1725wait_reselect:
1726
1727
1728
1729
1730
1731
1732 WAIT RESELECT wait_reselect_failed
1733
1734at 0x000001aa : */ 0x50000000,0x00000800,
1735/*
1736
1737reselected:
1738
1739
1740
1741 CLEAR TARGET
1742
1743at 0x000001ac : */ 0x60000200,0x00000000,
1744/*
1745
1746 ; Read all data needed to reestablish the nexus -
1747 MOVE 1, reselected_identify, WHEN MSG_IN
1748
1749at 0x000001ae : */ 0x0f000001,0x00000000,
1750/*
1751 ; We used to CLEAR ACK here.
1752
1753
1754
1755
1756
1757 ; Point DSA at the current head of the disconnected queue.
1758
1759 MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
1760
1761at 0x000001b0 : */ 0xc0000004,0x00000000,0x00000000,
1762/*
1763
1764
1765 MOVE MEMORY 4, addr_scratch, saved_dsa
1766
1767at 0x000001b3 : */ 0xc0000004,0x00000000,0x00000000,
1768/*
1769
1770
1771
1772
1773 ; Fix the update-next pointer so that the reconnect_dsa_head
1774 ; pointer is the one that will be updated if this DSA is a hit
1775 ; and we remove it from the queue.
1776
1777 MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
1778
1779at 0x000001b6 : */ 0xc0000004,0x00000000,0x000007ec,
1780/*
1781
1782 ; Time to correct DSA following memory move
1783 MOVE MEMORY 4, saved_dsa, addr_dsa
1784
1785at 0x000001b9 : */ 0xc0000004,0x00000000,0x00000000,
1786/*
1787
1788
1789ENTRY reselected_check_next
1790reselected_check_next:
1791
1792
1793
1794 ; Check for a NULL pointer.
1795 MOVE DSA0 TO SFBR
1796
1797at 0x000001bc : */ 0x72100000,0x00000000,
1798/*
1799 JUMP reselected_not_end, IF NOT 0
1800
1801at 0x000001be : */ 0x80040000,0x00000738,
1802/*
1803 MOVE DSA1 TO SFBR
1804
1805at 0x000001c0 : */ 0x72110000,0x00000000,
1806/*
1807 JUMP reselected_not_end, IF NOT 0
1808
1809at 0x000001c2 : */ 0x80040000,0x00000738,
1810/*
1811 MOVE DSA2 TO SFBR
1812
1813at 0x000001c4 : */ 0x72120000,0x00000000,
1814/*
1815 JUMP reselected_not_end, IF NOT 0
1816
1817at 0x000001c6 : */ 0x80040000,0x00000738,
1818/*
1819 MOVE DSA3 TO SFBR
1820
1821at 0x000001c8 : */ 0x72130000,0x00000000,
1822/*
1823 JUMP reselected_not_end, IF NOT 0
1824
1825at 0x000001ca : */ 0x80040000,0x00000738,
1826/*
1827 INT int_err_unexpected_reselect
1828
1829at 0x000001cc : */ 0x98080000,0x00020000,
1830/*
1831
1832reselected_not_end:
1833 ;
1834 ; XXX the ALU is only eight bits wide, and the assembler
1835 ; won't do the dirty work for us. As long as dsa_check_reselect
1836 ; is negative, we need to sign extend with 1 bits to the full
1837 ; 32 bit width of the address.
1838 ;
1839 ; A potential workaround would be to have a known alignment
1840 ; of the DSA structure such that the base address plus
1841 ; dsa_check_reselect doesn't require carrying from bytes
1842 ; higher than the LSB.
1843 ;
1844
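
Concretely, the byte-wise additions below compute a 32-bit sum of the DSA
base address and the 8-bit displacement, propagating the carry and using
0xff for the sign-extended high bytes. A C sketch of the same arithmetic,
valid only for a negative displacement (the script's stated assumption):

	#include <stdint.h>

	static uint32_t add_negative_disp8(uint32_t base, int8_t disp)
	{
		/* Byte 0: add the raw displacement; bytes 1-3: add the
		 * 0xff sign extension plus the carry from the byte below. */
		uint32_t b0 = (base & 0xff) + (uint8_t)disp;
		uint32_t b1 = ((base >>  8) & 0xff) + 0xff + (b0 >> 8);
		uint32_t b2 = ((base >> 16) & 0xff) + 0xff + (b1 >> 8);
		uint32_t b3 = ((base >> 24) & 0xff) + 0xff + (b2 >> 8);

		return  (b0 & 0xff)        | ((b1 & 0xff) <<  8) |
		       ((b2 & 0xff) << 16) | ((b3 & 0xff) << 24);
	}
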
1845 MOVE DSA0 TO SFBR
1846
1847at 0x000001ce : */ 0x72100000,0x00000000,
1848/*
1849 MOVE SFBR + dsa_check_reselect TO SCRATCH0
1850
1851at 0x000001d0 : */ 0x6e340000,0x00000000,
1852/*
1853 MOVE DSA1 TO SFBR
1854
1855at 0x000001d2 : */ 0x72110000,0x00000000,
1856/*
1857 MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
1858
1859at 0x000001d4 : */ 0x6f35ff00,0x00000000,
1860/*
1861 MOVE DSA2 TO SFBR
1862
1863at 0x000001d6 : */ 0x72120000,0x00000000,
1864/*
1865 MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
1866
1867at 0x000001d8 : */ 0x6f36ff00,0x00000000,
1868/*
1869 MOVE DSA3 TO SFBR
1870
1871at 0x000001da : */ 0x72130000,0x00000000,
1872/*
1873 MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
1874
1875at 0x000001dc : */ 0x6f37ff00,0x00000000,
1876/*
1877
1878
1879 MOVE MEMORY 4, addr_scratch, reselected_check + 4
1880
1881at 0x000001de : */ 0xc0000004,0x00000000,0x00000794,
1882/*
1883
1884
1885 ; Time to correct DSA following memory move
1886 MOVE MEMORY 4, saved_dsa, addr_dsa
1887
1888at 0x000001e1 : */ 0xc0000004,0x00000000,0x00000000,
1889/*
1890
1891reselected_check:
1892 JUMP 0
1893
1894at 0x000001e4 : */ 0x80080000,0x00000000,
1895/*
1896
1897
1898;
1899;
1900
1901; We have problems here - the memory move corrupts TEMP and DSA. This
1902; routine is called from DSA code, and patched from many places. Scratch
1903; is probably free when it is called.
1904; We have to:
1905; copy temp to scratch, one byte at a time
1906; write scratch to patch a jump in place of the return
1907; do the move memory
1908; jump to the patched in return address
1909; DSA is corrupt when we get here, and can be left corrupt
1910
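
A minimal C model of those four steps, treating the script as a u32
array. The jump_operand index is a hypothetical stand-in; the real
operand offsets (reselected_ok_patch + 8, reselected_ok_jump + 4) are
resolved by the script assembler and patched at run time.

	#include <stdint.h>
	#include <stddef.h>

	static void reselected_ok_model(uint32_t *script, size_t jump_operand,
					uint32_t temp_return_addr)
	{
		/* 1. Copy TEMP to SCRATCH (the chip does this one byte at
		 *    a time through SFBR).                                 */
		uint32_t scratch = temp_return_addr;

		/* 2. Patch the operand of the trailing JUMP 0 so that it
		 *    becomes a jump back to the interrupted DSA code.      */
		script[jump_operand] = scratch;

		/* 3. The MOVE MEMORY at reselected_ok_patch (whose source
		 *    and destination were patched earlier) unlinks this
		 *    DSA from the reconnect queue.                         */

		/* 4. Execution falls through to the patched JUMP; DSA is
		 *    left corrupt, which is acceptable here.               */
	}
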
1911ENTRY reselected_ok
1912reselected_ok:
1913 MOVE TEMP0 TO SFBR
1914
1915at 0x000001e6 : */ 0x721c0000,0x00000000,
1916/*
1917 MOVE SFBR TO SCRATCH0
1918
1919at 0x000001e8 : */ 0x6a340000,0x00000000,
1920/*
1921 MOVE TEMP1 TO SFBR
1922
1923at 0x000001ea : */ 0x721d0000,0x00000000,
1924/*
1925 MOVE SFBR TO SCRATCH1
1926
1927at 0x000001ec : */ 0x6a350000,0x00000000,
1928/*
1929 MOVE TEMP2 TO SFBR
1930
1931at 0x000001ee : */ 0x721e0000,0x00000000,
1932/*
1933 MOVE SFBR TO SCRATCH2
1934
1935at 0x000001f0 : */ 0x6a360000,0x00000000,
1936/*
1937 MOVE TEMP3 TO SFBR
1938
1939at 0x000001f2 : */ 0x721f0000,0x00000000,
1940/*
1941 MOVE SFBR TO SCRATCH3
1942
1943at 0x000001f4 : */ 0x6a370000,0x00000000,
1944/*
1945 MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
1946
1947at 0x000001f6 : */ 0xc0000004,0x00000000,0x000007f4,
1948/*
1949reselected_ok_patch:
1950 MOVE MEMORY 4, 0, 0
1951
1952at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
1953/*
1954reselected_ok_jump:
1955 JUMP 0
1956
1957at 0x000001fc : */ 0x80080000,0x00000000,
1958/*
1959
1960
1961
1962
1963
1964selected:
1965 INT int_err_selected;
1966
1967at 0x000001fe : */ 0x98080000,0x00010000,
1968/*
1969
1970;
1971; A select or reselect failure can be caused by one of three conditions :
1972; 1. SIG_P was set. This will be the case if the user has written
1973; a new value to a previously NULL head of the issue queue.
1974;
1975; 2. The NCR53c810 was selected or reselected by another device.
1976;
1977; 3. The bus was already busy since we were selected or reselected
1978; before starting the command.
1979
1980wait_reselect_failed:
1981
1982
1983
1984; Check selected bit.
1985
1986 ; Must work out how to tell if we are selected....
1987
1988
1989
1990
1991; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
1992 MOVE CTEST2 & 0x40 TO SFBR
1993
1994at 0x00000200 : */ 0x74164000,0x00000000,
1995/*
1996 JUMP schedule, IF 0x40
1997
1998at 0x00000202 : */ 0x800c0040,0x00000000,
1999/*
2000; Check connected bit.
2001; FIXME: this needs to change if we support target mode
2002 MOVE ISTAT & 0x08 TO SFBR
2003
2004at 0x00000204 : */ 0x74210800,0x00000000,
2005/*
2006 JUMP reselected, IF 0x08
2007
2008at 0x00000206 : */ 0x800c0008,0x000006b0,
2009/*
2010; FIXME : Something bogus happened, and we shouldn't fail silently.
2011
2012
2013
2014 INT int_debug_panic
2015
2016at 0x00000208 : */ 0x98080000,0x030b0000,
2017/*
2018
2019
2020
2021select_failed:
2022
2023 ; Disable selection timer
2024 MOVE CTEST7 | 0x10 TO CTEST7
2025
2026at 0x0000020a : */ 0x7a1b1000,0x00000000,
2027/*
2028
2029
2030
2031
2032; Otherwise, mask the selected and reselected bits off SIST0
2033
2034 ; Let's assume we don't get selected for now
2035 MOVE SSTAT0 & 0x10 TO SFBR
2036
2037at 0x0000020c : */ 0x740d1000,0x00000000,
2038/*
2039
2040
2041
2042
2043 JUMP reselected, IF 0x10
2044
2045at 0x0000020e : */ 0x800c0010,0x000006b0,
2046/*
2047; If SIGP is set, the user just gave us another command, and
2048; we should restart or return to the scheduler.
2049; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
2050 MOVE CTEST2 & 0x40 TO SFBR
2051
2052at 0x00000210 : */ 0x74164000,0x00000000,
2053/*
2054 JUMP select, IF 0x40
2055
2056at 0x00000212 : */ 0x800c0040,0x000001f8,
2057/*
2058; Check connected bit.
2059; FIXME: this needs to change if we support target mode
2060; FIXME: is this really necessary?
2061 MOVE ISTAT & 0x08 TO SFBR
2062
2063at 0x00000214 : */ 0x74210800,0x00000000,
2064/*
2065 JUMP reselected, IF 0x08
2066
2067at 0x00000216 : */ 0x800c0008,0x000006b0,
2068/*
2069; FIXME : Something bogus happened, and we shouldn't fail silently.
2070
2071
2072
2073 INT int_debug_panic
2074
2075at 0x00000218 : */ 0x98080000,0x030b0000,
2076/*
2077
2078
2079;
2080; test_1
2081; test_2
2082;
2083; PURPOSE : run some verification tests on the NCR. test_1
2084; copies test_src to test_dest and interrupts the host
2085; processor, testing for cache coherency and interrupt
2086; problems in the process.
2087;
2088; test_2 runs a command with offsets relative to the
2089; DSA on entry, and is useful for miscellaneous experimentation.
2090;
2091
2092; Verify that interrupts are working correctly and that we don't
2093; have a cache invalidation problem.
2094
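
A hedged sketch of how the host side might exercise test_1; the
run_script_at() and wait_for_int() helpers are hypothetical, while
Ent_test_1 and A_int_test_1 are the real symbols emitted below:

	static volatile u32 test_src_buf, test_dest_buf;  /* DMA-visible */

	test_src_buf  = 0xdeadbeef;		/* pattern to copy      */
	test_dest_buf = 0;
	run_script_at(host, Ent_test_1);	/* hypothetical helper  */
	wait_for_int(host, A_int_test_1);	/* hypothetical helper  */
	if (test_dest_buf != 0xdeadbeef)	/* stale cache line or  */
		printk("53c7xx: test 1 failed\n"); /* broken DMA/IRQ    */
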
2095ABSOLUTE test_src = 0, test_dest = 0
2096ENTRY test_1
2097test_1:
2098 MOVE MEMORY 4, test_src, test_dest
2099
2100at 0x0000021a : */ 0xc0000004,0x00000000,0x00000000,
2101/*
2102 INT int_test_1
2103
2104at 0x0000021d : */ 0x98080000,0x04000000,
2105/*
2106
2107;
2108; Run arbitrary commands, with test code establishing a DSA
2109;
2110
2111ENTRY test_2
2112test_2:
2113 CLEAR TARGET
2114
2115at 0x0000021f : */ 0x60000200,0x00000000,
2116/*
2117
2118 ; Enable selection timer
2119
2120
2121
2122 MOVE CTEST7 & 0xef TO CTEST7
2123
2124at 0x00000221 : */ 0x7c1bef00,0x00000000,
2125/*
2126
2127
2128 SELECT ATN FROM 0, test_2_fail
2129
2130at 0x00000223 : */ 0x43000000,0x000008dc,
2131/*
2132 JUMP test_2_msgout, WHEN MSG_OUT
2133
2134at 0x00000225 : */ 0x860b0000,0x0000089c,
2135/*
2136ENTRY test_2_msgout
2137test_2_msgout:
2138
2139 ; Disable selection timer
2140 MOVE CTEST7 | 0x10 TO CTEST7
2141
2142at 0x00000227 : */ 0x7a1b1000,0x00000000,
2143/*
2144
2145 MOVE FROM 8, WHEN MSG_OUT
2146
2147at 0x00000229 : */ 0x1e000000,0x00000008,
2148/*
2149 MOVE FROM 16, WHEN CMD
2150
2151at 0x0000022b : */ 0x1a000000,0x00000010,
2152/*
2153 MOVE FROM 24, WHEN DATA_IN
2154
2155at 0x0000022d : */ 0x19000000,0x00000018,
2156/*
2157 MOVE FROM 32, WHEN STATUS
2158
2159at 0x0000022f : */ 0x1b000000,0x00000020,
2160/*
2161 MOVE FROM 40, WHEN MSG_IN
2162
2163at 0x00000231 : */ 0x1f000000,0x00000028,
2164/*
2165
2166
2167
2168 CLEAR ACK
2169
2170at 0x00000233 : */ 0x60000040,0x00000000,
2171/*
2172 WAIT DISCONNECT
2173
2174at 0x00000235 : */ 0x48000000,0x00000000,
2175/*
2176test_2_fail:
2177
2178 ; Disable selection timer
2179 MOVE CTEST7 | 0x10 TO CTEST7
2180
2181at 0x00000237 : */ 0x7a1b1000,0x00000000,
2182/*
2183
2184 INT int_test_2
2185
2186at 0x00000239 : */ 0x98080000,0x04010000,
2187/*
2188
2189ENTRY debug_break
2190debug_break:
2191 INT int_debug_break
2192
2193at 0x0000023b : */ 0x98080000,0x03000000,
2194/*
2195
2196;
2197; initiator_abort
2198; target_abort
2199;
2200; PURPOSE : Abort the currently established nexus from within initiator
2201; or target mode.
2202;
2203;
2204
2205ENTRY target_abort
2206target_abort:
2207 SET TARGET
2208
2209at 0x0000023d : */ 0x58000200,0x00000000,
2210/*
2211 DISCONNECT
2212
2213at 0x0000023f : */ 0x48000000,0x00000000,
2214/*
2215 CLEAR TARGET
2216
2217at 0x00000241 : */ 0x60000200,0x00000000,
2218/*
2219 JUMP schedule
2220
2221at 0x00000243 : */ 0x80080000,0x00000000,
2222/*
2223
2224ENTRY initiator_abort
2225initiator_abort:
2226 SET ATN
2227
2228at 0x00000245 : */ 0x58000008,0x00000000,
2229/*
2230;
2231; The SCSI-I specification says that targets may go into MSG out at
2232; their leisure upon receipt of the ATN signal. On all versions of the
2233; specification, we can't change phases until REQ transitions true->false,
2234; so we need to sink/source one byte of data to allow the transition.
2235;
2236; For the sake of safety, we'll only source one byte of data in all
2237; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
2238; arbitrary number of bytes.
2239 JUMP spew_cmd, WHEN CMD
2240
2241at 0x00000247 : */ 0x820b0000,0x0000094c,
2242/*
2243 JUMP eat_msgin, WHEN MSG_IN
2244
2245at 0x00000249 : */ 0x870b0000,0x0000095c,
2246/*
2247 JUMP eat_datain, WHEN DATA_IN
2248
2249at 0x0000024b : */ 0x810b0000,0x0000098c,
2250/*
2251 JUMP eat_status, WHEN STATUS
2252
2253at 0x0000024d : */ 0x830b0000,0x00000974,
2254/*
2255 JUMP spew_dataout, WHEN DATA_OUT
2256
2257at 0x0000024f : */ 0x800b0000,0x000009a4,
2258/*
2259 JUMP sated
2260
2261at 0x00000251 : */ 0x80080000,0x000009ac,
2262/*
2263spew_cmd:
2264 MOVE 1, NCR53c7xx_zero, WHEN CMD
2265
2266at 0x00000253 : */ 0x0a000001,0x00000000,
2267/*
2268 JUMP sated
2269
2270at 0x00000255 : */ 0x80080000,0x000009ac,
2271/*
2272eat_msgin:
2273 MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
2274
2275at 0x00000257 : */ 0x0f000001,0x00000000,
2276/*
2277 JUMP eat_msgin, WHEN MSG_IN
2278
2279at 0x00000259 : */ 0x870b0000,0x0000095c,
2280/*
2281 JUMP sated
2282
2283at 0x0000025b : */ 0x80080000,0x000009ac,
2284/*
2285eat_status:
2286 MOVE 1, NCR53c7xx_sink, WHEN STATUS
2287
2288at 0x0000025d : */ 0x0b000001,0x00000000,
2289/*
2290 JUMP eat_status, WHEN STATUS
2291
2292at 0x0000025f : */ 0x830b0000,0x00000974,
2293/*
2294 JUMP sated
2295
2296at 0x00000261 : */ 0x80080000,0x000009ac,
2297/*
2298eat_datain:
2299 MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
2300
2301at 0x00000263 : */ 0x09000001,0x00000000,
2302/*
2303 JUMP eat_datain, WHEN DATA_IN
2304
2305at 0x00000265 : */ 0x810b0000,0x0000098c,
2306/*
2307 JUMP sated
2308
2309at 0x00000267 : */ 0x80080000,0x000009ac,
2310/*
2311spew_dataout:
2312 MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
2313
2314at 0x00000269 : */ 0x08000001,0x00000000,
2315/*
2316sated:
2317
2318
2319
2320 MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
2321
2322at 0x0000026b : */ 0x0e000001,0x00000000,
2323/*
2324 WAIT DISCONNECT
2325
2326at 0x0000026d : */ 0x48000000,0x00000000,
2327/*
2328 INT int_norm_aborted
2329
2330at 0x0000026f : */ 0x98080000,0x02040000,
2331/*
2332
2333
2334
2335
2336; Little patched jump, used to overcome problems with TEMP getting
2337; corrupted on memory moves.
2338
2339jump_temp:
2340 JUMP 0
2341
2342at 0x00000271 : */ 0x80080000,0x00000000,
2343};
2344
2345#define A_NCR53c7xx_msg_abort 0x00000000
2346static u32 A_NCR53c7xx_msg_abort_used[] __attribute((unused)) = {
2347 0x0000026c,
2348};
2349
2350#define A_NCR53c7xx_msg_reject 0x00000000
2351static u32 A_NCR53c7xx_msg_reject_used[] __attribute((unused)) = {
2352 0x00000186,
2353};
2354
2355#define A_NCR53c7xx_sink 0x00000000
2356static u32 A_NCR53c7xx_sink_used[] __attribute((unused)) = {
2357 0x00000258,
2358 0x0000025e,
2359 0x00000264,
2360};
2361
2362#define A_NCR53c7xx_zero 0x00000000
2363static u32 A_NCR53c7xx_zero_used[] __attribute((unused)) = {
2364 0x00000254,
2365 0x0000026a,
2366};
2367
2368#define A_NOP_insn 0x00000000
2369static u32 A_NOP_insn_used[] __attribute((unused)) = {
2370 0x00000017,
2371};
2372
2373#define A_addr_dsa 0x00000000
2374static u32 A_addr_dsa_used[] __attribute((unused)) = {
2375 0x0000000f,
2376 0x00000026,
2377 0x00000033,
2378 0x00000040,
2379 0x00000055,
2380 0x00000079,
2381 0x0000008e,
2382 0x000000bc,
2383 0x000000d2,
2384 0x00000130,
2385 0x000001a5,
2386 0x000001bb,
2387 0x000001e3,
2388};
2389
2390#define A_addr_reconnect_dsa_head 0x00000000
2391static u32 A_addr_reconnect_dsa_head_used[] __attribute((unused)) = {
2392 0x000001b7,
2393};
2394
2395#define A_addr_scratch 0x00000000
2396static u32 A_addr_scratch_used[] __attribute((unused)) = {
2397 0x00000002,
2398 0x00000004,
2399 0x00000008,
2400 0x00000020,
2401 0x00000022,
2402 0x00000049,
2403 0x00000060,
2404 0x0000006a,
2405 0x00000071,
2406 0x00000073,
2407 0x000000ab,
2408 0x000000b5,
2409 0x000000c1,
2410 0x000000cb,
2411 0x0000012c,
2412 0x00000142,
2413 0x00000157,
2414 0x000001b2,
2415 0x000001b4,
2416 0x000001df,
2417 0x000001f7,
2418};
2419
2420#define A_addr_temp 0x00000000
2421static u32 A_addr_temp_used[] __attribute((unused)) = {
2422};
2423
2424#define A_dmode_memory_to_memory 0x00000000
2425static u32 A_dmode_memory_to_memory_used[] __attribute((unused)) = {
2426};
2427
2428#define A_dmode_memory_to_ncr 0x00000000
2429static u32 A_dmode_memory_to_ncr_used[] __attribute((unused)) = {
2430};
2431
2432#define A_dmode_ncr_to_memory 0x00000000
2433static u32 A_dmode_ncr_to_memory_used[] __attribute((unused)) = {
2434};
2435
2436#define A_dsa_check_reselect 0x00000000
2437static u32 A_dsa_check_reselect_used[] __attribute((unused)) = {
2438 0x000001d0,
2439};
2440
2441#define A_dsa_cmdout 0x00000048
2442static u32 A_dsa_cmdout_used[] __attribute((unused)) = {
2443 0x0000009a,
2444};
2445
2446#define A_dsa_cmnd 0x00000038
2447static u32 A_dsa_cmnd_used[] __attribute((unused)) = {
2448};
2449
2450#define A_dsa_datain 0x00000054
2451static u32 A_dsa_datain_used[] __attribute((unused)) = {
2452 0x000000c2,
2453};
2454
2455#define A_dsa_dataout 0x00000050
2456static u32 A_dsa_dataout_used[] __attribute((unused)) = {
2457 0x000000ac,
2458};
2459
2460#define A_dsa_end 0x00000070
2461static u32 A_dsa_end_used[] __attribute((unused)) = {
2462};
2463
2464#define A_dsa_fields_start 0x00000000
2465static u32 A_dsa_fields_start_used[] __attribute((unused)) = {
2466};
2467
2468#define A_dsa_msgin 0x00000058
2469static u32 A_dsa_msgin_used[] __attribute((unused)) = {
2470 0x0000019c,
2471};
2472
2473#define A_dsa_msgout 0x00000040
2474static u32 A_dsa_msgout_used[] __attribute((unused)) = {
2475 0x00000089,
2476};
2477
2478#define A_dsa_msgout_other 0x00000068
2479static u32 A_dsa_msgout_other_used[] __attribute((unused)) = {
2480 0x00000194,
2481};
2482
2483#define A_dsa_next 0x00000030
2484static u32 A_dsa_next_used[] __attribute((unused)) = {
2485 0x00000061,
2486};
2487
2488#define A_dsa_restore_pointers 0x00000000
2489static u32 A_dsa_restore_pointers_used[] __attribute((unused)) = {
2490 0x00000146,
2491};
2492
2493#define A_dsa_save_data_pointer 0x00000000
2494static u32 A_dsa_save_data_pointer_used[] __attribute((unused)) = {
2495 0x00000131,
2496};
2497
2498#define A_dsa_select 0x0000003c
2499static u32 A_dsa_select_used[] __attribute((unused)) = {
2500 0x00000082,
2501};
2502
2503#define A_dsa_sscf_710 0x00000000
2504static u32 A_dsa_sscf_710_used[] __attribute((unused)) = {
2505 0x00000007,
2506};
2507
2508#define A_dsa_status 0x00000060
2509static u32 A_dsa_status_used[] __attribute((unused)) = {
2510 0x00000198,
2511};
2512
2513#define A_dsa_temp_addr_array_value 0x00000000
2514static u32 A_dsa_temp_addr_array_value_used[] __attribute((unused)) = {
2515};
2516
2517#define A_dsa_temp_addr_dsa_value 0x00000000
2518static u32 A_dsa_temp_addr_dsa_value_used[] __attribute((unused)) = {
2519 0x00000001,
2520};
2521
2522#define A_dsa_temp_addr_new_value 0x00000000
2523static u32 A_dsa_temp_addr_new_value_used[] __attribute((unused)) = {
2524};
2525
2526#define A_dsa_temp_addr_next 0x00000000
2527static u32 A_dsa_temp_addr_next_used[] __attribute((unused)) = {
2528 0x0000001c,
2529 0x0000004f,
2530};
2531
2532#define A_dsa_temp_addr_residual 0x00000000
2533static u32 A_dsa_temp_addr_residual_used[] __attribute((unused)) = {
2534 0x0000002d,
2535 0x0000003b,
2536};
2537
2538#define A_dsa_temp_addr_saved_pointer 0x00000000
2539static u32 A_dsa_temp_addr_saved_pointer_used[] __attribute((unused)) = {
2540 0x0000002b,
2541 0x00000037,
2542};
2543
2544#define A_dsa_temp_addr_saved_residual 0x00000000
2545static u32 A_dsa_temp_addr_saved_residual_used[] __attribute((unused)) = {
2546 0x0000002e,
2547 0x0000003a,
2548};
2549
2550#define A_dsa_temp_lun 0x00000000
2551static u32 A_dsa_temp_lun_used[] __attribute((unused)) = {
2552 0x0000004c,
2553};
2554
2555#define A_dsa_temp_next 0x00000000
2556static u32 A_dsa_temp_next_used[] __attribute((unused)) = {
2557 0x0000001f,
2558};
2559
2560#define A_dsa_temp_sync 0x00000000
2561static u32 A_dsa_temp_sync_used[] __attribute((unused)) = {
2562 0x00000057,
2563};
2564
2565#define A_dsa_temp_target 0x00000000
2566static u32 A_dsa_temp_target_used[] __attribute((unused)) = {
2567 0x00000045,
2568};
2569
2570#define A_emulfly 0x00000000
2571static u32 A_emulfly_used[] __attribute((unused)) = {
2572};
2573
2574#define A_int_debug_break 0x03000000
2575static u32 A_int_debug_break_used[] __attribute((unused)) = {
2576 0x0000023c,
2577};
2578
2579#define A_int_debug_panic 0x030b0000
2580static u32 A_int_debug_panic_used[] __attribute((unused)) = {
2581 0x00000209,
2582 0x00000219,
2583};
2584
2585#define A_int_err_check_condition 0x00030000
2586static u32 A_int_err_check_condition_used[] __attribute((unused)) = {
2587 0x000001a9,
2588};
2589
2590#define A_int_err_no_phase 0x00040000
2591static u32 A_int_err_no_phase_used[] __attribute((unused)) = {
2592};
2593
2594#define A_int_err_selected 0x00010000
2595static u32 A_int_err_selected_used[] __attribute((unused)) = {
2596 0x000001ff,
2597};
2598
2599#define A_int_err_unexpected_phase 0x00000000
2600static u32 A_int_err_unexpected_phase_used[] __attribute((unused)) = {
2601 0x00000092,
2602 0x00000098,
2603 0x000000a0,
2604 0x000000d6,
2605 0x000000da,
2606 0x000000dc,
2607 0x000000e4,
2608 0x000000e8,
2609 0x000000ea,
2610 0x000000f2,
2611 0x000000f6,
2612 0x000000f8,
2613 0x000000fa,
2614 0x00000160,
2615};
2616
2617#define A_int_err_unexpected_reselect 0x00020000
2618static u32 A_int_err_unexpected_reselect_used[] __attribute((unused)) = {
2619 0x000001cd,
2620};
2621
2622#define A_int_msg_1 0x01020000
2623static u32 A_int_msg_1_used[] __attribute((unused)) = {
2624 0x00000114,
2625 0x00000116,
2626};
2627
2628#define A_int_msg_sdtr 0x01010000
2629static u32 A_int_msg_sdtr_used[] __attribute((unused)) = {
2630 0x00000180,
2631};
2632
2633#define A_int_msg_wdtr 0x01000000
2634static u32 A_int_msg_wdtr_used[] __attribute((unused)) = {
2635 0x00000174,
2636};
2637
2638#define A_int_norm_aborted 0x02040000
2639static u32 A_int_norm_aborted_used[] __attribute((unused)) = {
2640 0x00000270,
2641};
2642
2643#define A_int_norm_command_complete 0x02020000
2644static u32 A_int_norm_command_complete_used[] __attribute((unused)) = {
2645};
2646
2647#define A_int_norm_disconnected 0x02030000
2648static u32 A_int_norm_disconnected_used[] __attribute((unused)) = {
2649};
2650
2651#define A_int_norm_emulateintfly 0x02060000
2652static u32 A_int_norm_emulateintfly_used[] __attribute((unused)) = {
2653 0x000001a2,
2654};
2655
2656#define A_int_norm_reselect_complete 0x02010000
2657static u32 A_int_norm_reselect_complete_used[] __attribute((unused)) = {
2658};
2659
2660#define A_int_norm_reset 0x02050000
2661static u32 A_int_norm_reset_used[] __attribute((unused)) = {
2662};
2663
2664#define A_int_norm_select_complete 0x02000000
2665static u32 A_int_norm_select_complete_used[] __attribute((unused)) = {
2666};
2667
2668#define A_int_test_1 0x04000000
2669static u32 A_int_test_1_used[] __attribute((unused)) = {
2670 0x0000021e,
2671};
2672
2673#define A_int_test_2 0x04010000
2674static u32 A_int_test_2_used[] __attribute((unused)) = {
2675 0x0000023a,
2676};
2677
2678#define A_int_test_3 0x04020000
2679static u32 A_int_test_3_used[] __attribute((unused)) = {
2680};
2681
2682#define A_msg_buf 0x00000000
2683static u32 A_msg_buf_used[] __attribute((unused)) = {
2684 0x00000108,
2685 0x00000162,
2686 0x0000016c,
2687 0x00000172,
2688 0x00000178,
2689 0x0000017e,
2690};
2691
2692#define A_reconnect_dsa_head 0x00000000
2693static u32 A_reconnect_dsa_head_used[] __attribute((unused)) = {
2694 0x0000006d,
2695 0x00000074,
2696 0x000001b1,
2697};
2698
2699#define A_reselected_identify 0x00000000
2700static u32 A_reselected_identify_used[] __attribute((unused)) = {
2701 0x00000048,
2702 0x000001af,
2703};
2704
2705#define A_reselected_tag 0x00000000
2706static u32 A_reselected_tag_used[] __attribute((unused)) = {
2707};
2708
2709#define A_saved_dsa 0x00000000
2710static u32 A_saved_dsa_used[] __attribute((unused)) = {
2711 0x00000005,
2712 0x0000000e,
2713 0x00000023,
2714 0x00000025,
2715 0x00000032,
2716 0x0000003f,
2717 0x00000054,
2718 0x0000005f,
2719 0x00000070,
2720 0x00000078,
2721 0x0000008d,
2722 0x000000aa,
2723 0x000000bb,
2724 0x000000c0,
2725 0x000000d1,
2726 0x0000012f,
2727 0x000001a4,
2728 0x000001b5,
2729 0x000001ba,
2730 0x000001e2,
2731};
2732
2733#define A_schedule 0x00000000
2734static u32 A_schedule_used[] __attribute((unused)) = {
2735 0x0000007d,
2736 0x000001a7,
2737 0x00000203,
2738 0x00000244,
2739};
2740
2741#define A_test_dest 0x00000000
2742static u32 A_test_dest_used[] __attribute((unused)) = {
2743 0x0000021c,
2744};
2745
2746#define A_test_src 0x00000000
2747static u32 A_test_src_used[] __attribute((unused)) = {
2748 0x0000021b,
2749};
2750
2751#define Ent_accept_message 0x00000624
2752#define Ent_cmdout_cmdout 0x00000264
2753#define Ent_command_complete 0x0000065c
2754#define Ent_command_complete_msgin 0x0000066c
2755#define Ent_data_transfer 0x0000026c
2756#define Ent_datain_to_jump 0x00000334
2757#define Ent_debug_break 0x000008ec
2758#define Ent_dsa_code_begin 0x00000000
2759#define Ent_dsa_code_check_reselect 0x0000010c
2760#define Ent_dsa_code_fix_jump 0x00000058
2761#define Ent_dsa_code_restore_pointers 0x000000d8
2762#define Ent_dsa_code_save_data_pointer 0x000000a4
2763#define Ent_dsa_code_template 0x00000000
2764#define Ent_dsa_code_template_end 0x00000178
2765#define Ent_dsa_schedule 0x00000178
2766#define Ent_dsa_zero 0x00000178
2767#define Ent_end_data_transfer 0x000002a4
2768#define Ent_initiator_abort 0x00000914
2769#define Ent_msg_in 0x0000041c
2770#define Ent_msg_in_restart 0x000003fc
2771#define Ent_other_in 0x0000038c
2772#define Ent_other_out 0x00000354
2773#define Ent_other_transfer 0x000003c4
2774#define Ent_reject_message 0x00000604
2775#define Ent_reselected_check_next 0x000006f0
2776#define Ent_reselected_ok 0x00000798
2777#define Ent_respond_message 0x0000063c
2778#define Ent_select 0x000001f8
2779#define Ent_select_msgout 0x00000218
2780#define Ent_target_abort 0x000008f4
2781#define Ent_test_1 0x00000868
2782#define Ent_test_2 0x0000087c
2783#define Ent_test_2_msgout 0x0000089c
2784#define Ent_wait_reselect 0x000006a8
2785static u32 LABELPATCHES[] __attribute((unused)) = {
2786 0x00000011,
2787 0x0000001a,
2788 0x0000001d,
2789 0x00000028,
2790 0x0000002a,
2791 0x00000035,
2792 0x00000038,
2793 0x00000042,
2794 0x00000050,
2795 0x00000052,
2796 0x0000006b,
2797 0x00000083,
2798 0x00000085,
2799 0x00000090,
2800 0x00000094,
2801 0x00000096,
2802 0x0000009c,
2803 0x0000009e,
2804 0x000000a2,
2805 0x000000a4,
2806 0x000000a6,
2807 0x000000a8,
2808 0x000000b6,
2809 0x000000b9,
2810 0x000000cc,
2811 0x000000cf,
2812 0x000000d8,
2813 0x000000de,
2814 0x000000e0,
2815 0x000000e6,
2816 0x000000ec,
2817 0x000000ee,
2818 0x000000f4,
2819 0x000000fc,
2820 0x000000fe,
2821 0x0000010a,
2822 0x0000010c,
2823 0x0000010e,
2824 0x00000110,
2825 0x00000112,
2826 0x00000118,
2827 0x0000011a,
2828 0x0000012d,
2829 0x00000143,
2830 0x00000158,
2831 0x0000015c,
2832 0x00000164,
2833 0x00000166,
2834 0x00000168,
2835 0x0000016e,
2836 0x0000017a,
2837 0x000001ab,
2838 0x000001b8,
2839 0x000001bf,
2840 0x000001c3,
2841 0x000001c7,
2842 0x000001cb,
2843 0x000001e0,
2844 0x000001f8,
2845 0x00000207,
2846 0x0000020f,
2847 0x00000213,
2848 0x00000217,
2849 0x00000224,
2850 0x00000226,
2851 0x00000248,
2852 0x0000024a,
2853 0x0000024c,
2854 0x0000024e,
2855 0x00000250,
2856 0x00000252,
2857 0x00000256,
2858 0x0000025a,
2859 0x0000025c,
2860 0x00000260,
2861 0x00000262,
2862 0x00000266,
2863 0x00000268,
2864};
2865
2866static struct {
2867 u32 offset;
2868 void *address;
2869} EXTERNAL_PATCHES[] __attribute((unused)) = {
2870};
2871
2872static u32 INSTRUCTIONS __attribute((unused)) = 290;
2873static u32 PATCHES __attribute((unused)) = 78;
2874static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0;
diff --git a/drivers/scsi/53c7xx_u.h_shipped b/drivers/scsi/53c7xx_u.h_shipped
deleted file mode 100644
index 7b337174e228..000000000000
--- a/drivers/scsi/53c7xx_u.h_shipped
+++ /dev/null
@@ -1,102 +0,0 @@
1#undef A_NCR53c7xx_msg_abort
2#undef A_NCR53c7xx_msg_reject
3#undef A_NCR53c7xx_sink
4#undef A_NCR53c7xx_zero
5#undef A_NOP_insn
6#undef A_addr_dsa
7#undef A_addr_reconnect_dsa_head
8#undef A_addr_scratch
9#undef A_addr_temp
10#undef A_dmode_memory_to_memory
11#undef A_dmode_memory_to_ncr
12#undef A_dmode_ncr_to_memory
13#undef A_dsa_check_reselect
14#undef A_dsa_cmdout
15#undef A_dsa_cmnd
16#undef A_dsa_datain
17#undef A_dsa_dataout
18#undef A_dsa_end
19#undef A_dsa_fields_start
20#undef A_dsa_msgin
21#undef A_dsa_msgout
22#undef A_dsa_msgout_other
23#undef A_dsa_next
24#undef A_dsa_restore_pointers
25#undef A_dsa_save_data_pointer
26#undef A_dsa_select
27#undef A_dsa_sscf_710
28#undef A_dsa_status
29#undef A_dsa_temp_addr_array_value
30#undef A_dsa_temp_addr_dsa_value
31#undef A_dsa_temp_addr_new_value
32#undef A_dsa_temp_addr_next
33#undef A_dsa_temp_addr_residual
34#undef A_dsa_temp_addr_saved_pointer
35#undef A_dsa_temp_addr_saved_residual
36#undef A_dsa_temp_lun
37#undef A_dsa_temp_next
38#undef A_dsa_temp_sync
39#undef A_dsa_temp_target
40#undef A_emulfly
41#undef A_int_debug_break
42#undef A_int_debug_panic
43#undef A_int_err_check_condition
44#undef A_int_err_no_phase
45#undef A_int_err_selected
46#undef A_int_err_unexpected_phase
47#undef A_int_err_unexpected_reselect
48#undef A_int_msg_1
49#undef A_int_msg_sdtr
50#undef A_int_msg_wdtr
51#undef A_int_norm_aborted
52#undef A_int_norm_command_complete
53#undef A_int_norm_disconnected
54#undef A_int_norm_emulateintfly
55#undef A_int_norm_reselect_complete
56#undef A_int_norm_reset
57#undef A_int_norm_select_complete
58#undef A_int_test_1
59#undef A_int_test_2
60#undef A_int_test_3
61#undef A_msg_buf
62#undef A_reconnect_dsa_head
63#undef A_reselected_identify
64#undef A_reselected_tag
65#undef A_saved_dsa
66#undef A_schedule
67#undef A_test_dest
68#undef A_test_src
69#undef Ent_accept_message
70#undef Ent_cmdout_cmdout
71#undef Ent_command_complete
72#undef Ent_command_complete_msgin
73#undef Ent_data_transfer
74#undef Ent_datain_to_jump
75#undef Ent_debug_break
76#undef Ent_dsa_code_begin
77#undef Ent_dsa_code_check_reselect
78#undef Ent_dsa_code_fix_jump
79#undef Ent_dsa_code_restore_pointers
80#undef Ent_dsa_code_save_data_pointer
81#undef Ent_dsa_code_template
82#undef Ent_dsa_code_template_end
83#undef Ent_dsa_schedule
84#undef Ent_dsa_zero
85#undef Ent_end_data_transfer
86#undef Ent_initiator_abort
87#undef Ent_msg_in
88#undef Ent_msg_in_restart
89#undef Ent_other_in
90#undef Ent_other_out
91#undef Ent_other_transfer
92#undef Ent_reject_message
93#undef Ent_reselected_check_next
94#undef Ent_reselected_ok
95#undef Ent_respond_message
96#undef Ent_select
97#undef Ent_select_msgout
98#undef Ent_target_abort
99#undef Ent_test_1
100#undef Ent_test_2
101#undef Ent_test_2_msgout
102#undef Ent_wait_reselect
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 96f4cab07614..9b206176f717 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -304,18 +304,10 @@ static struct BusLogic_CCB *BusLogic_AllocateCCB(struct BusLogic_HostAdapter
304static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB) 304static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB)
305{ 305{
306 struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter; 306 struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter;
307 struct scsi_cmnd *cmd = CCB->Command;
308 307
309 if (cmd->use_sg != 0) { 308 scsi_dma_unmap(CCB->Command);
310 pci_unmap_sg(HostAdapter->PCI_Device,
311 (struct scatterlist *)cmd->request_buffer,
312 cmd->use_sg, cmd->sc_data_direction);
313 } else if (cmd->request_bufflen != 0) {
314 pci_unmap_single(HostAdapter->PCI_Device, CCB->DataPointer,
315 CCB->DataLength, cmd->sc_data_direction);
316 }
317 pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer, 309 pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer,
318 CCB->SenseDataLength, PCI_DMA_FROMDEVICE); 310 CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
319 311
320 CCB->Command = NULL; 312 CCB->Command = NULL;
321 CCB->Status = BusLogic_CCB_Free; 313 CCB->Status = BusLogic_CCB_Free;
@@ -2648,7 +2640,8 @@ static void BusLogic_ProcessCompletedCCBs(struct BusLogic_HostAdapter *HostAdapt
2648 */ 2640 */
2649 if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) { 2641 if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) {
2650 struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID]; 2642 struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID];
2651 struct SCSI_Inquiry *InquiryResult = (struct SCSI_Inquiry *) Command->request_buffer; 2643 struct SCSI_Inquiry *InquiryResult =
2644 (struct SCSI_Inquiry *) scsi_sglist(Command);
2652 TargetFlags->TargetExists = true; 2645 TargetFlags->TargetExists = true;
2653 TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue; 2646 TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue;
2654 TargetFlags->WideTransfersSupported = InquiryResult->WBus16; 2647 TargetFlags->WideTransfersSupported = InquiryResult->WBus16;
@@ -2819,9 +2812,8 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2819 int CDB_Length = Command->cmd_len; 2812 int CDB_Length = Command->cmd_len;
2820 int TargetID = Command->device->id; 2813 int TargetID = Command->device->id;
2821 int LogicalUnit = Command->device->lun; 2814 int LogicalUnit = Command->device->lun;
2822 void *BufferPointer = Command->request_buffer; 2815 int BufferLength = scsi_bufflen(Command);
2823 int BufferLength = Command->request_bufflen; 2816 int Count;
2824 int SegmentCount = Command->use_sg;
2825 struct BusLogic_CCB *CCB; 2817 struct BusLogic_CCB *CCB;
2826 /* 2818 /*
2827 SCSI REQUEST_SENSE commands will be executed automatically by the Host 2819 SCSI REQUEST_SENSE commands will be executed automatically by the Host
@@ -2851,36 +2843,35 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2851 return 0; 2843 return 0;
2852 } 2844 }
2853 } 2845 }
2846
2854 /* 2847 /*
2855 Initialize the fields in the BusLogic Command Control Block (CCB). 2848 Initialize the fields in the BusLogic Command Control Block (CCB).
2856 */ 2849 */
2857 if (SegmentCount == 0 && BufferLength != 0) { 2850 Count = scsi_dma_map(Command);
2858 CCB->Opcode = BusLogic_InitiatorCCB; 2851 BUG_ON(Count < 0);
2859 CCB->DataLength = BufferLength; 2852 if (Count) {
2860 CCB->DataPointer = pci_map_single(HostAdapter->PCI_Device, 2853 struct scatterlist *sg;
2861 BufferPointer, BufferLength, 2854 int i;
2862 Command->sc_data_direction); 2855
2863 } else if (SegmentCount != 0) {
2864 struct scatterlist *ScatterList = (struct scatterlist *) BufferPointer;
2865 int Segment, Count;
2866
2867 Count = pci_map_sg(HostAdapter->PCI_Device, ScatterList, SegmentCount,
2868 Command->sc_data_direction);
2869 CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather; 2856 CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
2870 CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment); 2857 CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment);
2871 if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) 2858 if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
2872 CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB); 2859 CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB);
2873 else 2860 else
2874 CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList); 2861 CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList);
2875 for (Segment = 0; Segment < Count; Segment++) { 2862
2876 CCB->ScatterGatherList[Segment].SegmentByteCount = sg_dma_len(ScatterList + Segment); 2863 scsi_for_each_sg(Command, sg, Count, i) {
2877 CCB->ScatterGatherList[Segment].SegmentDataPointer = sg_dma_address(ScatterList + Segment); 2864 CCB->ScatterGatherList[i].SegmentByteCount =
2865 sg_dma_len(sg);
2866 CCB->ScatterGatherList[i].SegmentDataPointer =
2867 sg_dma_address(sg);
2878 } 2868 }
2879 } else { 2869 } else if (!Count) {
2880 CCB->Opcode = BusLogic_InitiatorCCB; 2870 CCB->Opcode = BusLogic_InitiatorCCB;
2881 CCB->DataLength = BufferLength; 2871 CCB->DataLength = BufferLength;
2882 CCB->DataPointer = 0; 2872 CCB->DataPointer = 0;
2883 } 2873 }
2874
2884 switch (CDB[0]) { 2875 switch (CDB[0]) {
2885 case READ_6: 2876 case READ_6:
2886 case READ_10: 2877 case READ_10:
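
The hunk above shows the canonical shape of the data buffer accessor
conversions in this merge: scsi_dma_map() replaces the open-coded
use_sg/request_buffer branching on the map side, and scsi_dma_unmap()
replaces the matching pci_unmap_{sg,single} calls on completion. A
minimal sketch of the idiom; hw_add_sg_element() is a hypothetical
stand-in for the driver-specific hardware programming:

	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>

	static int queue_body(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int i, count;

		count = scsi_dma_map(cmd);	/* 0 means no data phase */
		if (count < 0)
			return count;		/* mapping failed        */

		scsi_for_each_sg(cmd, sg, count, i)
			hw_add_sg_element(i, sg_dma_address(sg),
					  sg_dma_len(sg));
		return 0;
	}

	static void complete_body(struct scsi_cmnd *cmd)
	{
		scsi_dma_unmap(cmd);	/* replaces pci_unmap_{sg,single} */
	}
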
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index eb46cb0e3cb7..9d2119b53ac9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -10,6 +10,7 @@ config RAID_ATTRS
10config SCSI 10config SCSI
11 tristate "SCSI device support" 11 tristate "SCSI device support"
12 depends on BLOCK 12 depends on BLOCK
13 select SCSI_DMA if HAS_DMA
13 ---help--- 14 ---help---
14 If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or 15 If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
15 any other SCSI device under Linux, say Y and make sure that you know 16 any other SCSI device under Linux, say Y and make sure that you know
@@ -29,6 +30,10 @@ config SCSI
29 However, do not compile this as a module if your root file system 30 However, do not compile this as a module if your root file system
30 (the one containing the directory /) is located on a SCSI device. 31 (the one containing the directory /) is located on a SCSI device.
31 32
33config SCSI_DMA
34 bool
35 default n
36
32config SCSI_TGT 37config SCSI_TGT
33 tristate "SCSI target support" 38 tristate "SCSI target support"
34 depends on SCSI && EXPERIMENTAL 39 depends on SCSI && EXPERIMENTAL
@@ -739,7 +744,7 @@ config SCSI_GENERIC_NCR53C400
739 744
740config SCSI_IBMMCA 745config SCSI_IBMMCA
741 tristate "IBMMCA SCSI support" 746 tristate "IBMMCA SCSI support"
742 depends on MCA_LEGACY && SCSI 747 depends on MCA && SCSI
743 ---help--- 748 ---help---
744 This is support for the IBM SCSI adapter found in many of the PS/2 749 This is support for the IBM SCSI adapter found in many of the PS/2
745 series computers. These machines have an MCA bus, so you need to 750 series computers. These machines have an MCA bus, so you need to
@@ -1007,6 +1012,11 @@ config SCSI_STEX
1007 To compile this driver as a module, choose M here: the 1012 To compile this driver as a module, choose M here: the
1008 module will be called stex. 1013 module will be called stex.
1009 1014
1015config 53C700_BE_BUS
1016 bool
1017 depends on SCSI_A4000T || SCSI_ZORRO7XX || MVME16x_SCSI || BVME6000_SCSI
1018 default y
1019
1010config SCSI_SYM53C8XX_2 1020config SCSI_SYM53C8XX_2
1011 tristate "SYM53C8XX Version 2 SCSI support" 1021 tristate "SYM53C8XX Version 2 SCSI support"
1012 depends on PCI && SCSI 1022 depends on PCI && SCSI
@@ -1611,13 +1621,25 @@ config FASTLANE_SCSI
1611 If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use 1621 If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
1612 one in the near future, say Y to this question. Otherwise, say N. 1622 one in the near future, say Y to this question. Otherwise, say N.
1613 1623
1614config SCSI_AMIGA7XX 1624config SCSI_A4000T
1615 bool "Amiga NCR53c710 SCSI support (EXPERIMENTAL)" 1625 tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
1616 depends on AMIGA && SCSI && EXPERIMENTAL && BROKEN 1626 depends on AMIGA && SCSI && EXPERIMENTAL
1627 select SCSI_SPI_ATTRS
1617 help 1628 help
1618 Support for various NCR53c710-based SCSI controllers on the Amiga. 1629 If you have an Amiga 4000T and have SCSI devices connected to the
1630 built-in SCSI controller, say Y. Otherwise, say N.
1631
1632 To compile this driver as a module, choose M here: the
1633 module will be called a4000t.
1634
1635config SCSI_ZORRO7XX
1636 tristate "Zorro NCR53c710 SCSI support (EXPERIMENTAL)"
1637 depends on ZORRO && SCSI && EXPERIMENTAL
1638 select SCSI_SPI_ATTRS
1639 help
1640 Support for various NCR53c710-based SCSI controllers on Zorro
1641 expansion boards for the Amiga.
1619 This includes: 1642 This includes:
1620 - the builtin SCSI controller on the Amiga 4000T,
1621 - the Amiga 4091 Zorro III SCSI-2 controller, 1643 - the Amiga 4091 Zorro III SCSI-2 controller,
1622 - the MacroSystem Development's WarpEngine Amiga SCSI-2 controller 1644 - the MacroSystem Development's WarpEngine Amiga SCSI-2 controller
1623 (info at 1645 (info at
@@ -1625,10 +1647,6 @@ config SCSI_AMIGA7XX
1625 - the SCSI controller on the Phase5 Blizzard PowerUP 603e+ 1647 - the SCSI controller on the Phase5 Blizzard PowerUP 603e+
1626 accelerator card for the Amiga 1200, 1648 accelerator card for the Amiga 1200,
1627 - the SCSI controller on the GVP Turbo 040/060 accelerator. 1649 - the SCSI controller on the GVP Turbo 040/060 accelerator.
1628 Note that all of the above SCSI controllers, except for the builtin
1629 SCSI controller on the Amiga 4000T, reside on the Zorro expansion
1630 bus, so you also have to enable Zorro bus support if you want to use
1631 them.
1632 1650
1633config OKTAGON_SCSI 1651config OKTAGON_SCSI
1634 tristate "BSC Oktagon SCSI support (EXPERIMENTAL)" 1652 tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
@@ -1712,8 +1730,8 @@ config MVME147_SCSI
1712 single-board computer. 1730 single-board computer.
1713 1731
1714config MVME16x_SCSI 1732config MVME16x_SCSI
1715 bool "NCR53C710 SCSI driver for MVME16x" 1733 tristate "NCR53C710 SCSI driver for MVME16x"
1716 depends on MVME16x && SCSI && BROKEN 1734 depends on MVME16x && SCSI
1717 select SCSI_SPI_ATTRS 1735 select SCSI_SPI_ATTRS
1718 help 1736 help
1719 The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710 1737 The Motorola MVME162, 166, 167, 172 and 177 boards use the NCR53C710
@@ -1721,22 +1739,14 @@ config MVME16x_SCSI
1721 will want to say Y to this question. 1739 will want to say Y to this question.
1722 1740
1723config BVME6000_SCSI 1741config BVME6000_SCSI
1724 bool "NCR53C710 SCSI driver for BVME6000" 1742 tristate "NCR53C710 SCSI driver for BVME6000"
1725 depends on BVME6000 && SCSI && BROKEN 1743 depends on BVME6000 && SCSI
1726 select SCSI_SPI_ATTRS 1744 select SCSI_SPI_ATTRS
1727 help 1745 help
1728 The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710 1746 The BVME4000 and BVME6000 boards from BVM Ltd use the NCR53C710
1729 SCSI controller chip. Almost everyone using one of these boards 1747 SCSI controller chip. Almost everyone using one of these boards
1730 will want to say Y to this question. 1748 will want to say Y to this question.
1731 1749
1732config SCSI_NCR53C7xx_FAST
1733 bool "allow FAST-SCSI [10MHz]"
1734 depends on SCSI_AMIGA7XX || MVME16x_SCSI || BVME6000_SCSI
1735 help
1736 This will enable 10MHz FAST-SCSI transfers with your host
1737 adapter. Some systems have problems with that speed, so it's safest
1738 to say N here.
1739
1740config SUN3_SCSI 1750config SUN3_SCSI
1741 tristate "Sun3 NCR5380 SCSI" 1751 tristate "Sun3 NCR5380 SCSI"
1742 depends on SUN3 && SCSI 1752 depends on SUN3 && SCSI
@@ -1766,8 +1776,6 @@ config SCSI_SUNESP
1766 To compile this driver as a module, choose M here: the 1776 To compile this driver as a module, choose M here: the
1767 module will be called esp. 1777 module will be called esp.
1768 1778
1769# bool 'Cyberstorm Mk III SCSI support (EXPERIMENTAL)' CONFIG_CYBERSTORMIII_SCSI
1770
1771config ZFCP 1779config ZFCP
1772 tristate "FCP host bus adapter driver for IBM eServer zSeries" 1780 tristate "FCP host bus adapter driver for IBM eServer zSeries"
1773 depends on S390 && QDIO && SCSI 1781 depends on S390 && QDIO && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b1b632791580..0f8689557158 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -37,7 +37,8 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
37 37
38obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o 38obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
39obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 39obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
40obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o 40obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
41obj-$(CONFIG_SCSI_ZORRO7XX) += 53c700.o zorro7xx.o
41obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o 42obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
42obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o 43obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
43obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o 44obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
@@ -53,8 +54,8 @@ obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
53obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 54obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
54obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o 55obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
55obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 56obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
56obj-$(CONFIG_MVME16x_SCSI) += mvme16x.o 53c7xx.o 57obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
57obj-$(CONFIG_BVME6000_SCSI) += bvme6000.o 53c7xx.o 58obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
58obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o 59obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
59obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o 60obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
60obj-$(CONFIG_SCSI_PSI240I) += psi240i.o 61obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
@@ -89,7 +90,6 @@ obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
89obj-$(CONFIG_SCSI_LPFC) += lpfc/ 90obj-$(CONFIG_SCSI_LPFC) += lpfc/
90obj-$(CONFIG_SCSI_PAS16) += pas16.o 91obj-$(CONFIG_SCSI_PAS16) += pas16.o
91obj-$(CONFIG_SCSI_SEAGATE) += seagate.o 92obj-$(CONFIG_SCSI_SEAGATE) += seagate.o
92obj-$(CONFIG_SCSI_FD_8xx) += seagate.o
93obj-$(CONFIG_SCSI_T128) += t128.o 93obj-$(CONFIG_SCSI_T128) += t128.o
94obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o 94obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
95obj-$(CONFIG_SCSI_DTC3280) += dtc.o 95obj-$(CONFIG_SCSI_DTC3280) += dtc.o
@@ -148,9 +148,9 @@ obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
148obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o 148obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
149 149
150scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \ 150scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
151 scsicam.o scsi_error.o scsi_lib.o \ 151 scsicam.o scsi_error.o scsi_lib.o
152 scsi_scan.o scsi_sysfs.o \ 152scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
153 scsi_devinfo.o 153scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o
154scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o 154scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
155scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o 155scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
156scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o 156scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
@@ -168,10 +168,8 @@ NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
168oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o 168oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
169 169
170# Files generated that shall be removed upon make clean 170# Files generated that shall be removed upon make clean
171clean-files := 53c7xx_d.h 53c700_d.h \ 171clean-files := 53c700_d.h 53c700_u.h
172 53c7xx_u.h 53c700_u.h
173 172
174$(obj)/53c7xx.o: $(obj)/53c7xx_d.h $(obj)/53c7xx_u.h
175$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h 173$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
176 174
177# If you want to play with the firmware, uncomment 175# If you want to play with the firmware, uncomment
@@ -179,11 +177,6 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
179 177
180ifdef GENERATE_FIRMWARE 178ifdef GENERATE_FIRMWARE
181 179
182$(obj)/53c7xx_d.h: $(src)/53c7xx.scr $(src)/script_asm.pl
183 $(CPP) -traditional -DCHIP=710 - < $< | grep -v '^#' | $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h)
184
185$(obj)/53c7xx_u.h: $(obj)/53c7xx_d.h
186
187$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl 180$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl
188 $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $< 181 $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $<
189 182
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 88ea5a1fb606..f8e449a98d29 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -347,7 +347,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
347 if((r & bit) == val) 347 if((r & bit) == val)
348 return 0; 348 return 0;
349 if(!in_interrupt()) 349 if(!in_interrupt())
350 yield(); 350 cond_resched();
351 else 351 else
352 cpu_relax(); 352 cpu_relax();
353 } 353 }
@@ -357,7 +357,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
357static struct { 357static struct {
358 unsigned char value; 358 unsigned char value;
359 const char *name; 359 const char *name;
360} phases[] = { 360} phases[] __maybe_unused = {
361 {PHASE_DATAOUT, "DATAOUT"}, 361 {PHASE_DATAOUT, "DATAOUT"},
362 {PHASE_DATAIN, "DATAIN"}, 362 {PHASE_DATAIN, "DATAIN"},
363 {PHASE_CMDOUT, "CMDOUT"}, 363 {PHASE_CMDOUT, "CMDOUT"},
@@ -575,7 +575,8 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
575 * Locks: none, irqs must be enabled on entry 575 * Locks: none, irqs must be enabled on entry
576 */ 576 */
577 577
578static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible) 578static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
579 int possible)
579{ 580{
580 NCR5380_local_declare(); 581 NCR5380_local_declare();
581 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; 582 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -629,7 +630,8 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
629 * Locks: none 630 * Locks: none
630 */ 631 */
631 632
632static void __init NCR5380_print_options(struct Scsi_Host *instance) 633static void __init __maybe_unused
634NCR5380_print_options(struct Scsi_Host *instance)
633{ 635{
634 printk(" generic options" 636 printk(" generic options"
635#ifdef AUTOPROBE_IRQ 637#ifdef AUTOPROBE_IRQ
@@ -703,8 +705,8 @@ char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len);
703static 705static
704char *lprint_opcode(int opcode, char *pos, char *buffer, int length); 706char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
705 707
706static 708static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
707int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, int length, int inout) 709 char *buffer, char **start, off_t offset, int length, int inout)
708{ 710{
709 char *pos = buffer; 711 char *pos = buffer;
710 struct NCR5380_hostdata *hostdata; 712 struct NCR5380_hostdata *hostdata;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 713a108c02ef..bccf13f71532 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -299,7 +299,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
299static irqreturn_t NCR5380_intr(int irq, void *dev_id); 299static irqreturn_t NCR5380_intr(int irq, void *dev_id);
300#endif 300#endif
301static void NCR5380_main(struct work_struct *work); 301static void NCR5380_main(struct work_struct *work);
302static void NCR5380_print_options(struct Scsi_Host *instance); 302static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
303#ifdef NDEBUG 303#ifdef NDEBUG
304static void NCR5380_print_phase(struct Scsi_Host *instance); 304static void NCR5380_print_phase(struct Scsi_Host *instance);
305static void NCR5380_print(struct Scsi_Host *instance); 305static void NCR5380_print(struct Scsi_Host *instance);
@@ -307,8 +307,8 @@ static void NCR5380_print(struct Scsi_Host *instance);
307static int NCR5380_abort(Scsi_Cmnd * cmd); 307static int NCR5380_abort(Scsi_Cmnd * cmd);
308static int NCR5380_bus_reset(Scsi_Cmnd * cmd); 308static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
309static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); 309static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *));
310static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, 310static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
311off_t offset, int length, int inout); 311 char *buffer, char **start, off_t offset, int length, int inout);
312 312
313static void NCR5380_reselect(struct Scsi_Host *instance); 313static void NCR5380_reselect(struct Scsi_Host *instance);
314static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag); 314static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 7c0b17f86903..eda8c48f6be7 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -698,7 +698,7 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
698 int i; 698 int i;
699 699
700 VDEB(printk("NCR53c406a_queue called\n")); 700 VDEB(printk("NCR53c406a_queue called\n"));
701 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, SCpnt->request_bufflen)); 701 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt)));
702 702
703#if 0 703#if 0
704 VDEB(for (i = 0; i < SCpnt->cmd_len; i++) 704 VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
@@ -785,8 +785,8 @@ static void NCR53c406a_intr(void *dev_id)
785 unsigned char status, int_reg; 785 unsigned char status, int_reg;
786#if USE_PIO 786#if USE_PIO
787 unsigned char pio_status; 787 unsigned char pio_status;
788 struct scatterlist *sglist; 788 struct scatterlist *sg;
789 unsigned int sgcount; 789 int i;
790#endif 790#endif
791 791
792 VDEB(printk("NCR53c406a_intr called\n")); 792 VDEB(printk("NCR53c406a_intr called\n"));
@@ -866,22 +866,18 @@ static void NCR53c406a_intr(void *dev_id)
866 current_SC->SCp.phase = data_out; 866 current_SC->SCp.phase = data_out;
867 VDEB(printk("NCR53c406a: Data-Out phase\n")); 867 VDEB(printk("NCR53c406a: Data-Out phase\n"));
868 outb(FLUSH_FIFO, CMD_REG); 868 outb(FLUSH_FIFO, CMD_REG);
869 LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */ 869 LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
870#if USE_DMA /* No s/g support for DMA */ 870#if USE_DMA /* No s/g support for DMA */
871 NCR53c406a_dma_write(current_SC->request_buffer, current_SC->request_bufflen); 871 NCR53c406a_dma_write(scsi_sglist(current_SC),
 872 scsi_bufflen(current_SC));
873
872#endif /* USE_DMA */ 874#endif /* USE_DMA */
873 outb(TRANSFER_INFO | DMA_OP, CMD_REG); 875 outb(TRANSFER_INFO | DMA_OP, CMD_REG);
874#if USE_PIO 876#if USE_PIO
875 if (!current_SC->use_sg) /* Don't use scatter-gather */ 877 scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
876 NCR53c406a_pio_write(current_SC->request_buffer, current_SC->request_bufflen); 878 NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
877 else { /* use scatter-gather */ 879 sg->length);
878 sgcount = current_SC->use_sg; 880 }
879 sglist = current_SC->request_buffer;
880 while (sgcount--) {
881 NCR53c406a_pio_write(page_address(sglist->page) + sglist->offset, sglist->length);
882 sglist++;
883 }
884 }
885 REG0; 881 REG0;
886#endif /* USE_PIO */ 882#endif /* USE_PIO */
887 } 883 }
@@ -893,22 +889,17 @@ static void NCR53c406a_intr(void *dev_id)
893 current_SC->SCp.phase = data_in; 889 current_SC->SCp.phase = data_in;
894 VDEB(printk("NCR53c406a: Data-In phase\n")); 890 VDEB(printk("NCR53c406a: Data-In phase\n"));
895 outb(FLUSH_FIFO, CMD_REG); 891 outb(FLUSH_FIFO, CMD_REG);
896 LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */ 892 LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
897#if USE_DMA /* No s/g support for DMA */ 893#if USE_DMA /* No s/g support for DMA */
898 NCR53c406a_dma_read(current_SC->request_buffer, current_SC->request_bufflen); 894 NCR53c406a_dma_read(scsi_sglist(current_SC),
 895 scsi_bufflen(current_SC));
899#endif /* USE_DMA */ 896#endif /* USE_DMA */
900 outb(TRANSFER_INFO | DMA_OP, CMD_REG); 897 outb(TRANSFER_INFO | DMA_OP, CMD_REG);
901#if USE_PIO 898#if USE_PIO
902 if (!current_SC->use_sg) /* Don't use scatter-gather */ 899 scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
903 NCR53c406a_pio_read(current_SC->request_buffer, current_SC->request_bufflen); 900 NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
904 else { /* Use scatter-gather */ 901 sg->length);
905 sgcount = current_SC->use_sg; 902 }
906 sglist = current_SC->request_buffer;
907 while (sgcount--) {
908 NCR53c406a_pio_read(page_address(sglist->page) + sglist->offset, sglist->length);
909 sglist++;
910 }
911 }
912 REG0; 903 REG0;
913#endif /* USE_PIO */ 904#endif /* USE_PIO */
914 } 905 }
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 7f4241bfb9c4..f608d4a1d6da 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -19,27 +19,6 @@
19 * along with this program; see the file COPYING. If not, write to 19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 * 21 *
22 * --------------------------------------------------------------------------
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions, and the following disclaimer,
29 * without modification, immediately at the beginning of the file.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. The name of the author may not be used to endorse or promote products
34 * derived from this software without specific prior written permission.
35 *
36 * Where this Software is combined with software released under the terms of
37 * the GNU General Public License ("GPL") and the terms of the GPL would require the
38 * combined work to also be released under the terms of the GPL, the terms
39 * and conditions of this License will apply in addition to those of the
40 * GPL with the exception of any terms or conditions of this License that
41 * conflict with, or are expressly prohibited by, the GPL.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -75,6 +54,8 @@
75 * 9/28/04 Christoph Hellwig <hch@lst.de> 54 * 9/28/04 Christoph Hellwig <hch@lst.de>
76 * - merge the two source files 55 * - merge the two source files
77 * - remove internal queueing code 56 * - remove internal queueing code
57 * 14/06/07 Alan Cox <alan@redhat.com>
58 * - Grand cleanup and Linuxisation
78 */ 59 */
79 60
80#include <linux/module.h> 61#include <linux/module.h>
@@ -102,14 +83,12 @@
102#include "a100u2w.h" 83#include "a100u2w.h"
103 84
104 85
105#define JIFFIES_TO_MS(t) ((t) * 1000 / HZ) 86static struct orc_scb *__orc_alloc_scb(struct orc_host * host);
106#define MS_TO_JIFFIES(j) ((j * HZ) / 1000) 87static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb);
107 88
108static ORC_SCB *orc_alloc_scb(ORC_HCS * hcsp); 89static struct orc_nvram nvram, *nvramp = &nvram;
109static void inia100SCBPost(BYTE * pHcb, BYTE * pScb);
110 90
111static NVRAM nvram, *nvramp = &nvram; 91static u8 default_nvram[64] =
112static UCHAR dftNvRam[64] =
113{ 92{
114/*----------header -------------*/ 93/*----------header -------------*/
115 0x01, /* 0x00: Sub System Vendor ID 0 */ 94 0x01, /* 0x00: Sub System Vendor ID 0 */
@@ -158,823 +137,882 @@ static UCHAR dftNvRam[64] =
158}; 137};
159 138
160 139
161/***************************************************************************/ 140static u8 wait_chip_ready(struct orc_host * host)
162static void waitForPause(unsigned amount)
163{
164 ULONG the_time = jiffies + MS_TO_JIFFIES(amount);
165 while (time_before_eq(jiffies, the_time))
166 cpu_relax();
167}
168
169/***************************************************************************/
170static UCHAR waitChipReady(ORC_HCS * hcsp)
171{ 141{
172 int i; 142 int i;
173 143
174 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ 144 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
175 if (ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */ 145 if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
176 return 1; 146 return 1;
 177 waitForPause(100); /* wait 100ms before trying again */ 147 mdelay(100);
178 } 148 }
179 return 0; 149 return 0;
180} 150}
181 151
182/***************************************************************************/ 152static u8 wait_firmware_ready(struct orc_host * host)
183static UCHAR waitFWReady(ORC_HCS * hcsp)
184{ 153{
185 int i; 154 int i;
186 155
187 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ 156 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
188 if (ORC_RD(hcsp->HCS_Base, ORC_HSTUS) & RREADY) /* Wait READY set */ 157 if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
189 return 1; 158 return 1;
 190 waitForPause(100); /* wait 100ms before trying again */ 159 mdelay(100); /* wait 100ms before trying again */
191 } 160 }
192 return 0; 161 return 0;
193} 162}
194 163
195/***************************************************************************/ 164/***************************************************************************/
196static UCHAR waitSCSIRSTdone(ORC_HCS * hcsp) 165static u8 wait_scsi_reset_done(struct orc_host * host)
197{ 166{
198 int i; 167 int i;
199 168
200 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ 169 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
201 if (!(ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */ 170 if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
202 return 1; 171 return 1;
 203 waitForPause(100); /* wait 100ms before trying again */ 172 mdelay(100); /* wait 100ms before trying again */
204 } 173 }
205 return 0; 174 return 0;
206} 175}
207 176
208/***************************************************************************/ 177/***************************************************************************/
209static UCHAR waitHDOoff(ORC_HCS * hcsp) 178static u8 wait_HDO_off(struct orc_host * host)
210{ 179{
211 int i; 180 int i;
212 181
213 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ 182 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
214 if (!(ORC_RD(hcsp->HCS_Base, ORC_HCTRL) & HDO)) /* Wait HDO off */ 183 if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */
215 return 1; 184 return 1;
 216 waitForPause(100); /* wait 100ms before trying again */ 185 mdelay(100); /* wait 100ms before trying again */
217 } 186 }
218 return 0; 187 return 0;
219} 188}
220 189
221/***************************************************************************/ 190/***************************************************************************/
222static UCHAR waitHDIset(ORC_HCS * hcsp, UCHAR * pData) 191static u8 wait_hdi_set(struct orc_host * host, u8 * data)
223{ 192{
224 int i; 193 int i;
225 194
226 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ 195 for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
227 if ((*pData = ORC_RD(hcsp->HCS_Base, ORC_HSTUS)) & HDI) 196 if ((*data = inb(host->base + ORC_HSTUS)) & HDI)
228 return 1; /* Wait HDI set */ 197 return 1; /* Wait HDI set */
 229 waitForPause(100); /* wait 100ms before trying again */ 198 mdelay(100); /* wait 100ms before trying again */
230 } 199 }
231 return 0; 200 return 0;
232} 201}
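
The five wait_* helpers above share one idiom: poll a host register roughly ten times at 100ms intervals and report success or timeout. A condensed sketch of that idiom (poll_reg_bit() is illustrative, not part of the driver; it assumes <linux/delay.h> and <asm/io.h>):

/* Illustrative: poll (inb(port) & mask) for the wanted state, ~1s timeout. */
static u8 poll_reg_bit(unsigned long port, u8 mask, int want_set)
{
	int i;

	for (i = 0; i < 10; i++) {
		u8 v = inb(port) & mask;
		if (want_set ? v != 0 : v == 0)
			return 1;	/* condition met */
		mdelay(100);		/* busy-wait 100ms, then retry */
	}
	return 0;			/* timed out after ~1 second */
}

Since mdelay() spins, any caller can burn a full second; the FIXME later in this file about holding the allocation lock with interrupts off is a direct consequence.
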
233 202
234/***************************************************************************/ 203/***************************************************************************/
235static unsigned short get_FW_version(ORC_HCS * hcsp) 204static unsigned short orc_read_fwrev(struct orc_host * host)
236{ 205{
237 UCHAR bData; 206 u16 version;
238 union { 207 u8 data;
239 unsigned short sVersion; 208
240 unsigned char cVersion[2]; 209 outb(ORC_CMD_VERSION, host->base + ORC_HDATA);
241 } Version; 210 outb(HDO, host->base + ORC_HCTRL);
242 211 if (wait_HDO_off(host) == 0) /* Wait HDO off */
243 ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_VERSION);
244 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO);
245 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */
246 return 0; 212 return 0;
247 213
248 if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */ 214 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
249 return 0; 215 return 0;
250 Version.cVersion[0] = ORC_RD(hcsp->HCS_Base, ORC_HDATA); 216 version = inb(host->base + ORC_HDATA);
251 ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */ 217 outb(data, host->base + ORC_HSTUS); /* Clear HDI */
252 218
253 if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */ 219 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
254 return 0; 220 return 0;
255 Version.cVersion[1] = ORC_RD(hcsp->HCS_Base, ORC_HDATA); 221 version |= inb(host->base + ORC_HDATA) << 8;
256 ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */ 222 outb(data, host->base + ORC_HSTUS); /* Clear HDI */
257 223
258 return (Version.sVersion); 224 return version;
259} 225}
260 226
261/***************************************************************************/ 227/***************************************************************************/
262static UCHAR set_NVRAM(ORC_HCS * hcsp, unsigned char address, unsigned char value) 228static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value)
263{ 229{
264 ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_SET_NVM); /* Write command */ 230 outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */
265 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 231 outb(HDO, host->base + ORC_HCTRL);
266 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 232 if (wait_HDO_off(host) == 0) /* Wait HDO off */
267 return 0; 233 return 0;
268 234
269 ORC_WR(hcsp->HCS_Base + ORC_HDATA, address); /* Write address */ 235 outb(address, host->base + ORC_HDATA); /* Write address */
270 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 236 outb(HDO, host->base + ORC_HCTRL);
271 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 237 if (wait_HDO_off(host) == 0) /* Wait HDO off */
272 return 0; 238 return 0;
273 239
274 ORC_WR(hcsp->HCS_Base + ORC_HDATA, value); /* Write value */ 240 outb(value, host->base + ORC_HDATA); /* Write value */
275 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 241 outb(HDO, host->base + ORC_HCTRL);
276 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 242 if (wait_HDO_off(host) == 0) /* Wait HDO off */
277 return 0; 243 return 0;
278 244
279 return 1; 245 return 1;
280} 246}
281 247
282/***************************************************************************/ 248/***************************************************************************/
283static UCHAR get_NVRAM(ORC_HCS * hcsp, unsigned char address, unsigned char *pDataIn) 249static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
284{ 250{
285 unsigned char bData; 251 unsigned char data;
286 252
287 ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_GET_NVM); /* Write command */ 253 outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */
288 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 254 outb(HDO, host->base + ORC_HCTRL);
289 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 255 if (wait_HDO_off(host) == 0) /* Wait HDO off */
290 return 0; 256 return 0;
291 257
292 ORC_WR(hcsp->HCS_Base + ORC_HDATA, address); /* Write address */ 258 outb(address, host->base + ORC_HDATA); /* Write address */
293 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 259 outb(HDO, host->base + ORC_HCTRL);
294 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 260 if (wait_HDO_off(host) == 0) /* Wait HDO off */
295 return 0; 261 return 0;
296 262
297 if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */ 263 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
298 return 0; 264 return 0;
299 *pDataIn = ORC_RD(hcsp->HCS_Base, ORC_HDATA); 265 *ptr = inb(host->base + ORC_HDATA);
300 ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */ 266 outb(data, host->base + ORC_HSTUS); /* Clear HDI */
301 267
302 return 1; 268 return 1;
269
303} 270}
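
orc_read_fwrev(), orc_nv_write() and orc_nv_read() above all drive the same doorbell protocol: post a byte to ORC_HDATA, raise HDO in ORC_HCTRL, wait for the controller to clear HDO, then collect reply bytes when HDI appears in ORC_HSTUS and acknowledge by writing the status back. A sketch of one complete exchange built only from the helpers shown (orc_mbox_readbyte() is illustrative, not a function in this file):

/* Illustrative: one command/reply exchange on the Orchid doorbell,
 * following the exact sequence used by the functions above. */
static int orc_mbox_readbyte(struct orc_host *host, u8 command, u8 *reply)
{
	u8 status;

	outb(command, host->base + ORC_HDATA);	/* post the command byte */
	outb(HDO, host->base + ORC_HCTRL);	/* ring the doorbell */
	if (wait_HDO_off(host) == 0)		/* controller consumed it? */
		return -1;
	if (wait_hdi_set(host, &status) == 0)	/* reply available? */
		return -1;
	*reply = inb(host->base + ORC_HDATA);	/* fetch the reply byte */
	outb(status, host->base + ORC_HSTUS);	/* ack: clear HDI */
	return 0;
}
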
304 271
305/***************************************************************************/ 272/**
 306static void orc_exec_scb(ORC_HCS * hcsp, ORC_SCB * scbp) 273 * orc_exec_scb - Queue an SCB with the HA
274 * @host: host adapter the SCB belongs to
275 * @scb: SCB to queue for execution
276 */
277
278static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb)
307{ 279{
308 scbp->SCB_Status = ORCSCB_POST; 280 scb->status = ORCSCB_POST;
309 ORC_WR(hcsp->HCS_Base + ORC_PQUEUE, scbp->SCB_ScbIdx); 281 outb(scb->scbidx, host->base + ORC_PQUEUE);
310 return;
311} 282}
312 283
313 284
314/*********************************************************************** 285/**
315 Read SCSI H/A configuration parameters from serial EEPROM 286 * se2_rd_all - read SCSI parameters from EEPROM
316************************************************************************/ 287 * @host: Host whose EEPROM is being loaded
317static int se2_rd_all(ORC_HCS * hcsp) 288 *
289 * Read SCSI H/A configuration parameters from serial EEPROM
290 */
291
292static int se2_rd_all(struct orc_host * host)
318{ 293{
319 int i; 294 int i;
320 UCHAR *np, chksum = 0; 295 u8 *np, chksum = 0;
321 296
322 np = (UCHAR *) nvramp; 297 np = (u8 *) nvramp;
323 for (i = 0; i < 64; i++, np++) { /* <01> */ 298 for (i = 0; i < 64; i++, np++) { /* <01> */
324 if (get_NVRAM(hcsp, (unsigned char) i, np) == 0) 299 if (orc_nv_read(host, (u8) i, np) == 0)
325 return -1; 300 return -1;
326// *np++ = get_NVRAM(hcsp, (unsigned char ) i);
327 } 301 }
328 302
 329/*------ Is checksum ok ? ------*/ 303 /*------ Is checksum ok ? ------*/
330 np = (UCHAR *) nvramp; 304 np = (u8 *) nvramp;
331 for (i = 0; i < 63; i++) 305 for (i = 0; i < 63; i++)
332 chksum += *np++; 306 chksum += *np++;
333 307
334 if (nvramp->CheckSum != (UCHAR) chksum) 308 if (nvramp->CheckSum != (u8) chksum)
335 return -1; 309 return -1;
336 return 1; 310 return 1;
337} 311}
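
The NVRAM integrity rule in se2_rd_all() is simply that byte 63 holds the 8-bit sum of bytes 0 to 62; se2_update_all() computes the same sum over the defaults before writing them back. Restated as a standalone check (nvram_csum_ok() is illustrative):

/* Illustrative: the 64-byte NVRAM image is valid when its last byte
 * equals the 8-bit sum of the 63 bytes before it. */
static int nvram_csum_ok(const u8 nv[64])
{
	u8 sum = 0;
	int i;

	for (i = 0; i < 63; i++)
		sum += nv[i];
	return sum == nv[63];
}
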
338 312
339/************************************************************************ 313/**
340 Update SCSI H/A configuration parameters from serial EEPROM 314 * se2_update_all - update the EEPROM
341*************************************************************************/ 315 * @host: Host whose EEPROM is being updated
342static void se2_update_all(ORC_HCS * hcsp) 316 *
317 * Update changed bytes in the EEPROM image.
318 */
319
320static void se2_update_all(struct orc_host * host)
343{ /* setup default pattern */ 321{ /* setup default pattern */
344 int i; 322 int i;
345 UCHAR *np, *np1, chksum = 0; 323 u8 *np, *np1, chksum = 0;
346 324
347 /* Calculate checksum first */ 325 /* Calculate checksum first */
348 np = (UCHAR *) dftNvRam; 326 np = (u8 *) default_nvram;
349 for (i = 0; i < 63; i++) 327 for (i = 0; i < 63; i++)
350 chksum += *np++; 328 chksum += *np++;
351 *np = chksum; 329 *np = chksum;
352 330
353 np = (UCHAR *) dftNvRam; 331 np = (u8 *) default_nvram;
354 np1 = (UCHAR *) nvramp; 332 np1 = (u8 *) nvramp;
355 for (i = 0; i < 64; i++, np++, np1++) { 333 for (i = 0; i < 64; i++, np++, np1++) {
356 if (*np != *np1) { 334 if (*np != *np1)
357 set_NVRAM(hcsp, (unsigned char) i, *np); 335 orc_nv_write(host, (u8) i, *np);
358 }
359 } 336 }
360 return;
361} 337}
362 338
363/************************************************************************* 339/**
364 Function name : read_eeprom 340 * read_eeprom - load EEPROM
365**************************************************************************/ 341 * @host: Host EEPROM to read
366static void read_eeprom(ORC_HCS * hcsp) 342 *
343 * Read the EEPROM for a given host. If it is invalid or fails
 344 * then restore the defaults and use them.
345 */
346
347static void read_eeprom(struct orc_host * host)
367{ 348{
368 if (se2_rd_all(hcsp) != 1) { 349 if (se2_rd_all(host) != 1) {
369 se2_update_all(hcsp); /* setup default pattern */ 350 se2_update_all(host); /* setup default pattern */
370 se2_rd_all(hcsp); /* load again */ 351 se2_rd_all(host); /* load again */
371 } 352 }
372} 353}
373 354
374 355
375/***************************************************************************/ 356/**
376static UCHAR load_FW(ORC_HCS * hcsp) 357 * orc_load_firmware - initialise firmware
358 * @host: Host to set up
359 *
360 * Load the firmware from the EEPROM into controller SRAM. This
361 * is basically a 4K block copy and then a 4K block read to check
 362 * correctness. The rest is convoluted by the indirect interfaces
 363 * in the hardware.
364 */
365
366static u8 orc_load_firmware(struct orc_host * host)
377{ 367{
378 U32 dData; 368 u32 data32;
379 USHORT wBIOSAddress; 369 u16 bios_addr;
380 USHORT i; 370 u16 i;
381 UCHAR *pData, bData; 371 u8 *data32_ptr, data;
382 372
383 373
384 bData = ORC_RD(hcsp->HCS_Base, ORC_GCFG); 374 /* Set up the EEPROM for access */
385 ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData | EEPRG); /* Enable EEPROM programming */ 375
386 ORC_WR(hcsp->HCS_Base + ORC_EBIOSADR2, 0x00); 376 data = inb(host->base + ORC_GCFG);
387 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x00); 377 outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */
388 if (ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA) != 0x55) { 378 outb(0x00, host->base + ORC_EBIOSADR2);
389 ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */ 379 outw(0x0000, host->base + ORC_EBIOSADR0);
380 if (inb(host->base + ORC_EBIOSDATA) != 0x55) {
381 outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
390 return 0; 382 return 0;
391 } 383 }
392 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x01); 384 outw(0x0001, host->base + ORC_EBIOSADR0);
393 if (ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA) != 0xAA) { 385 if (inb(host->base + ORC_EBIOSDATA) != 0xAA) {
394 ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */ 386 outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
395 return 0; 387 return 0;
396 } 388 }
397 ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST | DOWNLOAD); /* Enable SRAM programming */ 389
398 pData = (UCHAR *) & dData; 390 outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
399 dData = 0; /* Initial FW address to 0 */ 391 data32_ptr = (u8 *) & data32;
400 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x10); 392 data32 = 0; /* Initial FW address to 0 */
401 *pData = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */ 393 outw(0x0010, host->base + ORC_EBIOSADR0);
402 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x11); 394 *data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
403 *(pData + 1) = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */ 395 outw(0x0011, host->base + ORC_EBIOSADR0);
404 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, 0x12); 396 *(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
405 *(pData + 2) = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */ 397 outw(0x0012, host->base + ORC_EBIOSADR0);
406 ORC_WR(hcsp->HCS_Base + ORC_EBIOSADR2, *(pData + 2)); 398 *(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
407 ORC_WRLONG(hcsp->HCS_Base + ORC_FWBASEADR, dData); /* Write FW address */ 399 outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
408 400 outl(data32, host->base + ORC_FWBASEADR); /* Write FW address */
409 wBIOSAddress = (USHORT) dData; /* FW code locate at BIOS address + ? */ 401
410 for (i = 0, pData = (UCHAR *) & dData; /* Download the code */ 402 /* Copy the code from the BIOS to the SRAM */
403
404 bios_addr = (u16) data32; /* FW code locate at BIOS address + ? */
405 for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
411 i < 0x1000; /* Firmware code size = 4K */ 406 i < 0x1000; /* Firmware code size = 4K */
412 i++, wBIOSAddress++) { 407 i++, bios_addr++) {
413 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, wBIOSAddress); 408 outw(bios_addr, host->base + ORC_EBIOSADR0);
414 *pData++ = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */ 409 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
415 if ((i % 4) == 3) { 410 if ((i % 4) == 3) {
416 ORC_WRLONG(hcsp->HCS_Base + ORC_RISCRAM, dData); /* Write every 4 bytes */ 411 outl(data32, host->base + ORC_RISCRAM); /* Write every 4 bytes */
417 pData = (UCHAR *) & dData; 412 data32_ptr = (u8 *) & data32;
418 } 413 }
419 } 414 }
420 415
421 ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST | DOWNLOAD); /* Reset program count 0 */ 416 /* Go back and check they match */
 422 wBIOSAddress -= 0x1000; /* Reset the BIOS address */ 417
423 for (i = 0, pData = (UCHAR *) & dData; /* Check the code */ 418 outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */
 419 bios_addr -= 0x1000; /* Reset the BIOS address */
420 for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */
424 i < 0x1000; /* Firmware code size = 4K */ 421 i < 0x1000; /* Firmware code size = 4K */
425 i++, wBIOSAddress++) { 422 i++, bios_addr++) {
426 ORC_WRSHORT(hcsp->HCS_Base + ORC_EBIOSADR0, wBIOSAddress); 423 outw(bios_addr, host->base + ORC_EBIOSADR0);
427 *pData++ = ORC_RD(hcsp->HCS_Base, ORC_EBIOSDATA); /* Read from BIOS */ 424 *data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
428 if ((i % 4) == 3) { 425 if ((i % 4) == 3) {
429 if (ORC_RDLONG(hcsp->HCS_Base, ORC_RISCRAM) != dData) { 426 if (inl(host->base + ORC_RISCRAM) != data32) {
430 ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST); /* Reset program to 0 */ 427 outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
431 ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /*Disable EEPROM programming */ 428 outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
432 return 0; 429 return 0;
433 } 430 }
434 pData = (UCHAR *) & dData; 431 data32_ptr = (u8 *) & data32;
435 } 432 }
436 } 433 }
437 ORC_WR(hcsp->HCS_Base + ORC_RISCCTL, PRGMRST); /* Reset program to 0 */ 434
438 ORC_WR(hcsp->HCS_Base + ORC_GCFG, bData); /* Disable EEPROM programming */ 435 /* Success */
436 outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
437 outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
439 return 1; 438 return 1;
440} 439}
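
orc_load_firmware() above reads the firmware a byte at a time from the BIOS EEPROM but feeds the RISC SRAM in 32-bit words: four bytes accumulate in data32 and every fourth byte triggers an outl() to ORC_RISCRAM, with the verify pass repeating the walk using inl() comparisons. The core accumulate-and-flush step, stripped of the ORC_EBIOSADR2 page setup and the verify pass (copy_4k_words() is illustrative, not the committed code):

/* Illustrative: pack consecutive EEPROM bytes into a u32, flushing a
 * whole word to the RISC SRAM port on every fourth byte. */
static void copy_4k_words(struct orc_host *host, u16 src)
{
	u32 word = 0;
	u8 *p = (u8 *)&word;
	u16 i;

	for (i = 0; i < 0x1000; i++, src++) {		/* 4K of firmware */
		outw(src, host->base + ORC_EBIOSADR0);	/* select source byte */
		*p++ = inb(host->base + ORC_EBIOSDATA);	/* read it */
		if ((i % 4) == 3) {			/* word assembled */
			outl(word, host->base + ORC_RISCRAM);
			p = (u8 *)&word;		/* start next word */
		}
	}
}
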
441 440
442/***************************************************************************/ 441/***************************************************************************/
443static void setup_SCBs(ORC_HCS * hcsp) 442static void setup_SCBs(struct orc_host * host)
444{ 443{
445 ORC_SCB *pVirScb; 444 struct orc_scb *scb;
446 int i; 445 int i;
447 ESCB *pVirEscb; 446 struct orc_extended_scb *escb;
448 dma_addr_t pPhysEscb; 447 dma_addr_t escb_phys;
449 448
450 /* Setup SCB HCS_Base and SCB Size registers */ 449 /* Setup SCB base and SCB Size registers */
451 ORC_WR(hcsp->HCS_Base + ORC_SCBSIZE, ORC_MAXQUEUE); /* Total number of SCBs */ 450 outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */
452 /* SCB HCS_Base address 0 */ 451 /* SCB base address 0 */
453 ORC_WRLONG(hcsp->HCS_Base + ORC_SCBBASE0, hcsp->HCS_physScbArray); 452 outl(host->scb_phys, host->base + ORC_SCBBASE0);
454 /* SCB HCS_Base address 1 */ 453 /* SCB base address 1 */
455 ORC_WRLONG(hcsp->HCS_Base + ORC_SCBBASE1, hcsp->HCS_physScbArray); 454 outl(host->scb_phys, host->base + ORC_SCBBASE1);
456 455
457 /* setup scatter list address with one buffer */ 456 /* setup scatter list address with one buffer */
458 pVirScb = hcsp->HCS_virScbArray; 457 scb = host->scb_virt;
459 pVirEscb = hcsp->HCS_virEscbArray; 458 escb = host->escb_virt;
460 459
461 for (i = 0; i < ORC_MAXQUEUE; i++) { 460 for (i = 0; i < ORC_MAXQUEUE; i++) {
462 pPhysEscb = (hcsp->HCS_physEscbArray + (sizeof(ESCB) * i)); 461 escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
463 pVirScb->SCB_SGPAddr = (U32) pPhysEscb; 462 scb->sg_addr = (u32) escb_phys;
464 pVirScb->SCB_SensePAddr = (U32) pPhysEscb; 463 scb->sense_addr = (u32) escb_phys;
465 pVirScb->SCB_EScb = pVirEscb; 464 scb->escb = escb;
466 pVirScb->SCB_ScbIdx = i; 465 scb->scbidx = i;
467 pVirScb++; 466 scb++;
468 pVirEscb++; 467 escb++;
469 } 468 }
470
471 return;
472} 469}
473 470
474/***************************************************************************/ 471/**
475static void initAFlag(ORC_HCS * hcsp) 472 * init_alloc_map - initialise allocation map
473 * @host: host map to configure
474 *
475 * Initialise the allocation maps for this device. If the device
 476 * is not quiescent, the caller must hold the allocation lock.
477 */
478
479static void init_alloc_map(struct orc_host * host)
476{ 480{
477 UCHAR i, j; 481 u8 i, j;
478 482
479 for (i = 0; i < MAX_CHANNELS; i++) { 483 for (i = 0; i < MAX_CHANNELS; i++) {
480 for (j = 0; j < 8; j++) { 484 for (j = 0; j < 8; j++) {
481 hcsp->BitAllocFlag[i][j] = 0xffffffff; 485 host->allocation_map[i][j] = 0xffffffff;
482 } 486 }
483 } 487 }
484} 488}
485 489
486/***************************************************************************/ 490/**
487static int init_orchid(ORC_HCS * hcsp) 491 * init_orchid - initialise the host adapter
 492 * @host: host adapter to initialise
493 *
 494 * Initialise the controller and if necessary load the firmware.
495 *
496 * Returns -1 if the initialisation fails.
497 */
498
499static int init_orchid(struct orc_host * host)
488{ 500{
489 UBYTE *readBytep; 501 u8 *ptr;
490 USHORT revision; 502 u16 revision;
491 UCHAR i; 503 u8 i;
492 504
493 initAFlag(hcsp); 505 init_alloc_map(host);
494 ORC_WR(hcsp->HCS_Base + ORC_GIMSK, 0xFF); /* Disable all interrupt */ 506 outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */
495 if (ORC_RD(hcsp->HCS_Base, ORC_HSTUS) & RREADY) { /* Orchid is ready */ 507
496 revision = get_FW_version(hcsp); 508 if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */
509 revision = orc_read_fwrev(host);
497 if (revision == 0xFFFF) { 510 if (revision == 0xFFFF) {
498 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, DEVRST); /* Reset Host Adapter */ 511 outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
499 if (waitChipReady(hcsp) == 0) 512 if (wait_chip_ready(host) == 0)
500 return (-1); 513 return -1;
501 load_FW(hcsp); /* Download FW */ 514 orc_load_firmware(host); /* Download FW */
502 setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */ 515 setup_SCBs(host); /* Setup SCB base and SCB Size registers */
503 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, 0); /* clear HOSTSTOP */ 516 outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */
504 if (waitFWReady(hcsp) == 0) 517 if (wait_firmware_ready(host) == 0)
505 return (-1); 518 return -1;
506 /* Wait for firmware ready */ 519 /* Wait for firmware ready */
507 } else { 520 } else {
508 setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */ 521 setup_SCBs(host); /* Setup SCB base and SCB Size registers */
509 } 522 }
510 } else { /* Orchid is not Ready */ 523 } else { /* Orchid is not Ready */
511 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, DEVRST); /* Reset Host Adapter */ 524 outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
512 if (waitChipReady(hcsp) == 0) 525 if (wait_chip_ready(host) == 0)
513 return (-1); 526 return -1;
514 load_FW(hcsp); /* Download FW */ 527 orc_load_firmware(host); /* Download FW */
515 setup_SCBs(hcsp); /* Setup SCB HCS_Base and SCB Size registers */ 528 setup_SCBs(host); /* Setup SCB base and SCB Size registers */
516 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); /* Do Hardware Reset & */ 529 outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */
517 530
518 /* clear HOSTSTOP */ 531 /* clear HOSTSTOP */
519 if (waitFWReady(hcsp) == 0) /* Wait for firmware ready */ 532 if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */
520 return (-1); 533 return -1;
521 } 534 }
522 535
523/*------------- get serial EEProm settting -------*/ 536 /* Load an EEProm copy into RAM */
524 537 /* Assumes single threaded at this point */
525 read_eeprom(hcsp); 538 read_eeprom(host);
526
527 if (nvramp->Revision != 1)
528 return (-1);
529 539
530 hcsp->HCS_SCSI_ID = nvramp->SCSI0Id; 540 if (nvramp->revision != 1)
531 hcsp->HCS_BIOS = nvramp->BIOSConfig1; 541 return -1;
532 hcsp->HCS_MaxTar = MAX_TARGETS;
533 readBytep = (UCHAR *) & (nvramp->Target00Config);
534 for (i = 0; i < 16; readBytep++, i++) {
535 hcsp->TargetFlag[i] = *readBytep;
536 hcsp->MaximumTags[i] = ORC_MAXTAGS;
537 } /* for */
538 542
539 if (nvramp->SCSI0Config & NCC_BUSRESET) { /* Reset SCSI bus */ 543 host->scsi_id = nvramp->scsi_id;
540 hcsp->HCS_Flags |= HCF_SCSI_RESET; 544 host->BIOScfg = nvramp->BIOSConfig1;
545 host->max_targets = MAX_TARGETS;
546 ptr = (u8 *) & (nvramp->Target00Config);
547 for (i = 0; i < 16; ptr++, i++) {
548 host->target_flag[i] = *ptr;
549 host->max_tags[i] = ORC_MAXTAGS;
541 } 550 }
542 ORC_WR(hcsp->HCS_Base + ORC_GIMSK, 0xFB); /* enable RP FIFO interrupt */ 551
543 return (0); 552 if (nvramp->SCSI0Config & NCC_BUSRESET)
553 host->flags |= HCF_SCSI_RESET;
554 outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */
555 return 0;
544} 556}
545 557
546/***************************************************************************** 558/**
547 Function name : orc_reset_scsi_bus 559 * orc_reset_scsi_bus - perform bus reset
548 Description : Reset registers, reset a hanging bus and 560 * @host: host being reset
549 kill active and disconnected commands for target w/o soft reset 561 *
550 Input : pHCB - Pointer to host adapter structure 562 * Perform a full bus reset on the adapter.
551 Output : None. 563 */
552 Return : pSRB - Pointer to SCSI request block. 564
553*****************************************************************************/ 565static int orc_reset_scsi_bus(struct orc_host * host)
554static int orc_reset_scsi_bus(ORC_HCS * pHCB)
555{ /* I need Host Control Block Information */ 566{ /* I need Host Control Block Information */
556 ULONG flags; 567 unsigned long flags;
557 568
558 spin_lock_irqsave(&(pHCB->BitAllocFlagLock), flags); 569 spin_lock_irqsave(&host->allocation_lock, flags);
559 570
560 initAFlag(pHCB); 571 init_alloc_map(host);
561 /* reset scsi bus */ 572 /* reset scsi bus */
562 ORC_WR(pHCB->HCS_Base + ORC_HCTRL, SCSIRST); 573 outb(SCSIRST, host->base + ORC_HCTRL);
563 if (waitSCSIRSTdone(pHCB) == 0) { 574 /* FIXME: We can spend up to a second with the lock held and
564 spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags); 575 interrupts off here */
576 if (wait_scsi_reset_done(host) == 0) {
577 spin_unlock_irqrestore(&host->allocation_lock, flags);
565 return FAILED; 578 return FAILED;
566 } else { 579 } else {
567 spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags); 580 spin_unlock_irqrestore(&host->allocation_lock, flags);
568 return SUCCESS; 581 return SUCCESS;
569 } 582 }
570} 583}
571 584
572/***************************************************************************** 585/**
573 Function name : orc_device_reset 586 * orc_device_reset - device reset handler
574 Description : Reset registers, reset a hanging bus and 587 * @host: host to reset
575 kill active and disconnected commands for target w/o soft reset 588 * @cmd: command causing the reset
 576 Input : pHCB - Pointer to host adapter structure 589 * @target: target device
577 Output : None. 590 *
578 Return : pSRB - Pointer to SCSI request block. 591 * Reset registers, reset a hanging bus and kill active and disconnected
579*****************************************************************************/ 592 * commands for target w/o soft reset
580static int orc_device_reset(ORC_HCS * pHCB, struct scsi_cmnd *SCpnt, unsigned int target) 593 */
594
595static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target)
581{ /* I need Host Control Block Information */ 596{ /* I need Host Control Block Information */
582 ORC_SCB *pScb; 597 struct orc_scb *scb;
583 ESCB *pVirEscb; 598 struct orc_extended_scb *escb;
584 ORC_SCB *pVirScb; 599 struct orc_scb *host_scb;
585 UCHAR i; 600 u8 i;
586 ULONG flags; 601 unsigned long flags;
587 602
588 spin_lock_irqsave(&(pHCB->BitAllocFlagLock), flags); 603 spin_lock_irqsave(&(host->allocation_lock), flags);
589 pScb = (ORC_SCB *) NULL; 604 scb = (struct orc_scb *) NULL;
590 pVirEscb = (ESCB *) NULL; 605 escb = (struct orc_extended_scb *) NULL;
591 606
592 /* setup scatter list address with one buffer */ 607 /* setup scatter list address with one buffer */
593 pVirScb = pHCB->HCS_virScbArray; 608 host_scb = host->scb_virt;
609
610 /* FIXME: is this safe if we then fail to issue the reset or race
611 a completion ? */
612 init_alloc_map(host);
594 613
595 initAFlag(pHCB); 614 /* Find the scb corresponding to the command */
596 /* device reset */
597 for (i = 0; i < ORC_MAXQUEUE; i++) { 615 for (i = 0; i < ORC_MAXQUEUE; i++) {
598 pVirEscb = pVirScb->SCB_EScb; 616 escb = host_scb->escb;
599 if ((pVirScb->SCB_Status) && (pVirEscb->SCB_Srb == SCpnt)) 617 if (host_scb->status && escb->srb == cmd)
600 break; 618 break;
601 pVirScb++; 619 host_scb++;
602 } 620 }
603 621
604 if (i == ORC_MAXQUEUE) { 622 if (i == ORC_MAXQUEUE) {
605 printk("Unable to Reset - No SCB Found\n"); 623 printk(KERN_ERR "Unable to Reset - No SCB Found\n");
606 spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags); 624 spin_unlock_irqrestore(&(host->allocation_lock), flags);
607 return FAILED; 625 return FAILED;
608 } 626 }
609 if ((pScb = orc_alloc_scb(pHCB)) == NULL) { 627
610 spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags); 628 /* Allocate a new SCB for the reset command to the firmware */
629 if ((scb = __orc_alloc_scb(host)) == NULL) {
630 /* Can't happen.. */
631 spin_unlock_irqrestore(&(host->allocation_lock), flags);
611 return FAILED; 632 return FAILED;
612 } 633 }
613 pScb->SCB_Opcode = ORC_BUSDEVRST; 634
 614 pScb->SCB_Target = target; 635 /* Reset device is handled by the firmware, we fill in an SCB and
615 pScb->SCB_HaStat = 0; 636 fire it at the controller, it does the rest */
616 pScb->SCB_TaStat = 0; 637 scb->opcode = ORC_BUSDEVRST;
617 pScb->SCB_Status = 0x0; 638 scb->target = target;
618 pScb->SCB_Link = 0xFF; 639 scb->hastat = 0;
619 pScb->SCB_Reserved0 = 0; 640 scb->tastat = 0;
620 pScb->SCB_Reserved1 = 0; 641 scb->status = 0x0;
621 pScb->SCB_XferLen = 0; 642 scb->link = 0xFF;
622 pScb->SCB_SGLen = 0; 643 scb->reserved0 = 0;
623 644 scb->reserved1 = 0;
624 pVirEscb->SCB_Srb = NULL; 645 scb->xferlen = 0;
625 pVirEscb->SCB_Srb = SCpnt; 646 scb->sg_len = 0;
626 orc_exec_scb(pHCB, pScb); /* Start execute SCB */ 647
627 spin_unlock_irqrestore(&(pHCB->BitAllocFlagLock), flags); 648 escb->srb = NULL;
649 escb->srb = cmd;
650 orc_exec_scb(host, scb); /* Start execute SCB */
651 spin_unlock_irqrestore(&host->allocation_lock, flags);
628 return SUCCESS; 652 return SUCCESS;
629} 653}
630 654
655/**
656 * __orc_alloc_scb - allocate an SCB
657 * @host: host to allocate from
658 *
659 * Allocate an SCB and return a pointer to the SCB object. NULL
660 * is returned if no SCB is free. The caller must already hold
661 * the allocator lock at this point.
662 */
631 663
632/***************************************************************************/ 664
633static ORC_SCB *__orc_alloc_scb(ORC_HCS * hcsp) 665static struct orc_scb *__orc_alloc_scb(struct orc_host * host)
634{ 666{
635 ORC_SCB *pTmpScb; 667 u8 channel;
636 UCHAR Ch; 668 unsigned long idx;
637 ULONG idx; 669 u8 index;
638 UCHAR index; 670 u8 i;
639 UCHAR i;
640 671
641 Ch = hcsp->HCS_Index; 672 channel = host->index;
642 for (i = 0; i < 8; i++) { 673 for (i = 0; i < 8; i++) {
643 for (index = 0; index < 32; index++) { 674 for (index = 0; index < 32; index++) {
644 if ((hcsp->BitAllocFlag[Ch][i] >> index) & 0x01) { 675 if ((host->allocation_map[channel][i] >> index) & 0x01) {
645 hcsp->BitAllocFlag[Ch][i] &= ~(1 << index); 676 host->allocation_map[channel][i] &= ~(1 << index);
646 break; 677 break;
647 } 678 }
648 } 679 }
649 idx = index + 32 * i; 680 idx = index + 32 * i;
650 pTmpScb = (ORC_SCB *) ((ULONG) hcsp->HCS_virScbArray + (idx * sizeof(ORC_SCB))); 681 /* Translate the index to a structure instance */
651 return (pTmpScb); 682 return (struct orc_scb *) ((unsigned long) host->scb_virt + (idx * sizeof(struct orc_scb)));
652 } 683 }
653 return (NULL); 684 return NULL;
654} 685}
655 686
656static ORC_SCB *orc_alloc_scb(ORC_HCS * hcsp) 687/**
688 * orc_alloc_scb - allocate an SCB
689 * @host: host to allocate from
690 *
691 * Allocate an SCB and return a pointer to the SCB object. NULL
692 * is returned if no SCB is free.
693 */
694
695static struct orc_scb *orc_alloc_scb(struct orc_host * host)
657{ 696{
658 ORC_SCB *pTmpScb; 697 struct orc_scb *scb;
659 ULONG flags; 698 unsigned long flags;
660 699
661 spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags); 700 spin_lock_irqsave(&host->allocation_lock, flags);
662 pTmpScb = __orc_alloc_scb(hcsp); 701 scb = __orc_alloc_scb(host);
663 spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags); 702 spin_unlock_irqrestore(&host->allocation_lock, flags);
664 return (pTmpScb); 703 return scb;
665} 704}
666 705
706/**
707 * orc_release_scb - release an SCB
708 * @host: host owning the SCB
709 * @scb: SCB that is now free
710 *
711 * Called to return a completed SCB to the allocation pool. Before
712 * calling the SCB must be out of use on both the host and the HA.
713 */
667 714
668/***************************************************************************/ 715static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
669static void orc_release_scb(ORC_HCS * hcsp, ORC_SCB * scbp)
670{ 716{
671 ULONG flags; 717 unsigned long flags;
672 UCHAR Index; 718 u8 index, i, channel;
673 UCHAR i; 719
674 UCHAR Ch; 720 spin_lock_irqsave(&(host->allocation_lock), flags);
675 721 channel = host->index; /* Channel */
676 spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags); 722 index = scb->scbidx;
677 Ch = hcsp->HCS_Index; 723 i = index / 32;
678 Index = scbp->SCB_ScbIdx; 724 index %= 32;
679 i = Index / 32; 725 host->allocation_map[channel][i] |= (1 << index);
680 Index %= 32; 726 spin_unlock_irqrestore(&(host->allocation_lock), flags);
681 hcsp->BitAllocFlag[Ch][i] |= (1 << Index);
682 spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags);
683} 727}
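
The allocator above treats allocation_map[channel] as eight 32-bit words of free flags: a set bit means the SCB at that index is free, __orc_alloc_scb() clears a bit to claim it and orc_release_scb() sets it back. Note that as written __orc_alloc_scb() returns inside the first pass of the outer loop, so only word 0 is ever examined; a sketch of the full-scan form of the same idiom (hypothetical, not the committed code):

/* Hypothetical full scan of the 8x32-bit free map: claim and return
 * the first free SCB, or NULL.  Caller holds the allocation lock. */
static struct orc_scb *alloc_scb_scan(struct orc_host *host)
{
	u8 channel = host->index;
	u8 i, index;

	for (i = 0; i < 8; i++) {
		for (index = 0; index < 32; index++) {
			if ((host->allocation_map[channel][i] >> index) & 0x01) {
				host->allocation_map[channel][i] &= ~(1 << index);
				return &host->scb_virt[index + 32 * i];
			}
		}
	}
	return NULL;	/* no free SCB */
}
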
684 728
685/***************************************************************************** 729/**
686 Function name : abort_SCB 730 * orchid_abort_scb - abort a command
687 Description : Abort a queued command. 731 *
688 (commands that are on the bus can't be aborted easily) 732 * Abort a queued command that has been passed to the firmware layer
 689 Input : pHCB - Pointer to host adapter structure 733 * if possible. This is all handled by the firmware. We ask the firmware
 690 Output : None. 734 * and it either aborts the command or fails.
691 Return : pSRB - Pointer to SCSI request block. 735 */
692*****************************************************************************/ 736
693static int abort_SCB(ORC_HCS * hcsp, ORC_SCB * pScb) 737static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb)
694{ 738{
695 unsigned char bData, bStatus; 739 unsigned char data, status;
696 740
697 ORC_WR(hcsp->HCS_Base + ORC_HDATA, ORC_CMD_ABORT_SCB); /* Write command */ 741 outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */
698 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 742 outb(HDO, host->base + ORC_HCTRL);
699 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 743 if (wait_HDO_off(host) == 0) /* Wait HDO off */
700 return 0; 744 return 0;
701 745
702 ORC_WR(hcsp->HCS_Base + ORC_HDATA, pScb->SCB_ScbIdx); /* Write address */ 746 outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */
703 ORC_WR(hcsp->HCS_Base + ORC_HCTRL, HDO); 747 outb(HDO, host->base + ORC_HCTRL);
704 if (waitHDOoff(hcsp) == 0) /* Wait HDO off */ 748 if (wait_HDO_off(host) == 0) /* Wait HDO off */
705 return 0; 749 return 0;
706 750
707 if (waitHDIset(hcsp, &bData) == 0) /* Wait HDI set */ 751 if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
708 return 0; 752 return 0;
709 bStatus = ORC_RD(hcsp->HCS_Base, ORC_HDATA); 753 status = inb(host->base + ORC_HDATA);
710 ORC_WR(hcsp->HCS_Base + ORC_HSTUS, bData); /* Clear HDI */ 754 outb(data, host->base + ORC_HSTUS); /* Clear HDI */
711 755
712 if (bStatus == 1) /* 0 - Successfully */ 756 if (status == 1) /* 0 - Successfully */
713 return 0; /* 1 - Fail */ 757 return 0; /* 1 - Fail */
714 return 1; 758 return 1;
715} 759}
716 760
717/***************************************************************************** 761static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd)
718 Function name : inia100_abort
719 Description : Abort a queued command.
720 (commands that are on the bus can't be aborted easily)
721 Input : pHCB - Pointer to host adapter structure
722 Output : None.
723 Return : pSRB - Pointer to SCSI request block.
724*****************************************************************************/
725static int orc_abort_srb(ORC_HCS * hcsp, struct scsi_cmnd *SCpnt)
726{ 762{
727 ESCB *pVirEscb; 763 struct orc_extended_scb *escb;
728 ORC_SCB *pVirScb; 764 struct orc_scb *scb;
729 UCHAR i; 765 u8 i;
730 ULONG flags; 766 unsigned long flags;
731 767
732 spin_lock_irqsave(&(hcsp->BitAllocFlagLock), flags); 768 spin_lock_irqsave(&(host->allocation_lock), flags);
733 769
734 pVirScb = hcsp->HCS_virScbArray; 770 scb = host->scb_virt;
735 771
736 for (i = 0; i < ORC_MAXQUEUE; i++, pVirScb++) { 772 /* Walk the queue until we find the SCB that belongs to the command
737 pVirEscb = pVirScb->SCB_EScb; 773 block. This isn't a performance critical path so a walk in the park
738 if ((pVirScb->SCB_Status) && (pVirEscb->SCB_Srb == SCpnt)) { 774 here does no harm */
739 if (pVirScb->SCB_TagMsg == 0) { 775
740 spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags); 776 for (i = 0; i < ORC_MAXQUEUE; i++, scb++) {
741 return FAILED; 777 escb = scb->escb;
778 if (scb->status && escb->srb == cmd) {
779 if (scb->tag_msg == 0) {
780 goto out;
742 } else { 781 } else {
743 if (abort_SCB(hcsp, pVirScb)) { 782 /* Issue an ABORT to the firmware */
744 pVirEscb->SCB_Srb = NULL; 783 if (orchid_abort_scb(host, scb)) {
745 spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags); 784 escb->srb = NULL;
785 spin_unlock_irqrestore(&host->allocation_lock, flags);
746 return SUCCESS; 786 return SUCCESS;
747 } else { 787 } else
748 spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags); 788 goto out;
749 return FAILED;
750 }
751 } 789 }
752 } 790 }
753 } 791 }
754 spin_unlock_irqrestore(&(hcsp->BitAllocFlagLock), flags); 792out:
793 spin_unlock_irqrestore(&host->allocation_lock, flags);
755 return FAILED; 794 return FAILED;
756} 795}
757 796
758/*********************************************************************** 797/**
759 Routine Description: 798 * orc_interrupt - IRQ processing
760 This is the interrupt service routine for the Orchid SCSI adapter. 799 * @host: Host causing the interrupt
761 It reads the interrupt register to determine if the adapter is indeed 800 *
762 the source of the interrupt and clears the interrupt at the device. 801 * This function is called from the IRQ handler and protected
763 Arguments: 802 * by the host lock. While the controller reports that there are
 764 HwDeviceExtension - HBA miniport driver's adapter data storage 803 * SCBs for processing, we pull them off the controller, turn the
765 Return Value: 804 * index into a host address pointer to the scb and call the scb
766***********************************************************************/ 805 * handler.
767static void orc_interrupt( 806 *
768 ORC_HCS * hcsp 807 * Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise
769) 808 */
809
810static irqreturn_t orc_interrupt(struct orc_host * host)
770{ 811{
771 BYTE bScbIdx; 812 u8 scb_index;
772 ORC_SCB *pScb; 813 struct orc_scb *scb;
773 814
774 if (ORC_RD(hcsp->HCS_Base, ORC_RQUEUECNT) == 0) { 815 /* Check if we have an SCB queued for servicing */
775 return; // 0; 816 if (inb(host->base + ORC_RQUEUECNT) == 0)
817 return IRQ_NONE;
776 818
777 }
778 do { 819 do {
779 bScbIdx = ORC_RD(hcsp->HCS_Base, ORC_RQUEUE); 820 /* Get the SCB index of the SCB to service */
780 821 scb_index = inb(host->base + ORC_RQUEUE);
781 pScb = (ORC_SCB *) ((ULONG) hcsp->HCS_virScbArray + (ULONG) (sizeof(ORC_SCB) * bScbIdx)); 822
782 pScb->SCB_Status = 0x0; 823 /* Translate it back to a host pointer */
783 824 scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index));
784 inia100SCBPost((BYTE *) hcsp, (BYTE *) pScb); 825 scb->status = 0x0;
785 } while (ORC_RD(hcsp->HCS_Base, ORC_RQUEUECNT)); 826 /* Process the SCB */
786 return; //1; 827 inia100_scb_handler(host, scb);
787 828 } while (inb(host->base + ORC_RQUEUECNT));
829 return IRQ_HANDLED;
 788} /* End of I1060Interrupt() */ 830} /* End of orc_interrupt() */
789 831
790/***************************************************************************** 832/**
791 Function name : inia100BuildSCB 833 * inia100_build_scb - build SCB
 792 Description : 834 * @host: host owning the control block
793 Input : pHCB - Pointer to host adapter structure 835 * @scb: control block to use
794 Output : None. 836 * @cmd: Mid layer command
795 Return : pSRB - Pointer to SCSI request block. 837 *
796*****************************************************************************/ 838 * Build a host adapter control block from the SCSI mid layer command
797static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * SCpnt) 839 */
840
841static void inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
798{ /* Create corresponding SCB */ 842{ /* Create corresponding SCB */
799 struct scatterlist *pSrbSG; 843 struct scatterlist *sg;
800 ORC_SG *pSG; /* Pointer to SG list */ 844 struct orc_sgent *sgent; /* Pointer to SG list */
801 int i, count_sg; 845 int i, count_sg;
802 ESCB *pEScb; 846 struct orc_extended_scb *escb;
803 847
804 pEScb = pSCB->SCB_EScb; 848 /* Links between the escb, scb and Linux scsi midlayer cmd */
805 pEScb->SCB_Srb = SCpnt; 849 escb = scb->escb;
806 pSG = NULL; 850 escb->srb = cmd;
807 851 sgent = NULL;
808 pSCB->SCB_Opcode = ORC_EXECSCSI; 852
809 pSCB->SCB_Flags = SCF_NO_DCHK; /* Clear done bit */ 853 /* Set up the SCB to do a SCSI command block */
810 pSCB->SCB_Target = SCpnt->device->id; 854 scb->opcode = ORC_EXECSCSI;
811 pSCB->SCB_Lun = SCpnt->device->lun; 855 scb->flags = SCF_NO_DCHK; /* Clear done bit */
812 pSCB->SCB_Reserved0 = 0; 856 scb->target = cmd->device->id;
813 pSCB->SCB_Reserved1 = 0; 857 scb->lun = cmd->device->lun;
814 pSCB->SCB_SGLen = 0; 858 scb->reserved0 = 0;
815 859 scb->reserved1 = 0;
816 if ((pSCB->SCB_XferLen = (U32) SCpnt->request_bufflen)) { 860 scb->sg_len = 0;
817 pSG = (ORC_SG *) & pEScb->ESCB_SGList[0]; 861
818 if (SCpnt->use_sg) { 862 scb->xferlen = (u32) scsi_bufflen(cmd);
819 pSrbSG = (struct scatterlist *) SCpnt->request_buffer; 863 sgent = (struct orc_sgent *) & escb->sglist[0];
820 count_sg = pci_map_sg(pHCB->pdev, pSrbSG, SCpnt->use_sg, 864
821 SCpnt->sc_data_direction); 865 count_sg = scsi_dma_map(cmd);
822 pSCB->SCB_SGLen = (U32) (count_sg * 8); 866 BUG_ON(count_sg < 0);
823 for (i = 0; i < count_sg; i++, pSG++, pSrbSG++) { 867
824 pSG->SG_Ptr = (U32) sg_dma_address(pSrbSG); 868 /* Build the scatter gather lists */
825 pSG->SG_Len = (U32) sg_dma_len(pSrbSG); 869 if (count_sg) {
826 } 870 scb->sg_len = (u32) (count_sg * 8);
827 } else if (SCpnt->request_bufflen != 0) {/* Non SG */ 871 scsi_for_each_sg(cmd, sg, count_sg, i) {
828 pSCB->SCB_SGLen = 0x8; 872 sgent->base = (u32) sg_dma_address(sg);
829 SCpnt->SCp.dma_handle = pci_map_single(pHCB->pdev, 873 sgent->length = (u32) sg_dma_len(sg);
830 SCpnt->request_buffer, 874 sgent++;
831 SCpnt->request_bufflen,
832 SCpnt->sc_data_direction);
833 pSG->SG_Ptr = (U32) SCpnt->SCp.dma_handle;
834 pSG->SG_Len = (U32) SCpnt->request_bufflen;
835 } else {
836 pSCB->SCB_SGLen = 0;
837 pSG->SG_Ptr = 0;
838 pSG->SG_Len = 0;
839 } 875 }
876 } else {
877 scb->sg_len = 0;
878 sgent->base = 0;
879 sgent->length = 0;
840 } 880 }
841 pSCB->SCB_SGPAddr = (U32) pSCB->SCB_SensePAddr; 881 scb->sg_addr = (u32) scb->sense_addr;
842 pSCB->SCB_HaStat = 0; 882 scb->hastat = 0;
843 pSCB->SCB_TaStat = 0; 883 scb->tastat = 0;
844 pSCB->SCB_Link = 0xFF; 884 scb->link = 0xFF;
845 pSCB->SCB_SenseLen = SENSE_SIZE; 885 scb->sense_len = SENSE_SIZE;
846 pSCB->SCB_CDBLen = SCpnt->cmd_len; 886 scb->cdb_len = cmd->cmd_len;
847 if (pSCB->SCB_CDBLen >= IMAX_CDB) { 887 if (scb->cdb_len >= IMAX_CDB) {
 848 printk("max cdb length= %x\n", SCpnt->cmd_len); 888 printk("max cdb length= %x\n", cmd->cmd_len);
849 pSCB->SCB_CDBLen = IMAX_CDB; 889 scb->cdb_len = IMAX_CDB;
850 } 890 }
851 pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW; 891 scb->ident = cmd->device->lun | DISC_ALLOW;
852 if (SCpnt->device->tagged_supported) { /* Tag Support */ 892 if (cmd->device->tagged_supported) { /* Tag Support */
853 pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ 893 scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
854 } else { 894 } else {
855 pSCB->SCB_TagMsg = 0; /* No tag support */ 895 scb->tag_msg = 0; /* No tag support */
856 } 896 }
857 memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, pSCB->SCB_CDBLen); 897 memcpy(&scb->cdb[0], &cmd->cmnd, scb->cdb_len);
858 return;
859} 898}
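
inia100_build_scb() above relies on BUG_ON(count_sg < 0), so a scsi_dma_map() failure brings the machine down. A gentler, hypothetical split of the mapping step that reports failure so inia100_queue() could back off with SCSI_MLQUEUE_HOST_BUSY instead (inia100_map_data() is not part of this driver; it assumes <scsi/scsi_cmnd.h>):

/* Hypothetical: the mapping step factored out of inia100_build_scb()
 * with an error return in place of BUG_ON(). */
static int inia100_map_data(struct scsi_cmnd *cmd, struct orc_scb *scb,
			    struct orc_extended_scb *escb)
{
	struct orc_sgent *sgent = (struct orc_sgent *)&escb->sglist[0];
	struct scatterlist *sg;
	int i, count_sg;

	count_sg = scsi_dma_map(cmd);
	if (count_sg < 0)
		return -1;	/* caller returns SCSI_MLQUEUE_HOST_BUSY */
	scb->sg_len = (u32)(count_sg * 8);
	if (count_sg == 0) {	/* no data phase */
		sgent->base = 0;
		sgent->length = 0;
		return 0;
	}
	scsi_for_each_sg(cmd, sg, count_sg, i) {
		sgent->base = (u32)sg_dma_address(sg);
		sgent->length = (u32)sg_dma_len(sg);
		sgent++;
	}
	return 0;
}
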
860 899
861/***************************************************************************** 900/**
862 Function name : inia100_queue 901 * inia100_queue - queue command with host
863 Description : Queue a command and setup interrupts for a free bus. 902 * @cmd: Command block
864 Input : pHCB - Pointer to host adapter structure 903 * @done: Completion function
865 Output : None. 904 *
866 Return : pSRB - Pointer to SCSI request block. 905 * Called by the mid layer to queue a command. Process the command
867*****************************************************************************/ 906 * block, build the host specific scb structures and if there is room
868static int inia100_queue(struct scsi_cmnd * SCpnt, void (*done) (struct scsi_cmnd *)) 907 * queue the command down to the controller
908 */
909
910static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
869{ 911{
870 register ORC_SCB *pSCB; 912 struct orc_scb *scb;
871 ORC_HCS *pHCB; /* Point to Host adapter control block */ 913 struct orc_host *host; /* Point to Host adapter control block */
872 914
873 pHCB = (ORC_HCS *) SCpnt->device->host->hostdata; 915 host = (struct orc_host *) cmd->device->host->hostdata;
874 SCpnt->scsi_done = done; 916 cmd->scsi_done = done;
875 /* Get free SCSI control block */ 917 /* Get free SCSI control block */
876 if ((pSCB = orc_alloc_scb(pHCB)) == NULL) 918 if ((scb = orc_alloc_scb(host)) == NULL)
877 return SCSI_MLQUEUE_HOST_BUSY; 919 return SCSI_MLQUEUE_HOST_BUSY;
878 920
879 inia100BuildSCB(pHCB, pSCB, SCpnt); 921 inia100_build_scb(host, scb, cmd);
880 orc_exec_scb(pHCB, pSCB); /* Start execute SCB */ 922 orc_exec_scb(host, scb); /* Start execute SCB */
881 923 return 0;
882 return (0);
883} 924}
884 925
885/***************************************************************************** 926/*****************************************************************************
886 Function name : inia100_abort 927 Function name : inia100_abort
887 Description : Abort a queued command. 928 Description : Abort a queued command.
888 (commands that are on the bus can't be aborted easily) 929 (commands that are on the bus can't be aborted easily)
889 Input : pHCB - Pointer to host adapter structure 930 Input : host - Pointer to host adapter structure
890 Output : None. 931 Output : None.
891 Return : pSRB - Pointer to SCSI request block. 932 Return : pSRB - Pointer to SCSI request block.
892*****************************************************************************/ 933*****************************************************************************/
893static int inia100_abort(struct scsi_cmnd * SCpnt) 934static int inia100_abort(struct scsi_cmnd * cmd)
894{ 935{
895 ORC_HCS *hcsp; 936 struct orc_host *host;
896 937
897 hcsp = (ORC_HCS *) SCpnt->device->host->hostdata; 938 host = (struct orc_host *) cmd->device->host->hostdata;
898 return orc_abort_srb(hcsp, SCpnt); 939 return inia100_abort_cmd(host, cmd);
899} 940}
900 941
901/***************************************************************************** 942/*****************************************************************************
902 Function name : inia100_reset 943 Function name : inia100_reset
903 Description : Reset registers, reset a hanging bus and 944 Description : Reset registers, reset a hanging bus and
904 kill active and disconnected commands for target w/o soft reset 945 kill active and disconnected commands for target w/o soft reset
905 Input : pHCB - Pointer to host adapter structure 946 Input : host - Pointer to host adapter structure
906 Output : None. 947 Output : None.
907 Return : pSRB - Pointer to SCSI request block. 948 Return : pSRB - Pointer to SCSI request block.
908*****************************************************************************/ 949*****************************************************************************/
909static int inia100_bus_reset(struct scsi_cmnd * SCpnt) 950static int inia100_bus_reset(struct scsi_cmnd * cmd)
910{ /* I need Host Control Block Information */ 951{ /* I need Host Control Block Information */
911 ORC_HCS *pHCB; 952 struct orc_host *host;
912 pHCB = (ORC_HCS *) SCpnt->device->host->hostdata; 953 host = (struct orc_host *) cmd->device->host->hostdata;
913 return orc_reset_scsi_bus(pHCB); 954 return orc_reset_scsi_bus(host);
914} 955}
915 956
916/***************************************************************************** 957/*****************************************************************************
917 Function name : inia100_device_reset 958 Function name : inia100_device_reset
918 Description : Reset the device 959 Description : Reset the device
919 Input : pHCB - Pointer to host adapter structure 960 Input : host - Pointer to host adapter structure
920 Output : None. 961 Output : None.
921 Return : pSRB - Pointer to SCSI request block. 962 Return : pSRB - Pointer to SCSI request block.
922*****************************************************************************/ 963*****************************************************************************/
923static int inia100_device_reset(struct scsi_cmnd * SCpnt) 964static int inia100_device_reset(struct scsi_cmnd * cmd)
924{ /* I need Host Control Block Information */ 965{ /* I need Host Control Block Information */
925 ORC_HCS *pHCB; 966 struct orc_host *host;
926 pHCB = (ORC_HCS *) SCpnt->device->host->hostdata; 967 host = (struct orc_host *) cmd->device->host->hostdata;
927 return orc_device_reset(pHCB, SCpnt, scmd_id(SCpnt)); 968 return orc_device_reset(host, cmd, scmd_id(cmd));
928 969
929} 970}
930 971
931/***************************************************************************** 972/**
932 Function name : inia100SCBPost 973 * inia100_scb_handler - interrupt callback
933 Description : This is callback routine be called when orc finish one 974 * @host: Host causing the interrupt
934 SCSI command. 975 * @scb: SCB the controller returned as needing processing
935 Input : pHCB - Pointer to host adapter control block. 976 *
936 pSCB - Pointer to SCSI control block. 977 * Perform completion processing on a control block. Do the conversions
937 Output : None. 978 * from host to SCSI midlayer error coding, save any sense data and
 938 Return : None. 979 * then complete with the midlayer and recycle the scb.
939*****************************************************************************/ 980 */
940static void inia100SCBPost(BYTE * pHcb, BYTE * pScb) 981
982static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
941{ 983{
942 struct scsi_cmnd *pSRB; /* Pointer to SCSI request block */ 984 struct scsi_cmnd *cmd; /* Pointer to SCSI request block */
943 ORC_HCS *pHCB; 985 struct orc_extended_scb *escb;
944 ORC_SCB *pSCB; 986
945 ESCB *pEScb; 987 escb = scb->escb;
946 988 if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) {
947 pHCB = (ORC_HCS *) pHcb; 989 printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n");
948 pSCB = (ORC_SCB *) pScb; 990 orc_release_scb(host, scb); /* Release SCB for current channel */
949 pEScb = pSCB->SCB_EScb;
950 if ((pSRB = (struct scsi_cmnd *) pEScb->SCB_Srb) == 0) {
951 printk("inia100SCBPost: SRB pointer is empty\n");
952 orc_release_scb(pHCB, pSCB); /* Release SCB for current channel */
953 return; 991 return;
954 } 992 }
955 pEScb->SCB_Srb = NULL; 993 escb->srb = NULL;
956 994
957 switch (pSCB->SCB_HaStat) { 995 switch (scb->hastat) {
958 case 0x0: 996 case 0x0:
959 case 0xa: /* Linked command complete without error and linked normally */ 997 case 0xa: /* Linked command complete without error and linked normally */
960 case 0xb: /* Linked command complete without error interrupt generated */ 998 case 0xb: /* Linked command complete without error interrupt generated */
961 pSCB->SCB_HaStat = 0; 999 scb->hastat = 0;
962 break; 1000 break;
963 1001
964 case 0x11: /* Selection time out-The initiator selection or target 1002 case 0x11: /* Selection time out-The initiator selection or target
965 reselection was not complete within the SCSI Time out period */ 1003 reselection was not complete within the SCSI Time out period */
966 pSCB->SCB_HaStat = DID_TIME_OUT; 1004 scb->hastat = DID_TIME_OUT;
967 break; 1005 break;
968 1006
969 case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus 1007 case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
970 phase sequence was requested by the target. The host adapter 1008 phase sequence was requested by the target. The host adapter
971 will generate a SCSI Reset Condition, notifying the host with 1009 will generate a SCSI Reset Condition, notifying the host with
972 a SCRD interrupt */ 1010 a SCRD interrupt */
973 pSCB->SCB_HaStat = DID_RESET; 1011 scb->hastat = DID_RESET;
974 break; 1012 break;
975 1013
976 case 0x1a: /* SCB Aborted. 07/21/98 */ 1014 case 0x1a: /* SCB Aborted. 07/21/98 */
977 pSCB->SCB_HaStat = DID_ABORT; 1015 scb->hastat = DID_ABORT;
978 break; 1016 break;
979 1017
980 case 0x12: /* Data overrun/underrun-The target attempted to transfer more data 1018 case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
@@ -984,46 +1022,41 @@ static void inia100SCBPost(BYTE * pHcb, BYTE * pScb)
984 case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */ 1022 case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */
985 1023
986 default: 1024 default:
987 printk("inia100: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat); 1025 printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat);
988 pSCB->SCB_HaStat = DID_ERROR; /* Couldn't find any better */ 1026 scb->hastat = DID_ERROR; /* Couldn't find any better */
989 break; 1027 break;
990 } 1028 }
991 1029
992 if (pSCB->SCB_TaStat == 2) { /* Check condition */ 1030 if (scb->tastat == 2) { /* Check condition */
993 memcpy((unsigned char *) &pSRB->sense_buffer[0], 1031 memcpy((unsigned char *) &cmd->sense_buffer[0],
994 (unsigned char *) &pEScb->ESCB_SGList[0], SENSE_SIZE); 1032 (unsigned char *) &escb->sglist[0], SENSE_SIZE);
995 } 1033 }
996 pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16); 1034 cmd->result = scb->tastat | (scb->hastat << 16);
997 1035 scsi_dma_unmap(cmd);
998 if (pSRB->use_sg) { 1036 cmd->scsi_done(cmd); /* Notify system DONE */
999 pci_unmap_sg(pHCB->pdev, 1037 orc_release_scb(host, scb); /* Release SCB for current channel */
1000 (struct scatterlist *)pSRB->request_buffer,
1001 pSRB->use_sg, pSRB->sc_data_direction);
1002 } else if (pSRB->request_bufflen != 0) {
1003 pci_unmap_single(pHCB->pdev, pSRB->SCp.dma_handle,
1004 pSRB->request_bufflen,
1005 pSRB->sc_data_direction);
1006 }
1007
1008 pSRB->scsi_done(pSRB); /* Notify system DONE */
1009
1010 orc_release_scb(pHCB, pSCB); /* Release SCB for current channel */
1011} 1038}
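
For reference, the result word assembled just above packs the SCSI target status into the low byte and the host (DID_*) status into bits 16-23. A worked illustration using the 2.6 midlayer constants from include/scsi/scsi.h:

    /* Illustration only: DID_TIME_OUT is 0x03, CHECK CONDITION target
     * status is 0x02.
     */
    cmd->result = scb->tastat | (scb->hastat << 16);
    /* hastat = DID_TIME_OUT, tastat = 0  ->  result = 0x00030000      */
    /* hastat = 0, tastat = 2 (CHECK CONDITION) -> result = 0x00000002,
     * and the sense data copied above is what the midlayer decodes.   */
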
1012 1039
1013/* 1040/**
1014 * Interrupt handler (main routine of the driver) 1041 * inia100_intr - interrupt handler
1042 * @irqno: Interrupt value
1043 * @devid: Host adapter
1044 *
1045 * Entry point for IRQ handling. All the real work is performed
1046 * by orc_interrupt.
1015 */ 1047 */
1016static irqreturn_t inia100_intr(int irqno, void *devid) 1048static irqreturn_t inia100_intr(int irqno, void *devid)
1017{ 1049{
1018 struct Scsi_Host *host = (struct Scsi_Host *)devid; 1050 struct Scsi_Host *shost = (struct Scsi_Host *)devid;
1019 ORC_HCS *pHcb = (ORC_HCS *)host->hostdata; 1051 struct orc_host *host = (struct orc_host *)shost->hostdata;
1020 unsigned long flags; 1052 unsigned long flags;
1053 irqreturn_t res;
1021 1054
1022 spin_lock_irqsave(host->host_lock, flags); 1055 spin_lock_irqsave(shost->host_lock, flags);
1023 orc_interrupt(pHcb); 1056 res = orc_interrupt(host);
1024 spin_unlock_irqrestore(host->host_lock, flags); 1057 spin_unlock_irqrestore(shost->host_lock, flags);
1025 1058
1026 return IRQ_HANDLED; 1059 return res;
1027} 1060}
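
Propagating orc_interrupt()'s return value instead of a hard-coded IRQ_HANDLED lets the kernel detect stuck shared interrupt lines. A minimal sketch of the shape orc_interrupt() is assumed to have (the status-port offset below is hypothetical, not taken from the driver):

    static irqreturn_t orc_interrupt(struct orc_host *host)
    {
            u8 status = inb(host->base + 0x40);     /* hypothetical status port */

            if (status == 0)                        /* not our interrupt */
                    return IRQ_NONE;
            /* ... walk completed SCBs, calling inia100_scb_handler() ... */
            return IRQ_HANDLED;
    }
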
1028 1061
1029static struct scsi_host_template inia100_template = { 1062static struct scsi_host_template inia100_template = {
@@ -1044,12 +1077,12 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
1044 const struct pci_device_id *id) 1077 const struct pci_device_id *id)
1045{ 1078{
1046 struct Scsi_Host *shost; 1079 struct Scsi_Host *shost;
1047 ORC_HCS *pHCB; 1080 struct orc_host *host;
1048 unsigned long port, bios; 1081 unsigned long port, bios;
1049 int error = -ENODEV; 1082 int error = -ENODEV;
1050 u32 sz; 1083 u32 sz;
1051 unsigned long dBiosAdr; 1084 unsigned long biosaddr;
1052 char *pbBiosAdr; 1085 char *bios_phys;
1053 1086
1054 if (pci_enable_device(pdev)) 1087 if (pci_enable_device(pdev))
1055 goto out; 1088 goto out;
@@ -1068,55 +1101,55 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
1068 } 1101 }
1069 1102
1070 /* <02> read from base address + 0x50 offset to get the bios value. */ 1103 /* <02> read from base address + 0x50 offset to get the bios value. */
1071 bios = ORC_RDWORD(port, 0x50); 1104 bios = inw(port + 0x50);
1072 1105
1073 1106
1074 shost = scsi_host_alloc(&inia100_template, sizeof(ORC_HCS)); 1107 shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host));
1075 if (!shost) 1108 if (!shost)
1076 goto out_release_region; 1109 goto out_release_region;
1077 1110
1078 pHCB = (ORC_HCS *)shost->hostdata; 1111 host = (struct orc_host *)shost->hostdata;
1079 pHCB->pdev = pdev; 1112 host->pdev = pdev;
1080 pHCB->HCS_Base = port; 1113 host->base = port;
1081 pHCB->HCS_BIOS = bios; 1114 host->BIOScfg = bios;
1082 spin_lock_init(&pHCB->BitAllocFlagLock); 1115 spin_lock_init(&host->allocation_lock);
1083 1116
1084 /* Get total memory needed for SCB */ 1117 /* Get total memory needed for SCB */
1085 sz = ORC_MAXQUEUE * sizeof(ORC_SCB); 1118 sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
1086 pHCB->HCS_virScbArray = pci_alloc_consistent(pdev, sz, 1119 host->scb_virt = pci_alloc_consistent(pdev, sz,
1087 &pHCB->HCS_physScbArray); 1120 &host->scb_phys);
1088 if (!pHCB->HCS_virScbArray) { 1121 if (!host->scb_virt) {
1089 printk("inia100: SCB memory allocation error\n"); 1122 printk("inia100: SCB memory allocation error\n");
1090 goto out_host_put; 1123 goto out_host_put;
1091 } 1124 }
1092 memset(pHCB->HCS_virScbArray, 0, sz); 1125 memset(host->scb_virt, 0, sz);
1093 1126
1094 /* Get total memory needed for ESCB */ 1127 /* Get total memory needed for ESCB */
1095 sz = ORC_MAXQUEUE * sizeof(ESCB); 1128 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
1096 pHCB->HCS_virEscbArray = pci_alloc_consistent(pdev, sz, 1129 host->escb_virt = pci_alloc_consistent(pdev, sz,
1097 &pHCB->HCS_physEscbArray); 1130 &host->escb_phys);
1098 if (!pHCB->HCS_virEscbArray) { 1131 if (!host->escb_virt) {
1099 printk("inia100: ESCB memory allocation error\n"); 1132 printk("inia100: ESCB memory allocation error\n");
1100 goto out_free_scb_array; 1133 goto out_free_scb_array;
1101 } 1134 }
1102 memset(pHCB->HCS_virEscbArray, 0, sz); 1135 memset(host->escb_virt, 0, sz);
1103 1136
1104 dBiosAdr = pHCB->HCS_BIOS; 1137 biosaddr = host->BIOScfg;
1105 dBiosAdr = (dBiosAdr << 4); 1138 biosaddr = (biosaddr << 4);
1106 pbBiosAdr = phys_to_virt(dBiosAdr); 1139 bios_phys = phys_to_virt(biosaddr);
1107 if (init_orchid(pHCB)) { /* Initialize orchid chip */ 1140 if (init_orchid(host)) { /* Initialize orchid chip */
1108 printk("inia100: initial orchid fail!!\n"); 1141 printk("inia100: initial orchid fail!!\n");
1109 goto out_free_escb_array; 1142 goto out_free_escb_array;
1110 } 1143 }
1111 1144
1112 shost->io_port = pHCB->HCS_Base; 1145 shost->io_port = host->base;
1113 shost->n_io_port = 0xff; 1146 shost->n_io_port = 0xff;
1114 shost->can_queue = ORC_MAXQUEUE; 1147 shost->can_queue = ORC_MAXQUEUE;
1115 shost->unique_id = shost->io_port; 1148 shost->unique_id = shost->io_port;
1116 shost->max_id = pHCB->HCS_MaxTar; 1149 shost->max_id = host->max_targets;
1117 shost->max_lun = 16; 1150 shost->max_lun = 16;
1118 shost->irq = pHCB->HCS_Intr = pdev->irq; 1151 shost->irq = pdev->irq;
1119 shost->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */ 1152 shost->this_id = host->scsi_id; /* Assign HCS index */
1120 shost->sg_tablesize = TOTAL_SG_ENTRY; 1153 shost->sg_tablesize = TOTAL_SG_ENTRY;
1121 1154
1122 /* Initial orc chip */ 1155 /* Initial orc chip */
@@ -1137,36 +1170,36 @@ static int __devinit inia100_probe_one(struct pci_dev *pdev,
1137 scsi_scan_host(shost); 1170 scsi_scan_host(shost);
1138 return 0; 1171 return 0;
1139 1172
1140 out_free_irq: 1173out_free_irq:
1141 free_irq(shost->irq, shost); 1174 free_irq(shost->irq, shost);
1142 out_free_escb_array: 1175out_free_escb_array:
1143 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ESCB), 1176 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
1144 pHCB->HCS_virEscbArray, pHCB->HCS_physEscbArray); 1177 host->escb_virt, host->escb_phys);
1145 out_free_scb_array: 1178out_free_scb_array:
1146 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ORC_SCB), 1179 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
1147 pHCB->HCS_virScbArray, pHCB->HCS_physScbArray); 1180 host->scb_virt, host->scb_phys);
1148 out_host_put: 1181out_host_put:
1149 scsi_host_put(shost); 1182 scsi_host_put(shost);
1150 out_release_region: 1183out_release_region:
1151 release_region(port, 256); 1184 release_region(port, 256);
1152 out_disable_device: 1185out_disable_device:
1153 pci_disable_device(pdev); 1186 pci_disable_device(pdev);
1154 out: 1187out:
1155 return error; 1188 return error;
1156} 1189}
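
The error labels unwind in exact reverse order of the allocations above. For reference, the coherent-DMA pairing used for both arrays (the 2.6-era pci_* API; a minimal sketch, not the probe routine itself):

    /* pci_alloc_consistent() hands back a CPU pointer and fills in a bus
     * address the controller can DMA to; it does not zero the memory,
     * hence the explicit memset() in the probe path.
     */
    u32 sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
    struct orc_scb *virt;
    dma_addr_t phys;

    virt = pci_alloc_consistent(pdev, sz, &phys);   /* pdev: adapter's pci_dev */
    if (!virt)
            return -ENOMEM;
    memset(virt, 0, sz);
    /* ... hand 'phys' to the controller, use 'virt' from the CPU ... */
    pci_free_consistent(pdev, sz, virt, phys);      /* same size, both addresses */
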
1157 1190
1158static void __devexit inia100_remove_one(struct pci_dev *pdev) 1191static void __devexit inia100_remove_one(struct pci_dev *pdev)
1159{ 1192{
1160 struct Scsi_Host *shost = pci_get_drvdata(pdev); 1193 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1161 ORC_HCS *pHCB = (ORC_HCS *)shost->hostdata; 1194 struct orc_host *host = (struct orc_host *)shost->hostdata;
1162 1195
1163 scsi_remove_host(shost); 1196 scsi_remove_host(shost);
1164 1197
1165 free_irq(shost->irq, shost); 1198 free_irq(shost->irq, shost);
1166 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ESCB), 1199 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
1167 pHCB->HCS_virEscbArray, pHCB->HCS_physEscbArray); 1200 host->escb_virt, host->escb_phys);
1168 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(ORC_SCB), 1201 pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
1169 pHCB->HCS_virScbArray, pHCB->HCS_physScbArray); 1202 host->scb_virt, host->scb_phys);
1170 release_region(shost->io_port, 256); 1203 release_region(shost->io_port, 256);
1171 1204
1172 scsi_host_put(shost); 1205 scsi_host_put(shost);
diff --git a/drivers/scsi/a100u2w.h b/drivers/scsi/a100u2w.h
index 6f542d2600ea..d40e0c528198 100644
--- a/drivers/scsi/a100u2w.h
+++ b/drivers/scsi/a100u2w.h
@@ -18,27 +18,6 @@
18 * along with this program; see the file COPYING. If not, write to 18 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 * 20 *
21 * --------------------------------------------------------------------------
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions, and the following disclaimer,
28 * without modification, immediately at the beginning of the file.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. The name of the author may not be used to endorse or promote products
33 * derived from this software without specific prior written permission.
34 *
35 * Where this Software is combined with software released under the terms of
36 * the GNU General Public License ("GPL") and the terms of the GPL would require the
37 * combined work to also be released under the terms of the GPL, the terms
38 * and conditions of this License will apply in addition to those of the
39 * GPL with the exception of any terms or conditions of this License that
40 * conflict with, or are expressly prohibited by, the GPL.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -50,30 +29,19 @@
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE. 31 * SUCH DAMAGE.
53 */ 32 *
54
55/*
56 * Revision History: 33 * Revision History:
57 * 06/18/98 HL, Initial production Version 1.02 34 * 06/18/98 HL, Initial production Version 1.02
58 * 12/19/98 bv, Use spinlocks for 2.1.95 and up 35 * 12/19/98 bv, Use spinlocks for 2.1.95 and up
59 * 06/25/02 Doug Ledford <dledford@redhat.com> 36 * 06/25/02 Doug Ledford <dledford@redhat.com>
60 * - This and the i60uscsi.h file are almost identical, 37 * - This and the i60uscsi.h file are almost identical,
61 * merged them into a single header used by both .c files. 38 * merged them into a single header used by both .c files.
39 * 14/06/07 Alan Cox <alan@redhat.com>
40 * - Grand cleanup and Linuxisation
62 */ 41 */
63 42
64#define inia100_REVID "Initio INI-A100U2W SCSI device driver; Revision: 1.02d" 43#define inia100_REVID "Initio INI-A100U2W SCSI device driver; Revision: 1.02d"
65 44
66#define ULONG unsigned long
67#define USHORT unsigned short
68#define UCHAR unsigned char
69#define BYTE unsigned char
70#define WORD unsigned short
71#define DWORD unsigned long
72#define UBYTE unsigned char
73#define UWORD unsigned short
74#define UDWORD unsigned long
75#define U32 u32
76
77#if 1 45#if 1
78#define ORC_MAXQUEUE 245 46#define ORC_MAXQUEUE 245
79#define ORC_MAXTAGS 64 47#define ORC_MAXTAGS 64
@@ -90,10 +58,10 @@
90/************************************************************************/ 58/************************************************************************/
91/* Scatter-Gather Element Structure */ 59/* Scatter-Gather Element Structure */
92/************************************************************************/ 60/************************************************************************/
93typedef struct ORC_SG_Struc { 61struct orc_sgent {
94 U32 SG_Ptr; /* Data Pointer */ 62 u32 base; /* Data Pointer */
95 U32 SG_Len; /* Data Length */ 63 u32 length; /* Data Length */
96} ORC_SG; 64};
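
This is the hardware-visible scatter-gather element. Elsewhere in this merge, drivers are converted to the new data buffer accessors; a hedged sketch of how a table of these entries would be filled through that API, with escb and cmd as in the inia100 completion path above (the cpu_to_le32() conversion is an assumption about the controller's byte order):

    struct scatterlist *sg;
    struct orc_sgent *sgent = &escb->sglist[0];
    int i, count;

    count = scsi_dma_map(cmd);      /* maps scsi_sglist(cmd); <0 on error */
    if (count < 0)
            return count;
    scsi_for_each_sg(cmd, sg, count, i) {
            sgent[i].base = cpu_to_le32(sg_dma_address(sg));
            sgent[i].length = cpu_to_le32(sg_dma_len(sg));
    }
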
97 65
98/* SCSI related definition */ 66/* SCSI related definition */
99#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */ 67#define DISC_NOT_ALLOW 0x80 /* Disconnect is not allowed */
@@ -165,42 +133,45 @@ typedef struct ORC_SG_Struc {
165#define ORC_PRGMCTR1 0xE3 /* RISC program counter */ 133#define ORC_PRGMCTR1 0xE3 /* RISC program counter */
166#define ORC_RISCRAM 0xEC /* RISC RAM data port 4 bytes */ 134#define ORC_RISCRAM 0xEC /* RISC RAM data port 4 bytes */
167 135
168typedef struct orc_extended_scb { /* Extended SCB */ 136struct orc_extended_scb { /* Extended SCB */
169 ORC_SG ESCB_SGList[TOTAL_SG_ENTRY]; /*0 Start of SG list */ 137 struct orc_sgent sglist[TOTAL_SG_ENTRY]; /*0 Start of SG list */
170 struct scsi_cmnd *SCB_Srb; /*50 SRB Pointer */ 138 struct scsi_cmnd *srb; /*50 SRB Pointer */
171} ESCB; 139};
172 140
173/*********************************************************************** 141/***********************************************************************
174 SCSI Control Block 142 SCSI Control Block
143
144 0x40 bytes long, the last 8 are user bytes
175************************************************************************/ 145************************************************************************/
176typedef struct orc_scb { /* Scsi_Ctrl_Blk */ 146struct orc_scb { /* Scsi_Ctrl_Blk */
177 UBYTE SCB_Opcode; /*00 SCB command code&residual */ 147 u8 opcode; /*00 SCB command code&residual */
178 UBYTE SCB_Flags; /*01 SCB Flags */ 148 u8 flags; /*01 SCB Flags */
179 UBYTE SCB_Target; /*02 Target Id */ 149 u8 target; /*02 Target Id */
180 UBYTE SCB_Lun; /*03 Lun */ 150 u8 lun; /*03 Lun */
181 U32 SCB_Reserved0; /*04 Reserved for ORCHID must 0 */ 151 u32 reserved0; /*04 Reserved for ORCHID must 0 */
182 U32 SCB_XferLen; /*08 Data Transfer Length */ 152 u32 xferlen; /*08 Data Transfer Length */
183 U32 SCB_Reserved1; /*0C Reserved for ORCHID must 0 */ 153 u32 reserved1; /*0C Reserved for ORCHID must 0 */
184 U32 SCB_SGLen; /*10 SG list # * 8 */ 154 u32 sg_len; /*10 SG list # * 8 */
185 U32 SCB_SGPAddr; /*14 SG List Buf physical Addr */ 155 u32 sg_addr; /*14 SG List Buf physical Addr */
186 U32 SCB_SGPAddrHigh; /*18 SG Buffer high physical Addr */ 156 u32 sg_addrhigh; /*18 SG Buffer high physical Addr */
187 UBYTE SCB_HaStat; /*1C Host Status */ 157 u8 hastat; /*1C Host Status */
188 UBYTE SCB_TaStat; /*1D Target Status */ 158 u8 tastat; /*1D Target Status */
189 UBYTE SCB_Status; /*1E SCB status */ 159 u8 status; /*1E SCB status */
190 UBYTE SCB_Link; /*1F Link pointer, default 0xFF */ 160 u8 link; /*1F Link pointer, default 0xFF */
191 UBYTE SCB_SenseLen; /*20 Sense Allocation Length */ 161 u8 sense_len; /*20 Sense Allocation Length */
192 UBYTE SCB_CDBLen; /*21 CDB Length */ 162 u8 cdb_len; /*21 CDB Length */
193 UBYTE SCB_Ident; /*22 Identify */ 163 u8 ident; /*22 Identify */
194 UBYTE SCB_TagMsg; /*23 Tag Message */ 164 u8 tag_msg; /*23 Tag Message */
195 UBYTE SCB_CDB[IMAX_CDB]; /*24 SCSI CDBs */ 165 u8 cdb[IMAX_CDB]; /*24 SCSI CDBs */
196 UBYTE SCB_ScbIdx; /*3C Index for this ORCSCB */ 166 u8 scbidx; /*3C Index for this ORCSCB */
197 U32 SCB_SensePAddr; /*34 Sense Buffer physical Addr */ 167 u32 sense_addr; /*34 Sense Buffer physical Addr */
198 168
199 ESCB *SCB_EScb; /*38 Extended SCB Pointer */ 169 struct orc_extended_scb *escb; /*38 Extended SCB Pointer */
200#ifndef ALPHA 170 /* 64bit pointer or 32bit pointer + reserved ? */
201 UBYTE SCB_Reserved2[4]; /*3E Reserved for Driver use */ 171#ifndef CONFIG_64BIT
172 u8 reserved2[4]; /*3E Reserved for Driver use */
202#endif 173#endif
203} ORC_SCB; 174};
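
Since the controller consumes these blocks at a fixed stride, the layout comment above ("0x40 bytes long") is load-bearing. An illustrative compile-time guard, assuming that stated size is accurate (not present in the driver):

    static inline void orc_scb_layout_check(void)
    {
            /* Fails the build if struct orc_scb drifts from the 0x40-byte
             * slot size the comment above promises the firmware.
             */
            BUILD_BUG_ON(sizeof(struct orc_scb) != 0x40);
    }
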
204 175
205/* Opcodes of ORCSCB_Opcode */ 176/* Opcodes of ORCSCB_Opcode */
206#define ORC_EXECSCSI 0x00 /* SCSI initiator command with residual */ 177#define ORC_EXECSCSI 0x00 /* SCSI initiator command with residual */
@@ -239,13 +210,13 @@ typedef struct orc_scb { /* Scsi_Ctrl_Blk */
239 Target Device Control Structure 210 Target Device Control Structure
240**********************************************************************/ 211**********************************************************************/
241 212
242typedef struct ORC_Tar_Ctrl_Struc { 213struct orc_target {
243 UBYTE TCS_DrvDASD; /* 6 */ 214 u8 TCS_DrvDASD; /* 6 */
244 UBYTE TCS_DrvSCSI; /* 7 */ 215 u8 TCS_DrvSCSI; /* 7 */
245 UBYTE TCS_DrvHead; /* 8 */ 216 u8 TCS_DrvHead; /* 8 */
246 UWORD TCS_DrvFlags; /* 4 */ 217 u16 TCS_DrvFlags; /* 4 */
247 UBYTE TCS_DrvSector; /* 7 */ 218 u8 TCS_DrvSector; /* 7 */
248} ORC_TCS; 219};
249 220
250/* Bit Definition for TCF_DrvFlags */ 221/* Bit Definition for TCF_DrvFlags */
251#define TCS_DF_NODASD_SUPT 0x20 /* Suppress OS/2 DASD Mgr support */ 222#define TCS_DF_NODASD_SUPT 0x20 /* Suppress OS/2 DASD Mgr support */
@@ -255,32 +226,23 @@ typedef struct ORC_Tar_Ctrl_Struc {
255/*********************************************************************** 226/***********************************************************************
256 Host Adapter Control Structure 227 Host Adapter Control Structure
257************************************************************************/ 228************************************************************************/
258typedef struct ORC_Ha_Ctrl_Struc { 229struct orc_host {
259 USHORT HCS_Base; /* 00 */ 230 unsigned long base; /* Base address */
260 UBYTE HCS_Index; /* 02 */ 231 u8 index; /* Index (Channel)*/
261 UBYTE HCS_Intr; /* 04 */ 232 u8 scsi_id; /* H/A SCSI ID */
262 UBYTE HCS_SCSI_ID; /* 06 H/A SCSI ID */ 233 u8 BIOScfg; /*BIOS configuration */
263 UBYTE HCS_BIOS; /* 07 BIOS configuration */ 234 u8 flags;
264 235 u8 max_targets; /* SCSI0MAXTags */
265 UBYTE HCS_Flags; /* 0B */ 236 struct orc_scb *scb_virt; /* Virtual Pointer to SCB array */
266 UBYTE HCS_HAConfig1; /* 1B SCSI0MAXTags */ 237 dma_addr_t scb_phys; /* Scb Physical address */
267 UBYTE HCS_MaxTar; /* 1B SCSI0MAXTags */ 238 struct orc_extended_scb *escb_virt; /* Virtual pointer to ESCB Scatter list */
268 239 dma_addr_t escb_phys; /* scatter list Physical address */
269 USHORT HCS_Units; /* Number of units this adapter */ 240 u8 target_flag[16]; /* target configuration, TCF_EN_TAG */
270 USHORT HCS_AFlags; /* Adapter info. defined flags */ 241 u8 max_tags[16]; /* ORC_MAX_SCBS */
271 ULONG HCS_Timeout; /* Adapter timeout value */ 242 u32 allocation_map[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
272 ORC_SCB *HCS_virScbArray; /* 28 Virtual Pointer to SCB array */ 243 spinlock_t allocation_lock;
273 dma_addr_t HCS_physScbArray; /* Scb Physical address */
274 ESCB *HCS_virEscbArray; /* Virtual pointer to ESCB Scatter list */
275 dma_addr_t HCS_physEscbArray; /* scatter list Physical address */
276 UBYTE TargetFlag[16]; /* 30 target configuration, TCF_EN_TAG */
277 UBYTE MaximumTags[16]; /* 40 ORC_MAX_SCBS */
278 UBYTE ActiveTags[16][16]; /* 50 */
279 ORC_TCS HCS_Tcs[16]; /* 28 */
280 U32 BitAllocFlag[MAX_CHANNELS][8]; /* Max STB is 256, So 256/32 */
281 spinlock_t BitAllocFlagLock;
282 struct pci_dev *pdev; 244 struct pci_dev *pdev;
283} ORC_HCS; 245};
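
allocation_map is the free-SCB bitmap the trailing comment describes: up to 256 SCBs per channel, tracked 32 per word, guarded by allocation_lock. A minimal sketch of an allocator over it (illustrative shape only; the driver's own routine and its bit polarity may differ):

    static int orc_alloc_scb_index(struct orc_host *host, int channel)
    {
            unsigned long flags;
            int word, bit;

            spin_lock_irqsave(&host->allocation_lock, flags);
            for (word = 0; word < 8; word++) {
                    for (bit = 0; bit < 32; bit++) {
                            if (!(host->allocation_map[channel][word] & (1U << bit))) {
                                    host->allocation_map[channel][word] |= 1U << bit;
                                    spin_unlock_irqrestore(&host->allocation_lock, flags);
                                    return word * 32 + bit;
                            }
                    }
            }
            spin_unlock_irqrestore(&host->allocation_lock, flags);
            return -1;      /* all SCBs busy */
    }
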
284 246
285/* Bit Definition for HCS_Flags */ 247/* Bit Definition for HCS_Flags */
286 248
@@ -301,79 +263,79 @@ typedef struct ORC_Ha_Ctrl_Struc {
301#define HCS_AF_DISABLE_RESET 0x10 /* Adapter disable reset */ 263#define HCS_AF_DISABLE_RESET 0x10 /* Adapter disable reset */
302#define HCS_AF_DISABLE_ADPT 0x80 /* Adapter disable */ 264#define HCS_AF_DISABLE_ADPT 0x80 /* Adapter disable */
303 265
304typedef struct _NVRAM { 266struct orc_nvram {
305/*----------header ---------------*/ 267/*----------header ---------------*/
306 UCHAR SubVendorID0; /* 00 - Sub Vendor ID */ 268 u8 SubVendorID0; /* 00 - Sub Vendor ID */
307 UCHAR SubVendorID1; /* 00 - Sub Vendor ID */ 269 u8 SubVendorID1; /* 00 - Sub Vendor ID */
308 UCHAR SubSysID0; /* 02 - Sub System ID */ 270 u8 SubSysID0; /* 02 - Sub System ID */
309 UCHAR SubSysID1; /* 02 - Sub System ID */ 271 u8 SubSysID1; /* 02 - Sub System ID */
310 UCHAR SubClass; /* 04 - Sub Class */ 272 u8 SubClass; /* 04 - Sub Class */
311 UCHAR VendorID0; /* 05 - Vendor ID */ 273 u8 VendorID0; /* 05 - Vendor ID */
312 UCHAR VendorID1; /* 05 - Vendor ID */ 274 u8 VendorID1; /* 05 - Vendor ID */
313 UCHAR DeviceID0; /* 07 - Device ID */ 275 u8 DeviceID0; /* 07 - Device ID */
314 UCHAR DeviceID1; /* 07 - Device ID */ 276 u8 DeviceID1; /* 07 - Device ID */
315 UCHAR Reserved0[2]; /* 09 - Reserved */ 277 u8 Reserved0[2]; /* 09 - Reserved */
316 UCHAR Revision; /* 0B - Revision of data structure */ 278 u8 revision; /* 0B - revision of data structure */
317 /* ----Host Adapter Structure ---- */ 279 /* ----Host Adapter Structure ---- */
318 UCHAR NumOfCh; /* 0C - Number of SCSI channel */ 280 u8 NumOfCh; /* 0C - Number of SCSI channel */
319 UCHAR BIOSConfig1; /* 0D - BIOS configuration 1 */ 281 u8 BIOSConfig1; /* 0D - BIOS configuration 1 */
320 UCHAR BIOSConfig2; /* 0E - BIOS boot channel&target ID */ 282 u8 BIOSConfig2; /* 0E - BIOS boot channel&target ID */
321 UCHAR BIOSConfig3; /* 0F - BIOS configuration 3 */ 283 u8 BIOSConfig3; /* 0F - BIOS configuration 3 */
322 /* ----SCSI channel Structure ---- */ 284 /* ----SCSI channel Structure ---- */
323 /* from "CTRL-I SCSI Host Adapter SetUp menu " */ 285 /* from "CTRL-I SCSI Host Adapter SetUp menu " */
324 UCHAR SCSI0Id; /* 10 - Channel 0 SCSI ID */ 286 u8 scsi_id; /* 10 - Channel 0 SCSI ID */
325 UCHAR SCSI0Config; /* 11 - Channel 0 SCSI configuration */ 287 u8 SCSI0Config; /* 11 - Channel 0 SCSI configuration */
326 UCHAR SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */ 288 u8 SCSI0MaxTags; /* 12 - Channel 0 Maximum tags */
327 UCHAR SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */ 289 u8 SCSI0ResetTime; /* 13 - Channel 0 Reset recovering time */
328 UCHAR ReservedforChannel0[2]; /* 14 - Reserved */ 290 u8 ReservedforChannel0[2]; /* 14 - Reserved */
329 291
330 /* ----SCSI target Structure ---- */ 292 /* ----SCSI target Structure ---- */
331 /* from "CTRL-I SCSI device SetUp menu " */ 293 /* from "CTRL-I SCSI device SetUp menu " */
332 UCHAR Target00Config; /* 16 - Channel 0 Target 0 config */ 294 u8 Target00Config; /* 16 - Channel 0 Target 0 config */
333 UCHAR Target01Config; /* 17 - Channel 0 Target 1 config */ 295 u8 Target01Config; /* 17 - Channel 0 Target 1 config */
334 UCHAR Target02Config; /* 18 - Channel 0 Target 2 config */ 296 u8 Target02Config; /* 18 - Channel 0 Target 2 config */
335 UCHAR Target03Config; /* 19 - Channel 0 Target 3 config */ 297 u8 Target03Config; /* 19 - Channel 0 Target 3 config */
336 UCHAR Target04Config; /* 1A - Channel 0 Target 4 config */ 298 u8 Target04Config; /* 1A - Channel 0 Target 4 config */
337 UCHAR Target05Config; /* 1B - Channel 0 Target 5 config */ 299 u8 Target05Config; /* 1B - Channel 0 Target 5 config */
338 UCHAR Target06Config; /* 1C - Channel 0 Target 6 config */ 300 u8 Target06Config; /* 1C - Channel 0 Target 6 config */
339 UCHAR Target07Config; /* 1D - Channel 0 Target 7 config */ 301 u8 Target07Config; /* 1D - Channel 0 Target 7 config */
340 UCHAR Target08Config; /* 1E - Channel 0 Target 8 config */ 302 u8 Target08Config; /* 1E - Channel 0 Target 8 config */
341 UCHAR Target09Config; /* 1F - Channel 0 Target 9 config */ 303 u8 Target09Config; /* 1F - Channel 0 Target 9 config */
342 UCHAR Target0AConfig; /* 20 - Channel 0 Target A config */ 304 u8 Target0AConfig; /* 20 - Channel 0 Target A config */
343 UCHAR Target0BConfig; /* 21 - Channel 0 Target B config */ 305 u8 Target0BConfig; /* 21 - Channel 0 Target B config */
344 UCHAR Target0CConfig; /* 22 - Channel 0 Target C config */ 306 u8 Target0CConfig; /* 22 - Channel 0 Target C config */
345 UCHAR Target0DConfig; /* 23 - Channel 0 Target D config */ 307 u8 Target0DConfig; /* 23 - Channel 0 Target D config */
346 UCHAR Target0EConfig; /* 24 - Channel 0 Target E config */ 308 u8 Target0EConfig; /* 24 - Channel 0 Target E config */
347 UCHAR Target0FConfig; /* 25 - Channel 0 Target F config */ 309 u8 Target0FConfig; /* 25 - Channel 0 Target F config */
348 310
349 UCHAR SCSI1Id; /* 26 - Channel 1 SCSI ID */ 311 u8 SCSI1Id; /* 26 - Channel 1 SCSI ID */
350 UCHAR SCSI1Config; /* 27 - Channel 1 SCSI configuration */ 312 u8 SCSI1Config; /* 27 - Channel 1 SCSI configuration */
351 UCHAR SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */ 313 u8 SCSI1MaxTags; /* 28 - Channel 1 Maximum tags */
352 UCHAR SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */ 314 u8 SCSI1ResetTime; /* 29 - Channel 1 Reset recovering time */
353 UCHAR ReservedforChannel1[2]; /* 2A - Reserved */ 315 u8 ReservedforChannel1[2]; /* 2A - Reserved */
354 316
355 /* ----SCSI target Structure ---- */ 317 /* ----SCSI target Structure ---- */
356 /* from "CTRL-I SCSI device SetUp menu " */ 318 /* from "CTRL-I SCSI device SetUp menu " */
357 UCHAR Target10Config; /* 2C - Channel 1 Target 0 config */ 319 u8 Target10Config; /* 2C - Channel 1 Target 0 config */
358 UCHAR Target11Config; /* 2D - Channel 1 Target 1 config */ 320 u8 Target11Config; /* 2D - Channel 1 Target 1 config */
359 UCHAR Target12Config; /* 2E - Channel 1 Target 2 config */ 321 u8 Target12Config; /* 2E - Channel 1 Target 2 config */
360 UCHAR Target13Config; /* 2F - Channel 1 Target 3 config */ 322 u8 Target13Config; /* 2F - Channel 1 Target 3 config */
361 UCHAR Target14Config; /* 30 - Channel 1 Target 4 config */ 323 u8 Target14Config; /* 30 - Channel 1 Target 4 config */
362 UCHAR Target15Config; /* 31 - Channel 1 Target 5 config */ 324 u8 Target15Config; /* 31 - Channel 1 Target 5 config */
363 UCHAR Target16Config; /* 32 - Channel 1 Target 6 config */ 325 u8 Target16Config; /* 32 - Channel 1 Target 6 config */
364 UCHAR Target17Config; /* 33 - Channel 1 Target 7 config */ 326 u8 Target17Config; /* 33 - Channel 1 Target 7 config */
365 UCHAR Target18Config; /* 34 - Channel 1 Target 8 config */ 327 u8 Target18Config; /* 34 - Channel 1 Target 8 config */
366 UCHAR Target19Config; /* 35 - Channel 1 Target 9 config */ 328 u8 Target19Config; /* 35 - Channel 1 Target 9 config */
367 UCHAR Target1AConfig; /* 36 - Channel 1 Target A config */ 329 u8 Target1AConfig; /* 36 - Channel 1 Target A config */
368 UCHAR Target1BConfig; /* 37 - Channel 1 Target B config */ 330 u8 Target1BConfig; /* 37 - Channel 1 Target B config */
369 UCHAR Target1CConfig; /* 38 - Channel 1 Target C config */ 331 u8 Target1CConfig; /* 38 - Channel 1 Target C config */
370 UCHAR Target1DConfig; /* 39 - Channel 1 Target D config */ 332 u8 Target1DConfig; /* 39 - Channel 1 Target D config */
371 UCHAR Target1EConfig; /* 3A - Channel 1 Target E config */ 333 u8 Target1EConfig; /* 3A - Channel 1 Target E config */
372 UCHAR Target1FConfig; /* 3B - Channel 1 Target F config */ 334 u8 Target1FConfig; /* 3B - Channel 1 Target F config */
373 UCHAR reserved[3]; /* 3C - Reserved */ 335 u8 reserved[3]; /* 3C - Reserved */
374 /* ---------- CheckSum ---------- */ 336 /* ---------- CheckSum ---------- */
375 UCHAR CheckSum; /* 3F - Checksum of NVRam */ 337 u8 CheckSum; /* 3F - Checksum of NVRam */
376} NVRAM, *PNVRAM; 338};
377 339
378/* Bios Configuration for nvram->BIOSConfig1 */ 340/* Bios Configuration for nvram->BIOSConfig1 */
379#define NBC_BIOSENABLE 0x01 /* BIOS enable */ 341#define NBC_BIOSENABLE 0x01 /* BIOS enable */
@@ -407,10 +369,3 @@ typedef struct _NVRAM {
407#define NCC_RESET_TIME 0x0A /* SCSI RESET recovering time */ 369#define NCC_RESET_TIME 0x0A /* SCSI RESET recovering time */
408#define NTC_DEFAULT (NTC_1GIGA | NTC_NO_WIDESYNC | NTC_DISC_ENABLE) 370#define NTC_DEFAULT (NTC_1GIGA | NTC_NO_WIDESYNC | NTC_DISC_ENABLE)
409 371
410#define ORC_RD(x,y) (UCHAR)(inb( (int)((ULONG)((ULONG)x+(UCHAR)y)) ))
411#define ORC_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
412#define ORC_RDLONG(x,y) (long)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) ))
413
414#define ORC_WR( adr,data) outb( (UCHAR)(data), (int)(adr))
415#define ORC_WRSHORT(adr,data) outw( (UWORD)(data), (int)(adr))
416#define ORC_WRLONG( adr,data) outl( (ULONG)(data), (int)(adr))
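
With these wrappers deleted, callers use the standard port accessors directly, as the inw(port + 0x50) conversion earlier in this diff shows. The mapping is one-to-one, and the change also retires a latent bug: ORC_RDWORD read 32 bits with inl() and truncated to short, where the replacement is a plain inw().

    /* Macro                      Replacement
     * ORC_RD(base, off)          inb(base + off)
     * ORC_RDWORD(base, off)      inw(base + off)   (old macro used inl())
     * ORC_RDLONG(base, off)      inl(base + off)
     * ORC_WR(adr, data)          outb(data, adr)
     * ORC_WRSHORT(adr, data)     outw(data, adr)
     * ORC_WRLONG(adr, data)      outl(data, adr)
     */
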
diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c
new file mode 100644
index 000000000000..6a5784683ed3
--- /dev/null
+++ b/drivers/scsi/a4000t.c
@@ -0,0 +1,143 @@
1/*
2 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
3 * Amiga Technologies A4000T SCSI controller.
4 *
5 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
6 * plus modifications of the 53c7xx.c driver to support the Amiga.
7 *
8 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
9 */
10
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <asm/amigahw.h>
16#include <asm/amigaints.h>
17#include <scsi/scsi_host.h>
18#include <scsi/scsi_transport_spi.h>
19
20#include "53c700.h"
21
22MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
23MODULE_DESCRIPTION("Amiga A4000T NCR53C710 driver");
24MODULE_LICENSE("GPL");
25
26
27static struct scsi_host_template a4000t_scsi_driver_template = {
28 .name = "A4000T builtin SCSI",
29 .proc_name = "A4000t",
30 .this_id = 7,
31 .module = THIS_MODULE,
32};
33
34static struct platform_device *a4000t_scsi_device;
35
36#define A4000T_SCSI_ADDR 0xdd0040
37
38static int __devinit a4000t_probe(struct device *dev)
39{
40 struct Scsi_Host * host = NULL;
41 struct NCR_700_Host_Parameters *hostdata;
42
43 if (!(MACH_IS_AMIGA && AMIGAHW_PRESENT(A4000_SCSI)))
44 goto out;
45
46 if (!request_mem_region(A4000T_SCSI_ADDR, 0x1000,
47 "A4000T builtin SCSI"))
48 goto out;
49
50 hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
51 if (hostdata == NULL) {
52 printk(KERN_ERR "a4000t-scsi: Failed to allocate host data\n");
53 goto out_release;
54 }
55 memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
56
57 /* Fill in the required pieces of hostdata */
58 hostdata->base = (void __iomem *)ZTWO_VADDR(A4000T_SCSI_ADDR);
59 hostdata->clock = 50;
60 hostdata->chip710 = 1;
61 hostdata->dmode_extra = DMODE_FC2;
62 hostdata->dcntl_extra = EA_710;
63
64 /* and register the chip */
65 host = NCR_700_detect(&a4000t_scsi_driver_template, hostdata, dev);
66 if (!host) {
67 printk(KERN_ERR "a4000t-scsi: No host detected; "
68 "board configuration problem?\n");
69 goto out_free;
70 }
71
72 host->this_id = 7;
73 host->base = A4000T_SCSI_ADDR;
74 host->irq = IRQ_AMIGA_PORTS;
75
76 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "a4000t-scsi",
77 host)) {
78 printk(KERN_ERR "a4000t-scsi: request_irq failed\n");
79 goto out_put_host;
80 }
81
82 scsi_scan_host(host);
83
84 return 0;
85
86 out_put_host:
87 scsi_host_put(host);
88 out_free:
89 kfree(hostdata);
90 out_release:
91 release_mem_region(A4000T_SCSI_ADDR, 0x1000);
92 out:
93 return -ENODEV;
94}
95
96static __devexit int a4000t_device_remove(struct device *dev)
97{
98 struct Scsi_Host *host = dev_to_shost(dev);
99 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
100
101 scsi_remove_host(host);
102
103 NCR_700_release(host);
104 kfree(hostdata);
105 free_irq(host->irq, host);
106 release_mem_region(A4000T_SCSI_ADDR, 0x1000);
107
108 return 0;
109}
110
111static struct device_driver a4000t_scsi_driver = {
112 .name = "a4000t-scsi",
113 .bus = &platform_bus_type,
114 .probe = a4000t_probe,
115 .remove = __devexit_p(a4000t_device_remove),
116};
117
118static int __init a4000t_scsi_init(void)
119{
120 int err;
121
122 err = driver_register(&a4000t_scsi_driver);
123 if (err)
124 return err;
125
126 a4000t_scsi_device = platform_device_register_simple("a4000t-scsi",
127 -1, NULL, 0);
128 if (IS_ERR(a4000t_scsi_device)) {
129 driver_unregister(&a4000t_scsi_driver);
130 return PTR_ERR(a4000t_scsi_device);
131 }
132
133 return err;
134}
135
136static void __exit a4000t_scsi_exit(void)
137{
138 platform_device_unregister(a4000t_scsi_device);
139 driver_unregister(&a4000t_scsi_driver);
140}
141
142module_init(a4000t_scsi_init);
143module_exit(a4000t_scsi_exit);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8dcfe4ec35c2..0b6fd0b654d2 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -169,6 +169,18 @@ int acbsize = -1;
169module_param(acbsize, int, S_IRUGO|S_IWUSR); 169module_param(acbsize, int, S_IRUGO|S_IWUSR);
170MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware."); 170MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB) size. Valid values are 512, 2048, 4096 and 8192. Default is to use suggestion from Firmware.");
171 171
172int update_interval = 30 * 60;
173module_param(update_interval, int, S_IRUGO|S_IWUSR);
174MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync updates issued to adapter.");
175
176int check_interval = 24 * 60 * 60;
177module_param(check_interval, int, S_IRUGO|S_IWUSR);
178MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health checks.");
179
180int check_reset = 1;
181module_param(check_reset, int, S_IRUGO|S_IWUSR);
182MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the adapter.");
183
172int expose_physicals = -1; 184int expose_physicals = -1;
173module_param(expose_physicals, int, S_IRUGO|S_IWUSR); 185module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
174MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on"); 186MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays. -1=protect 0=off, 1=on");
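
The new knobs above are declared S_IRUGO|S_IWUSR, so they are settable at module load time and adjustable afterwards through the standard module-parameter sysfs files; for example:

    /* Load time:  modprobe aacraid check_interval=3600 check_reset=0
     * Run time:   echo 0 > /sys/module/aacraid/parameters/check_reset
     *             cat /sys/module/aacraid/parameters/update_interval
     */
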
@@ -312,11 +324,10 @@ int aac_get_containers(struct aac_dev *dev)
312 324
313 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS) 325 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
314 maximum_num_containers = MAXIMUM_NUM_CONTAINERS; 326 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
315 fsa_dev_ptr = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers, 327 fsa_dev_ptr = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
316 GFP_KERNEL); 328 GFP_KERNEL);
317 if (!fsa_dev_ptr) 329 if (!fsa_dev_ptr)
318 return -ENOMEM; 330 return -ENOMEM;
319 memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
320 331
321 dev->fsa_dev = fsa_dev_ptr; 332 dev->fsa_dev = fsa_dev_ptr;
322 dev->maximum_num_containers = maximum_num_containers; 333 dev->maximum_num_containers = maximum_num_containers;
@@ -344,21 +355,16 @@ static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigne
344{ 355{
345 void *buf; 356 void *buf;
346 int transfer_len; 357 int transfer_len;
347 struct scatterlist *sg = scsicmd->request_buffer; 358 struct scatterlist *sg = scsi_sglist(scsicmd);
359
360 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
361 transfer_len = min(sg->length, len + offset);
348 362
349 if (scsicmd->use_sg) {
350 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
351 transfer_len = min(sg->length, len + offset);
352 } else {
353 buf = scsicmd->request_buffer;
354 transfer_len = min(scsicmd->request_bufflen, len + offset);
355 }
356 transfer_len -= offset; 363 transfer_len -= offset;
357 if (buf && transfer_len > 0) 364 if (buf && transfer_len > 0)
358 memcpy(buf + offset, data, transfer_len); 365 memcpy(buf + offset, data, transfer_len);
359 366
360 if (scsicmd->use_sg) 367 kunmap_atomic(buf - sg->offset, KM_IRQ0);
361 kunmap_atomic(buf - sg->offset, KM_IRQ0);
362 368
363} 369}
364 370
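
One subtlety above: kunmap_atomic() must be given the address kmap_atomic() returned, so the sg->offset added after mapping is subtracted again before unmapping. The idiom, restated (src and n stand in for the caller's data pointer and byte count):

    void *mapped = kmap_atomic(sg->page, KM_IRQ0);  /* page start */
    void *buf = mapped + sg->offset;                /* actual data */

    memcpy(buf, src, n);                    /* n bounded by sg->length */
    kunmap_atomic(mapped, KM_IRQ0);         /* i.e. buf - sg->offset */
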
@@ -451,7 +457,7 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
451{ 457{
452 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev; 458 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
453 459
454 if (fsa_dev_ptr[scmd_id(scsicmd)].valid) 460 if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
455 return aac_scsi_cmd(scsicmd); 461 return aac_scsi_cmd(scsicmd);
456 462
457 scsicmd->result = DID_NO_CONNECT << 16; 463 scsicmd->result = DID_NO_CONNECT << 16;
@@ -459,18 +465,18 @@ static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
459 return 0; 465 return 0;
460} 466}
461 467
462static int _aac_probe_container2(void * context, struct fib * fibptr) 468static void _aac_probe_container2(void * context, struct fib * fibptr)
463{ 469{
464 struct fsa_dev_info *fsa_dev_ptr; 470 struct fsa_dev_info *fsa_dev_ptr;
465 int (*callback)(struct scsi_cmnd *); 471 int (*callback)(struct scsi_cmnd *);
466 struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context; 472 struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
467 473
468 if (!aac_valid_context(scsicmd, fibptr))
469 return 0;
470 474
471 fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev; 475 if (!aac_valid_context(scsicmd, fibptr))
476 return;
472 477
473 scsicmd->SCp.Status = 0; 478 scsicmd->SCp.Status = 0;
479 fsa_dev_ptr = fibptr->dev->fsa_dev;
474 if (fsa_dev_ptr) { 480 if (fsa_dev_ptr) {
475 struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr); 481 struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
476 fsa_dev_ptr += scmd_id(scsicmd); 482 fsa_dev_ptr += scmd_id(scsicmd);
@@ -493,10 +499,11 @@ static int _aac_probe_container2(void * context, struct fib * fibptr)
493 aac_fib_free(fibptr); 499 aac_fib_free(fibptr);
494 callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr); 500 callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
495 scsicmd->SCp.ptr = NULL; 501 scsicmd->SCp.ptr = NULL;
496 return (*callback)(scsicmd); 502 (*callback)(scsicmd);
503 return;
497} 504}
498 505
499static int _aac_probe_container1(void * context, struct fib * fibptr) 506static void _aac_probe_container1(void * context, struct fib * fibptr)
500{ 507{
501 struct scsi_cmnd * scsicmd; 508 struct scsi_cmnd * scsicmd;
502 struct aac_mount * dresp; 509 struct aac_mount * dresp;
@@ -506,13 +513,14 @@ static int _aac_probe_container1(void * context, struct fib * fibptr)
506 dresp = (struct aac_mount *) fib_data(fibptr); 513 dresp = (struct aac_mount *) fib_data(fibptr);
507 dresp->mnt[0].capacityhigh = 0; 514 dresp->mnt[0].capacityhigh = 0;
508 if ((le32_to_cpu(dresp->status) != ST_OK) || 515 if ((le32_to_cpu(dresp->status) != ST_OK) ||
509 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) 516 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
510 return _aac_probe_container2(context, fibptr); 517 _aac_probe_container2(context, fibptr);
518 return;
519 }
511 scsicmd = (struct scsi_cmnd *) context; 520 scsicmd = (struct scsi_cmnd *) context;
512 scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
513 521
514 if (!aac_valid_context(scsicmd, fibptr)) 522 if (!aac_valid_context(scsicmd, fibptr))
515 return 0; 523 return;
516 524
517 aac_fib_init(fibptr); 525 aac_fib_init(fibptr);
518 526
@@ -527,21 +535,18 @@ static int _aac_probe_container1(void * context, struct fib * fibptr)
527 sizeof(struct aac_query_mount), 535 sizeof(struct aac_query_mount),
528 FsaNormal, 536 FsaNormal,
529 0, 1, 537 0, 1,
530 (fib_callback) _aac_probe_container2, 538 _aac_probe_container2,
531 (void *) scsicmd); 539 (void *) scsicmd);
532 /* 540 /*
533 * Check that the command queued to the controller 541 * Check that the command queued to the controller
534 */ 542 */
535 if (status == -EINPROGRESS) { 543 if (status == -EINPROGRESS)
536 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 544 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
537 return 0; 545 else if (status < 0) {
538 }
539 if (status < 0) {
540 /* Inherit results from VM_NameServe, if any */ 546 /* Inherit results from VM_NameServe, if any */
541 dresp->status = cpu_to_le32(ST_OK); 547 dresp->status = cpu_to_le32(ST_OK);
542 return _aac_probe_container2(context, fibptr); 548 _aac_probe_container2(context, fibptr);
543 } 549 }
544 return 0;
545} 550}
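
Both probe continuations now return void, which is why the (fib_callback) casts at the aac_fib_send() call sites disappear. The callback type is assumed to become (the typedef itself is changed outside this hunk):

    typedef void (*fib_callback)(void *context, struct fib *fibptr);
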
546 551
547static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *)) 552static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
@@ -566,7 +571,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
566 sizeof(struct aac_query_mount), 571 sizeof(struct aac_query_mount),
567 FsaNormal, 572 FsaNormal,
568 0, 1, 573 0, 1,
569 (fib_callback) _aac_probe_container1, 574 _aac_probe_container1,
570 (void *) scsicmd); 575 (void *) scsicmd);
571 /* 576 /*
572 * Check that the command queued to the controller 577 * Check that the command queued to the controller
@@ -620,7 +625,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
620 return -ENOMEM; 625 return -ENOMEM;
621 } 626 }
622 scsicmd->list.next = NULL; 627 scsicmd->list.next = NULL;
623 scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))_aac_probe_container1; 628 scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
624 629
625 scsicmd->device = scsidev; 630 scsicmd->device = scsidev;
626 scsidev->sdev_state = 0; 631 scsidev->sdev_state = 0;
@@ -825,7 +830,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
825 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); 830 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
826 readcmd->count = cpu_to_le32(count<<9); 831 readcmd->count = cpu_to_le32(count<<9);
827 readcmd->cid = cpu_to_le16(scmd_id(cmd)); 832 readcmd->cid = cpu_to_le16(scmd_id(cmd));
828 readcmd->flags = cpu_to_le16(1); 833 readcmd->flags = cpu_to_le16(IO_TYPE_READ);
829 readcmd->bpTotal = 0; 834 readcmd->bpTotal = 0;
830 readcmd->bpComplete = 0; 835 readcmd->bpComplete = 0;
831 836
@@ -904,7 +909,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
904 (void *) cmd); 909 (void *) cmd);
905} 910}
906 911
907static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) 912static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
908{ 913{
909 u16 fibsize; 914 u16 fibsize;
910 struct aac_raw_io *writecmd; 915 struct aac_raw_io *writecmd;
@@ -914,7 +919,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
914 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); 919 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
915 writecmd->count = cpu_to_le32(count<<9); 920 writecmd->count = cpu_to_le32(count<<9);
916 writecmd->cid = cpu_to_le16(scmd_id(cmd)); 921 writecmd->cid = cpu_to_le16(scmd_id(cmd));
917 writecmd->flags = 0; 922 writecmd->flags = fua ?
923 cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
924 cpu_to_le16(IO_TYPE_WRITE);
918 writecmd->bpTotal = 0; 925 writecmd->bpTotal = 0;
919 writecmd->bpComplete = 0; 926 writecmd->bpComplete = 0;
920 927
@@ -933,7 +940,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
933 (void *) cmd); 940 (void *) cmd);
934} 941}
935 942
936static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) 943static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
937{ 944{
938 u16 fibsize; 945 u16 fibsize;
939 struct aac_write64 *writecmd; 946 struct aac_write64 *writecmd;
@@ -964,7 +971,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
964 (void *) cmd); 971 (void *) cmd);
965} 972}
966 973
967static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) 974static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
968{ 975{
969 u16 fibsize; 976 u16 fibsize;
970 struct aac_write *writecmd; 977 struct aac_write *writecmd;
@@ -1041,7 +1048,7 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
1041 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); 1048 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1042 1049
1043 aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg); 1050 aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
1044 srbcmd->count = cpu_to_le32(cmd->request_bufflen); 1051 srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1045 1052
1046 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); 1053 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1047 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); 1054 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
@@ -1069,7 +1076,7 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
1069 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd); 1076 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
1070 1077
1071 aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg); 1078 aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
1072 srbcmd->count = cpu_to_le32(cmd->request_bufflen); 1079 srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
1073 1080
1074 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); 1081 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1075 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len); 1082 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
@@ -1172,6 +1179,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
1172 } 1179 }
1173 1180
1174 if (!dev->in_reset) { 1181 if (!dev->in_reset) {
1182 char buffer[16];
1175 tmp = le32_to_cpu(dev->adapter_info.kernelrev); 1183 tmp = le32_to_cpu(dev->adapter_info.kernelrev);
1176 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", 1184 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
1177 dev->name, 1185 dev->name,
@@ -1192,16 +1200,23 @@ int aac_get_adapter_info(struct aac_dev* dev)
1192 dev->name, dev->id, 1200 dev->name, dev->id,
1193 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 1201 tmp>>24,(tmp>>16)&0xff,tmp&0xff,
1194 le32_to_cpu(dev->adapter_info.biosbuild)); 1202 le32_to_cpu(dev->adapter_info.biosbuild));
1195 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) 1203 buffer[0] = '\0';
1196 printk(KERN_INFO "%s%d: serial %x\n", 1204 if (aac_show_serial_number(
1197 dev->name, dev->id, 1205 shost_to_class(dev->scsi_host_ptr), buffer))
1198 le32_to_cpu(dev->adapter_info.serial[0])); 1206 printk(KERN_INFO "%s%d: serial %s",
1207 dev->name, dev->id, buffer);
1199 if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) { 1208 if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
1200 printk(KERN_INFO "%s%d: TSID %.*s\n", 1209 printk(KERN_INFO "%s%d: TSID %.*s\n",
1201 dev->name, dev->id, 1210 dev->name, dev->id,
1202 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid), 1211 (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
1203 dev->supplement_adapter_info.VpdInfo.Tsid); 1212 dev->supplement_adapter_info.VpdInfo.Tsid);
1204 } 1213 }
1214 if (!check_reset ||
1215 (dev->supplement_adapter_info.SupportedOptions2 &
1216 le32_to_cpu(AAC_OPTION_IGNORE_RESET))) {
1217 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
1218 dev->name, dev->id);
1219 }
1205 } 1220 }
1206 1221
1207 dev->nondasd_support = 0; 1222 dev->nondasd_support = 0;
@@ -1332,7 +1347,7 @@ static void io_callback(void *context, struct fib * fibptr)
1332 if (!aac_valid_context(scsicmd, fibptr)) 1347 if (!aac_valid_context(scsicmd, fibptr))
1333 return; 1348 return;
1334 1349
1335 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 1350 dev = fibptr->dev;
1336 cid = scmd_id(scsicmd); 1351 cid = scmd_id(scsicmd);
1337 1352
1338 if (nblank(dprintk(x))) { 1353 if (nblank(dprintk(x))) {
@@ -1371,16 +1386,9 @@ static void io_callback(void *context, struct fib * fibptr)
1371 } 1386 }
1372 1387
1373 BUG_ON(fibptr == NULL); 1388 BUG_ON(fibptr == NULL);
1374 1389
1375 if(scsicmd->use_sg) 1390 scsi_dma_unmap(scsicmd);
1376 pci_unmap_sg(dev->pdev, 1391
1377 (struct scatterlist *)scsicmd->request_buffer,
1378 scsicmd->use_sg,
1379 scsicmd->sc_data_direction);
1380 else if(scsicmd->request_bufflen)
1381 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
1382 scsicmd->request_bufflen,
1383 scsicmd->sc_data_direction);
1384 readreply = (struct aac_read_reply *)fib_data(fibptr); 1392 readreply = (struct aac_read_reply *)fib_data(fibptr);
1385 if (le32_to_cpu(readreply->status) == ST_OK) 1393 if (le32_to_cpu(readreply->status) == ST_OK)
1386 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1394 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
@@ -1498,6 +1506,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1498{ 1506{
1499 u64 lba; 1507 u64 lba;
1500 u32 count; 1508 u32 count;
1509 int fua;
1501 int status; 1510 int status;
1502 struct aac_dev *dev; 1511 struct aac_dev *dev;
1503 struct fib * cmd_fibcontext; 1512 struct fib * cmd_fibcontext;
@@ -1512,6 +1521,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1512 count = scsicmd->cmnd[4]; 1521 count = scsicmd->cmnd[4];
1513 if (count == 0) 1522 if (count == 0)
1514 count = 256; 1523 count = 256;
1524 fua = 0;
1515 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ 1525 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1516 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd))); 1526 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
1517 1527
@@ -1524,6 +1534,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1524 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1534 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1525 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | 1535 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1526 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 1536 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1537 fua = scsicmd->cmnd[1] & 0x8;
1527 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ 1538 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1528 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd))); 1539 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
1529 1540
@@ -1531,10 +1542,12 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1531 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1542 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1532 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) 1543 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1533 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1544 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1545 fua = scsicmd->cmnd[1] & 0x8;
1534 } else { 1546 } else {
1535 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd))); 1547 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
1536 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1548 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1537 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1549 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1550 fua = scsicmd->cmnd[1] & 0x8;
1538 } 1551 }
1539 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", 1552 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1540 smp_processor_id(), (unsigned long long)lba, jiffies)); 1553 smp_processor_id(), (unsigned long long)lba, jiffies));
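
Each branch above unpacks the CDB by opcode; for the WRITE(10) fallback the fields being decoded are (multi-byte values big-endian per the SCSI block commands spec, FUA in bit 3 of byte 1):

    /* WRITE(10) CDB:
     *   byte 0     opcode (0x2A)
     *   byte 1     flags; 0x08 = FUA (force unit access)
     *   bytes 2-5  logical block address, big-endian
     *   bytes 7-8  transfer length in blocks, big-endian
     */
    lba = ((u64)cmnd[2] << 24) | (cmnd[3] << 16) | (cmnd[4] << 8) | cmnd[5];
    count = (cmnd[7] << 8) | cmnd[8];
    fua = cmnd[1] & 0x8;
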
@@ -1549,7 +1562,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1549 return 0; 1562 return 0;
1550 } 1563 }
1551 1564
1552 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count); 1565 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
1553 1566
1554 /* 1567 /*
1555 * Check that the command queued to the controller 1568 * Check that the command queued to the controller
@@ -1592,7 +1605,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
1592 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1605 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1593 else { 1606 else {
1594 struct scsi_device *sdev = cmd->device; 1607 struct scsi_device *sdev = cmd->device;
1595 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 1608 struct aac_dev *dev = fibptr->dev;
1596 u32 cid = sdev_id(sdev); 1609 u32 cid = sdev_id(sdev);
1597 printk(KERN_WARNING 1610 printk(KERN_WARNING
1598 "synchronize_callback: synchronize failed, status = %d\n", 1611 "synchronize_callback: synchronize failed, status = %d\n",
@@ -1699,7 +1712,7 @@ static int aac_synchronize(struct scsi_cmnd *scsicmd)
1699 1712
1700int aac_scsi_cmd(struct scsi_cmnd * scsicmd) 1713int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1701{ 1714{
1702 u32 cid = 0; 1715 u32 cid;
1703 struct Scsi_Host *host = scsicmd->device->host; 1716 struct Scsi_Host *host = scsicmd->device->host;
1704 struct aac_dev *dev = (struct aac_dev *)host->hostdata; 1717 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
1705 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; 1718 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
@@ -1711,15 +1724,15 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1711 * Test does not apply to ID 16, the pseudo id for the controller 1724 * Test does not apply to ID 16, the pseudo id for the controller
1712 * itself. 1725 * itself.
1713 */ 1726 */
1714 if (scmd_id(scsicmd) != host->this_id) { 1727 cid = scmd_id(scsicmd);
1715 if ((scmd_channel(scsicmd) == CONTAINER_CHANNEL)) { 1728 if (cid != host->this_id) {
1716 if((scmd_id(scsicmd) >= dev->maximum_num_containers) || 1729 if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
1730 if((cid >= dev->maximum_num_containers) ||
1717 (scsicmd->device->lun != 0)) { 1731 (scsicmd->device->lun != 0)) {
1718 scsicmd->result = DID_NO_CONNECT << 16; 1732 scsicmd->result = DID_NO_CONNECT << 16;
1719 scsicmd->scsi_done(scsicmd); 1733 scsicmd->scsi_done(scsicmd);
1720 return 0; 1734 return 0;
1721 } 1735 }
1722 cid = scmd_id(scsicmd);
1723 1736
1724 /* 1737 /*
1725 * If the target container doesn't exist, it may have 1738 * If the target container doesn't exist, it may have
@@ -1782,7 +1795,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1782 { 1795 {
1783 struct inquiry_data inq_data; 1796 struct inquiry_data inq_data;
1784 1797
1785 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scmd_id(scsicmd))); 1798 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
1786 memset(&inq_data, 0, sizeof (struct inquiry_data)); 1799 memset(&inq_data, 0, sizeof (struct inquiry_data));
1787 1800
1788 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 1801 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
@@ -1794,7 +1807,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1794 * Set the Vendor, Product, and Revision Level 1807 * Set the Vendor, Product, and Revision Level
1795 * see: <vendor>.c i.e. aac.c 1808 * see: <vendor>.c i.e. aac.c
1796 */ 1809 */
1797 if (scmd_id(scsicmd) == host->this_id) { 1810 if (cid == host->this_id) {
1798 setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types)); 1811 setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
1799 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ 1812 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
1800 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1813 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
@@ -1886,15 +1899,29 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1886 1899
1887 case MODE_SENSE: 1900 case MODE_SENSE:
1888 { 1901 {
1889 char mode_buf[4]; 1902 char mode_buf[7];
1903 int mode_buf_length = 4;
1890 1904
1891 dprintk((KERN_DEBUG "MODE SENSE command.\n")); 1905 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1892 mode_buf[0] = 3; /* Mode data length */ 1906 mode_buf[0] = 3; /* Mode data length */
1893 mode_buf[1] = 0; /* Medium type - default */ 1907 mode_buf[1] = 0; /* Medium type - default */
1894 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ 1908 mode_buf[2] = 0; /* Device-specific param,
1909 bit 8: 0/1 = write enabled/protected
1910 bit 4: 0/1 = FUA enabled */
1911 if (dev->raw_io_interface)
1912 mode_buf[2] = 0x10;
1895 mode_buf[3] = 0; /* Block descriptor length */ 1913 mode_buf[3] = 0; /* Block descriptor length */
1896 1914 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
1897 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf)); 1915 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
1916 mode_buf[0] = 6;
1917 mode_buf[4] = 8;
1918 mode_buf[5] = 1;
1919 mode_buf[6] = 0x04; /* WCE */
1920 mode_buf_length = 7;
1921 if (mode_buf_length > scsicmd->cmnd[4])
1922 mode_buf_length = scsicmd->cmnd[4];
1923 }
1924 aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
1898 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1925 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1899 scsicmd->scsi_done(scsicmd); 1926 scsicmd->scsi_done(scsicmd);
1900 1927
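What the new MODE SENSE(6) path hands back when page 8 (caching) or 0x3f (all pages) is requested: the 4-byte parameter header, then a 3-byte caching-page stub advertising WCE, clipped to the allocation length in CDB byte 4. A runnable sketch of just that logic (the helper name is mine, and the raw_io_interface/DPOFUA detail is omitted):

#include <stdio.h>
#include <string.h>

static int build_mode_sense6(const unsigned char *cdb, unsigned char *buf)
{
	int page = cdb[2] & 0x3f;
	int len = 4;				/* header-only reply */

	memset(buf, 0, 7);
	buf[0] = 3;				/* mode data length */
	if (page == 8 || page == 0x3f) {
		buf[0] = 6;			/* data length now covers the page */
		buf[4] = 8;			/* page code: caching */
		buf[5] = 1;			/* page length */
		buf[6] = 0x04;			/* WCE: write cache enabled */
		len = 7;
		if (len > cdb[4])		/* honor the allocation length */
			len = cdb[4];
	}
	return len;
}

int main(void)
{
	unsigned char cdb[6] = { 0x1a, 0, 0x08, 0, 255, 0 };	/* MODE SENSE(6), page 8 */
	unsigned char buf[7];

	printf("reply length: %d\n", build_mode_sense6(cdb, buf));	/* 7 */
	return 0;
}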
@@ -1902,18 +1929,33 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1902 } 1929 }
1903 case MODE_SENSE_10: 1930 case MODE_SENSE_10:
1904 { 1931 {
1905 char mode_buf[8]; 1932 char mode_buf[11];
1933 int mode_buf_length = 8;
1906 1934
1907 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); 1935 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1908 mode_buf[0] = 0; /* Mode data length (MSB) */ 1936 mode_buf[0] = 0; /* Mode data length (MSB) */
1909 mode_buf[1] = 6; /* Mode data length (LSB) */ 1937 mode_buf[1] = 6; /* Mode data length (LSB) */
1910 mode_buf[2] = 0; /* Medium type - default */ 1938 mode_buf[2] = 0; /* Medium type - default */
1911 mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ 1939 mode_buf[3] = 0; /* Device-specific param,
1940 bit 8: 0/1 = write enabled/protected
1941 bit 4: 0/1 = FUA enabled */
1942 if (dev->raw_io_interface)
1943 mode_buf[3] = 0x10;
1912 mode_buf[4] = 0; /* reserved */ 1944 mode_buf[4] = 0; /* reserved */
1913 mode_buf[5] = 0; /* reserved */ 1945 mode_buf[5] = 0; /* reserved */
1914 mode_buf[6] = 0; /* Block descriptor length (MSB) */ 1946 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1915 mode_buf[7] = 0; /* Block descriptor length (LSB) */ 1947 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1916 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf)); 1948 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
1949 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
1950 mode_buf[1] = 9;
1951 mode_buf[8] = 8;
1952 mode_buf[9] = 1;
1953 mode_buf[10] = 0x04; /* WCE */
1954 mode_buf_length = 11;
1955 if (mode_buf_length > scsicmd->cmnd[8])
1956 mode_buf_length = scsicmd->cmnd[8];
1957 }
1958 aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
1917 1959
1918 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1960 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1919 scsicmd->scsi_done(scsicmd); 1961 scsicmd->scsi_done(scsicmd);
@@ -2136,28 +2178,21 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2136 if (!aac_valid_context(scsicmd, fibptr)) 2178 if (!aac_valid_context(scsicmd, fibptr))
2137 return; 2179 return;
2138 2180
2139 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2140
2141 BUG_ON(fibptr == NULL); 2181 BUG_ON(fibptr == NULL);
2142 2182
2183 dev = fibptr->dev;
2184
2143 srbreply = (struct aac_srb_reply *) fib_data(fibptr); 2185 srbreply = (struct aac_srb_reply *) fib_data(fibptr);
2144 2186
2145 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ 2187 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
2146 /* 2188 /*
2147 * Calculate resid for sg 2189 * Calculate resid for sg
2148 */ 2190 */
2149 2191
2150 scsicmd->resid = scsicmd->request_bufflen - 2192 scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
2151 le32_to_cpu(srbreply->data_xfer_length); 2193 - le32_to_cpu(srbreply->data_xfer_length));
2152 2194
2153 if(scsicmd->use_sg) 2195 scsi_dma_unmap(scsicmd);
2154 pci_unmap_sg(dev->pdev,
2155 (struct scatterlist *)scsicmd->request_buffer,
2156 scsicmd->use_sg,
2157 scsicmd->sc_data_direction);
2158 else if(scsicmd->request_bufflen)
2159 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
2160 scsicmd->sc_data_direction);
2161 2196
2162 /* 2197 /*
2163 * First check the fib status 2198 * First check the fib status
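The completion path now goes through the midlayer data-buffer accessors instead of poking request_bufflen and use_sg directly. The shape of that, as a kernel-style sketch (the function is hypothetical; the accessor calls are the real 2.6.23 API):

#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

static void complete_with_resid(struct scsi_cmnd *cmd, u32 hw_xferred)
{
	/* residual = bytes requested minus bytes the adapter moved */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - hw_xferred);
	/* one call replaces the old pci_unmap_sg/pci_unmap_single pair */
	scsi_dma_unmap(cmd);
}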
@@ -2233,7 +2268,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
2233 break; 2268 break;
2234 2269
2235 case SRB_STATUS_BUSY: 2270 case SRB_STATUS_BUSY:
2236 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; 2271 scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
2237 break; 2272 break;
2238 2273
2239 case SRB_STATUS_BUS_RESET: 2274 case SRB_STATUS_BUS_RESET:
@@ -2343,34 +2378,33 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
2343{ 2378{
2344 struct aac_dev *dev; 2379 struct aac_dev *dev;
2345 unsigned long byte_count = 0; 2380 unsigned long byte_count = 0;
2381 int nseg;
2346 2382
2347 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2383 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2348 // Get rid of old data 2384 // Get rid of old data
2349 psg->count = 0; 2385 psg->count = 0;
2350 psg->sg[0].addr = 0; 2386 psg->sg[0].addr = 0;
2351 psg->sg[0].count = 0; 2387 psg->sg[0].count = 0;
2352 if (scsicmd->use_sg) { 2388
2389 nseg = scsi_dma_map(scsicmd);
2390 BUG_ON(nseg < 0);
2391 if (nseg) {
2353 struct scatterlist *sg; 2392 struct scatterlist *sg;
2354 int i; 2393 int i;
2355 int sg_count;
2356 sg = (struct scatterlist *) scsicmd->request_buffer;
2357 2394
2358 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg, 2395 psg->count = cpu_to_le32(nseg);
2359 scsicmd->sc_data_direction);
2360 psg->count = cpu_to_le32(sg_count);
2361 2396
2362 for (i = 0; i < sg_count; i++) { 2397 scsi_for_each_sg(scsicmd, sg, nseg, i) {
2363 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); 2398 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
2364 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); 2399 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
2365 byte_count += sg_dma_len(sg); 2400 byte_count += sg_dma_len(sg);
2366 sg++;
2367 } 2401 }
2368 /* hba wants the size to be exact */ 2402 /* hba wants the size to be exact */
2369 if(byte_count > scsicmd->request_bufflen){ 2403 if (byte_count > scsi_bufflen(scsicmd)) {
2370 u32 temp = le32_to_cpu(psg->sg[i-1].count) - 2404 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
2371 (byte_count - scsicmd->request_bufflen); 2405 (byte_count - scsi_bufflen(scsicmd));
2372 psg->sg[i-1].count = cpu_to_le32(temp); 2406 psg->sg[i-1].count = cpu_to_le32(temp);
2373 byte_count = scsicmd->request_bufflen; 2407 byte_count = scsi_bufflen(scsicmd);
2374 } 2408 }
2375 /* Check for command underflow */ 2409 /* Check for command underflow */
2376 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ 2410 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2378,18 +2412,6 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
2378 byte_count, scsicmd->underflow); 2412 byte_count, scsicmd->underflow);
2379 } 2413 }
2380 } 2414 }
2381 else if(scsicmd->request_bufflen) {
2382 u32 addr;
2383 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2384 scsicmd->request_buffer,
2385 scsicmd->request_bufflen,
2386 scsicmd->sc_data_direction);
2387 addr = scsicmd->SCp.dma_handle;
2388 psg->count = cpu_to_le32(1);
2389 psg->sg[0].addr = cpu_to_le32(addr);
2390 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
2391 byte_count = scsicmd->request_bufflen;
2392 }
2393 return byte_count; 2415 return byte_count;
2394} 2416}
2395 2417
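With scsi_dma_map() every request, including a flat single buffer, arrives as a scatterlist, which is why the old else if(scsicmd->request_bufflen) branch can simply be deleted. The shared shape of all three aac_build_sg* variants, as a hedged kernel-style sketch (function name hypothetical):

#include <scsi/scsi_cmnd.h>
#include <linux/scatterlist.h>

static int fill_hw_sglist(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* 0 = no data, <0 = mapping error */
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		/* hand sg_dma_address(sg) / sg_dma_len(sg) to the
		 * controller's element i here */
	}
	return nseg;
}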
@@ -2399,6 +2421,7 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
2399 struct aac_dev *dev; 2421 struct aac_dev *dev;
2400 unsigned long byte_count = 0; 2422 unsigned long byte_count = 0;
2401 u64 addr; 2423 u64 addr;
2424 int nseg;
2402 2425
2403 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2426 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
2404 // Get rid of old data 2427 // Get rid of old data
@@ -2406,31 +2429,28 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
2406 psg->sg[0].addr[0] = 0; 2429 psg->sg[0].addr[0] = 0;
2407 psg->sg[0].addr[1] = 0; 2430 psg->sg[0].addr[1] = 0;
2408 psg->sg[0].count = 0; 2431 psg->sg[0].count = 0;
2409 if (scsicmd->use_sg) { 2432
2433 nseg = scsi_dma_map(scsicmd);
2434 BUG_ON(nseg < 0);
2435 if (nseg) {
2410 struct scatterlist *sg; 2436 struct scatterlist *sg;
2411 int i; 2437 int i;
2412 int sg_count;
2413 sg = (struct scatterlist *) scsicmd->request_buffer;
2414
2415 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
2416 scsicmd->sc_data_direction);
2417 2438
2418 for (i = 0; i < sg_count; i++) { 2439 scsi_for_each_sg(scsicmd, sg, nseg, i) {
2419 int count = sg_dma_len(sg); 2440 int count = sg_dma_len(sg);
2420 addr = sg_dma_address(sg); 2441 addr = sg_dma_address(sg);
2421 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); 2442 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
2422 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); 2443 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
2423 psg->sg[i].count = cpu_to_le32(count); 2444 psg->sg[i].count = cpu_to_le32(count);
2424 byte_count += count; 2445 byte_count += count;
2425 sg++;
2426 } 2446 }
2427 psg->count = cpu_to_le32(sg_count); 2447 psg->count = cpu_to_le32(nseg);
2428 /* hba wants the size to be exact */ 2448 /* hba wants the size to be exact */
2429 if(byte_count > scsicmd->request_bufflen){ 2449 if (byte_count > scsi_bufflen(scsicmd)) {
2430 u32 temp = le32_to_cpu(psg->sg[i-1].count) - 2450 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
2431 (byte_count - scsicmd->request_bufflen); 2451 (byte_count - scsi_bufflen(scsicmd));
2432 psg->sg[i-1].count = cpu_to_le32(temp); 2452 psg->sg[i-1].count = cpu_to_le32(temp);
2433 byte_count = scsicmd->request_bufflen; 2453 byte_count = scsi_bufflen(scsicmd);
2434 } 2454 }
2435 /* Check for command underflow */ 2455 /* Check for command underflow */
2436 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ 2456 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2438,26 +2458,13 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
2438 byte_count, scsicmd->underflow); 2458 byte_count, scsicmd->underflow);
2439 } 2459 }
2440 } 2460 }
2441 else if(scsicmd->request_bufflen) {
2442 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2443 scsicmd->request_buffer,
2444 scsicmd->request_bufflen,
2445 scsicmd->sc_data_direction);
2446 addr = scsicmd->SCp.dma_handle;
2447 psg->count = cpu_to_le32(1);
2448 psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
2449 psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
2450 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
2451 byte_count = scsicmd->request_bufflen;
2452 }
2453 return byte_count; 2461 return byte_count;
2454} 2462}
2455 2463
2456static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg) 2464static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
2457{ 2465{
2458 struct Scsi_Host *host = scsicmd->device->host;
2459 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
2460 unsigned long byte_count = 0; 2466 unsigned long byte_count = 0;
2467 int nseg;
2461 2468
2462 // Get rid of old data 2469 // Get rid of old data
2463 psg->count = 0; 2470 psg->count = 0;
@@ -2467,16 +2474,14 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
2467 psg->sg[0].addr[1] = 0; 2474 psg->sg[0].addr[1] = 0;
2468 psg->sg[0].count = 0; 2475 psg->sg[0].count = 0;
2469 psg->sg[0].flags = 0; 2476 psg->sg[0].flags = 0;
2470 if (scsicmd->use_sg) { 2477
2478 nseg = scsi_dma_map(scsicmd);
2479 BUG_ON(nseg < 0);
2480 if (nseg) {
2471 struct scatterlist *sg; 2481 struct scatterlist *sg;
2472 int i; 2482 int i;
2473 int sg_count;
2474 sg = (struct scatterlist *) scsicmd->request_buffer;
2475 2483
2476 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg, 2484 scsi_for_each_sg(scsicmd, sg, nseg, i) {
2477 scsicmd->sc_data_direction);
2478
2479 for (i = 0; i < sg_count; i++) {
2480 int count = sg_dma_len(sg); 2485 int count = sg_dma_len(sg);
2481 u64 addr = sg_dma_address(sg); 2486 u64 addr = sg_dma_address(sg);
2482 psg->sg[i].next = 0; 2487 psg->sg[i].next = 0;
@@ -2486,15 +2491,14 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
2486 psg->sg[i].count = cpu_to_le32(count); 2491 psg->sg[i].count = cpu_to_le32(count);
2487 psg->sg[i].flags = 0; 2492 psg->sg[i].flags = 0;
2488 byte_count += count; 2493 byte_count += count;
2489 sg++;
2490 } 2494 }
2491 psg->count = cpu_to_le32(sg_count); 2495 psg->count = cpu_to_le32(nseg);
2492 /* hba wants the size to be exact */ 2496 /* hba wants the size to be exact */
2493 if(byte_count > scsicmd->request_bufflen){ 2497 if (byte_count > scsi_bufflen(scsicmd)) {
2494 u32 temp = le32_to_cpu(psg->sg[i-1].count) - 2498 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
2495 (byte_count - scsicmd->request_bufflen); 2499 (byte_count - scsi_bufflen(scsicmd));
2496 psg->sg[i-1].count = cpu_to_le32(temp); 2500 psg->sg[i-1].count = cpu_to_le32(temp);
2497 byte_count = scsicmd->request_bufflen; 2501 byte_count = scsi_bufflen(scsicmd);
2498 } 2502 }
2499 /* Check for command underflow */ 2503 /* Check for command underflow */
2500 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){ 2504 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
@@ -2502,24 +2506,6 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
2502 byte_count, scsicmd->underflow); 2506 byte_count, scsicmd->underflow);
2503 } 2507 }
2504 } 2508 }
2505 else if(scsicmd->request_bufflen) {
2506 int count;
2507 u64 addr;
2508 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
2509 scsicmd->request_buffer,
2510 scsicmd->request_bufflen,
2511 scsicmd->sc_data_direction);
2512 addr = scsicmd->SCp.dma_handle;
2513 count = scsicmd->request_bufflen;
2514 psg->count = cpu_to_le32(1);
2515 psg->sg[0].next = 0;
2516 psg->sg[0].prev = 0;
2517 psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
2518 psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2519 psg->sg[0].count = cpu_to_le32(count);
2520 psg->sg[0].flags = 0;
2521 byte_count = scsicmd->request_bufflen;
2522 }
2523 return byte_count; 2509 return byte_count;
2524} 2510}
2525 2511
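sgmap64 and sgmapraw entries carry each bus address as two 32-bit little-endian words, low word first. The split in isolation, as a runnable sketch (endianness conversion omitted):

#include <stdio.h>
#include <stdint.h>

static void split_addr64(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xffffffff);
	*hi = (uint32_t)(addr >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_addr64(0x123456789abcdef0ULL, &lo, &hi);
	printf("lo=%08x hi=%08x\n", lo, hi);	/* lo=9abcdef0 hi=12345678 */
	return 0;
}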
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index c81edf36913f..f1d3b66af879 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,8 +12,8 @@
12 *----------------------------------------------------------------------------*/ 12 *----------------------------------------------------------------------------*/
13 13
14#ifndef AAC_DRIVER_BUILD 14#ifndef AAC_DRIVER_BUILD
15# define AAC_DRIVER_BUILD 2437 15# define AAC_DRIVER_BUILD 2447
16# define AAC_DRIVER_BRANCH "-mh4" 16# define AAC_DRIVER_BRANCH "-ms"
17#endif 17#endif
18#define MAXIMUM_NUM_CONTAINERS 32 18#define MAXIMUM_NUM_CONTAINERS 32
19 19
@@ -464,12 +464,12 @@ struct adapter_ops
464 int (*adapter_restart)(struct aac_dev *dev, int bled); 464 int (*adapter_restart)(struct aac_dev *dev, int bled);
465 /* Transport operations */ 465 /* Transport operations */
466 int (*adapter_ioremap)(struct aac_dev * dev, u32 size); 466 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
467 irqreturn_t (*adapter_intr)(int irq, void *dev_id); 467 irq_handler_t adapter_intr;
468 /* Packet operations */ 468 /* Packet operations */
469 int (*adapter_deliver)(struct fib * fib); 469 int (*adapter_deliver)(struct fib * fib);
470 int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba); 470 int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
471 int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); 471 int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
472 int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); 472 int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
473 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); 473 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
474 /* Administrative operations */ 474 /* Administrative operations */
475 int (*adapter_comm)(struct aac_dev * dev, int comm); 475 int (*adapter_comm)(struct aac_dev * dev, int comm);
@@ -860,10 +860,12 @@ struct aac_supplement_adapter_info
860 __le32 FlashFirmwareBootBuild; 860 __le32 FlashFirmwareBootBuild;
861 u8 MfgPcbaSerialNo[12]; 861 u8 MfgPcbaSerialNo[12];
862 u8 MfgWWNName[8]; 862 u8 MfgWWNName[8];
863 __le32 MoreFeatureBits; 863 __le32 SupportedOptions2;
864 __le32 ReservedGrowth[1]; 864 __le32 ReservedGrowth[1];
865}; 865};
866#define AAC_FEATURE_FALCON 0x00000010 866#define AAC_FEATURE_FALCON 0x00000010
867#define AAC_OPTION_MU_RESET 0x00000001
868#define AAC_OPTION_IGNORE_RESET 0x00000002
867#define AAC_SIS_VERSION_V3 3 869#define AAC_SIS_VERSION_V3 3
868#define AAC_SIS_SLOT_UNKNOWN 0xFF 870#define AAC_SIS_SLOT_UNKNOWN 0xFF
869 871
@@ -1054,8 +1056,8 @@ struct aac_dev
1054#define aac_adapter_read(fib,cmd,lba,count) \ 1056#define aac_adapter_read(fib,cmd,lba,count) \
1055 ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count) 1057 ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
1056 1058
1057#define aac_adapter_write(fib,cmd,lba,count) \ 1059#define aac_adapter_write(fib,cmd,lba,count,fua) \
1058 ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count) 1060 ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
1059 1061
1060#define aac_adapter_scsi(fib,cmd) \ 1062#define aac_adapter_scsi(fib,cmd) \
1061 ((fib)->dev)->a_ops.adapter_scsi(fib,cmd) 1063 ((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
@@ -1213,6 +1215,9 @@ struct aac_write64
1213 __le32 block; 1215 __le32 block;
1214 __le16 pad; 1216 __le16 pad;
1215 __le16 flags; 1217 __le16 flags;
1218#define IO_TYPE_WRITE 0x00000000
1219#define IO_TYPE_READ 0x00000001
1220#define IO_SUREWRITE 0x00000008
1216 struct sgmap64 sg; // Must be last in struct because it is variable 1221 struct sgmap64 sg; // Must be last in struct because it is variable
1217}; 1222};
1218struct aac_write_reply 1223struct aac_write_reply
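These flag bits are how the fua argument added to adapter_write reaches the firmware: a FUA write is tagged IO_SUREWRITE in the aac_write64 flags word. Roughly, as a sketch against the struct above rather than a quote from the driver:

	/* "writecmd" points at the aac_write64 payload of a FIB */
	writecmd->flags = fua ? cpu_to_le16(IO_TYPE_WRITE | IO_SUREWRITE)
			      : cpu_to_le16(IO_TYPE_WRITE);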
@@ -1257,6 +1262,19 @@ struct aac_synchronize_reply {
1257 u8 data[16]; 1262 u8 data[16];
1258}; 1263};
1259 1264
1265#define CT_PAUSE_IO 65
1266#define CT_RELEASE_IO 66
1267struct aac_pause {
1268 __le32 command; /* VM_ContainerConfig */
1269 __le32 type; /* CT_PAUSE_IO */
1270 __le32 timeout; /* 10ms ticks */
1271 __le32 min;
1272 __le32 noRescan;
1273 __le32 parm3;
1274 __le32 parm4;
1275 __le32 count; /* sizeof(((struct aac_pause_reply *)NULL)->data) */
1276};
1277
1260struct aac_srb 1278struct aac_srb
1261{ 1279{
1262 __le32 function; 1280 __le32 function;
@@ -1804,6 +1822,10 @@ int aac_get_config_status(struct aac_dev *dev, int commit_flag);
1804int aac_get_containers(struct aac_dev *dev); 1822int aac_get_containers(struct aac_dev *dev);
1805int aac_scsi_cmd(struct scsi_cmnd *cmd); 1823int aac_scsi_cmd(struct scsi_cmnd *cmd);
1806int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); 1824int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
1825#ifndef shost_to_class
1826#define shost_to_class(shost) &shost->shost_classdev
1827#endif
1828ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf);
1807int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg); 1829int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
1808int aac_rx_init(struct aac_dev *dev); 1830int aac_rx_init(struct aac_dev *dev);
1809int aac_rkt_init(struct aac_dev *dev); 1831int aac_rkt_init(struct aac_dev *dev);
@@ -1813,6 +1835,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
1813unsigned int aac_response_normal(struct aac_queue * q); 1835unsigned int aac_response_normal(struct aac_queue * q);
1814unsigned int aac_command_normal(struct aac_queue * q); 1836unsigned int aac_command_normal(struct aac_queue * q);
1815unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index); 1837unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index);
1838int aac_reset_adapter(struct aac_dev * dev, int forced);
1816int aac_check_health(struct aac_dev * dev); 1839int aac_check_health(struct aac_dev * dev);
1817int aac_command_thread(void *data); 1840int aac_command_thread(void *data);
1818int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx); 1841int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
@@ -1832,3 +1855,6 @@ extern int aif_timeout;
1832extern int expose_physicals; 1855extern int expose_physicals;
1833extern int aac_reset_devices; 1856extern int aac_reset_devices;
1834extern int aac_commit; 1857extern int aac_commit;
1858extern int update_interval;
1859extern int check_interval;
1860extern int check_reset;
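The three new externs pair with module parameters defined elsewhere in the driver. A sketch of what those definitions look like; the names come from the declarations above, but the defaults and permission bits shown here are assumptions:

#include <linux/module.h>
#include <linux/moduleparam.h>

int check_interval = 24 * 60 * 60;	/* seconds between health checks (assumed default) */
module_param(check_interval, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(check_interval, "Adapter health check interval in seconds");

int update_interval = 30 * 60;		/* seconds between host-time syncs (assumed default) */
module_param(update_interval, int, S_IRUGO | S_IWUSR);

int check_reset = 1;			/* permit automatic reset of a sick adapter (assumed default) */
module_param(check_reset, int, S_IRUGO | S_IWUSR);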
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 9aca57eda943..d510839c0bb2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1021,7 +1021,7 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
1021 1021
1022} 1022}
1023 1023
1024static int _aac_reset_adapter(struct aac_dev *aac) 1024static int _aac_reset_adapter(struct aac_dev *aac, int forced)
1025{ 1025{
1026 int index, quirks; 1026 int index, quirks;
1027 int retval; 1027 int retval;
@@ -1029,25 +1029,32 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1029 struct scsi_device *dev; 1029 struct scsi_device *dev;
1030 struct scsi_cmnd *command; 1030 struct scsi_cmnd *command;
1031 struct scsi_cmnd *command_list; 1031 struct scsi_cmnd *command_list;
1032 int jafo = 0;
1032 1033
1033 /* 1034 /*
1034 * Assumptions: 1035 * Assumptions:
1035 * - host is locked. 1036 * - host is locked, unless called by the aacraid thread.
1037 * (a matter of convenience, due to legacy issues surrounding
1038 * eh_host_adapter_reset).
1036 * - in_reset is asserted, so no new i/o is getting to the 1039 * - in_reset is asserted, so no new i/o is getting to the
1037 * card. 1040 * card.
1038 * - The card is dead. 1041 * - The card is dead, or will be very shortly ;-/ so no new
1042 * commands are completing in the interrupt service.
1039 */ 1043 */
1040 host = aac->scsi_host_ptr; 1044 host = aac->scsi_host_ptr;
1041 scsi_block_requests(host); 1045 scsi_block_requests(host);
1042 aac_adapter_disable_int(aac); 1046 aac_adapter_disable_int(aac);
1043 spin_unlock_irq(host->host_lock); 1047 if (aac->thread->pid != current->pid) {
1044 kthread_stop(aac->thread); 1048 spin_unlock_irq(host->host_lock);
1049 kthread_stop(aac->thread);
1050 jafo = 1;
1051 }
1045 1052
1046 /* 1053 /*
1047 * If a positive health, means in a known DEAD PANIC 1054 * If a positive health, means in a known DEAD PANIC
1048 * state and the adapter could be reset to `try again'. 1055 * state and the adapter could be reset to `try again'.
1049 */ 1056 */
1050 retval = aac_adapter_restart(aac, aac_adapter_check_health(aac)); 1057 retval = aac_adapter_restart(aac, forced ? 0 : aac_adapter_check_health(aac));
1051 1058
1052 if (retval) 1059 if (retval)
1053 goto out; 1060 goto out;
@@ -1104,10 +1111,12 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1104 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) 1111 if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
1105 if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) 1112 if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1106 goto out; 1113 goto out;
1107 aac->thread = kthread_run(aac_command_thread, aac, aac->name); 1114 if (jafo) {
1108 if (IS_ERR(aac->thread)) { 1115 aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1109 retval = PTR_ERR(aac->thread); 1116 if (IS_ERR(aac->thread)) {
1110 goto out; 1117 retval = PTR_ERR(aac->thread);
1118 goto out;
1119 }
1111 } 1120 }
1112 (void)aac_get_adapter_info(aac); 1121 (void)aac_get_adapter_info(aac);
1113 quirks = aac_get_driver_ident(index)->quirks; 1122 quirks = aac_get_driver_ident(index)->quirks;
@@ -1150,7 +1159,98 @@ static int _aac_reset_adapter(struct aac_dev *aac)
1150out: 1159out:
1151 aac->in_reset = 0; 1160 aac->in_reset = 0;
1152 scsi_unblock_requests(host); 1161 scsi_unblock_requests(host);
1153 spin_lock_irq(host->host_lock); 1162 if (jafo) {
1163 spin_lock_irq(host->host_lock);
1164 }
1165 return retval;
1166}
1167
1168int aac_reset_adapter(struct aac_dev * aac, int forced)
1169{
1170 unsigned long flagv = 0;
1171 int retval;
1172 struct Scsi_Host * host;
1173
1174 if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1175 return -EBUSY;
1176
1177 if (aac->in_reset) {
1178 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1179 return -EBUSY;
1180 }
1181 aac->in_reset = 1;
1182 spin_unlock_irqrestore(&aac->fib_lock, flagv);
1183
1184 /*
1185 * Wait for all commands to complete to this specific
1186 * target (block maximum 60 seconds). Although not necessary,
1187 * it does make us a good storage citizen.
1188 */
1189 host = aac->scsi_host_ptr;
1190 scsi_block_requests(host);
1191 if (forced < 2) for (retval = 60; retval; --retval) {
1192 struct scsi_device * dev;
1193 struct scsi_cmnd * command;
1194 int active = 0;
1195
1196 __shost_for_each_device(dev, host) {
1197 spin_lock_irqsave(&dev->list_lock, flagv);
1198 list_for_each_entry(command, &dev->cmd_list, list) {
1199 if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1200 active++;
1201 break;
1202 }
1203 }
1204 spin_unlock_irqrestore(&dev->list_lock, flagv);
1205 if (active)
1206 break;
1207
1208 }
1209 /*
1210			 * We can exit if all the commands are complete
1211 */
1212 if (active == 0)
1213 break;
1214 ssleep(1);
1215 }
1216
1217 /* Quiesce build, flush cache, write through mode */
1218 aac_send_shutdown(aac);
1219 spin_lock_irqsave(host->host_lock, flagv);
1220 retval = _aac_reset_adapter(aac, forced);
1221 spin_unlock_irqrestore(host->host_lock, flagv);
1222
1223 if (retval == -ENODEV) {
1224 /* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
1225 struct fib * fibctx = aac_fib_alloc(aac);
1226 if (fibctx) {
1227 struct aac_pause *cmd;
1228 int status;
1229
1230 aac_fib_init(fibctx);
1231
1232 cmd = (struct aac_pause *) fib_data(fibctx);
1233
1234 cmd->command = cpu_to_le32(VM_ContainerConfig);
1235 cmd->type = cpu_to_le32(CT_PAUSE_IO);
1236 cmd->timeout = cpu_to_le32(1);
1237 cmd->min = cpu_to_le32(1);
1238 cmd->noRescan = cpu_to_le32(1);
1239 cmd->count = cpu_to_le32(0);
1240
1241 status = aac_fib_send(ContainerCommand,
1242 fibctx,
1243 sizeof(struct aac_pause),
1244 FsaNormal,
1245 -2 /* Timeout silently */, 1,
1246 NULL, NULL);
1247
1248 if (status >= 0)
1249 aac_fib_complete(fibctx);
1250 aac_fib_free(fibctx);
1251 }
1252 }
1253
1154 return retval; 1254 return retval;
1155} 1255}
1156 1256
@@ -1270,10 +1370,15 @@ int aac_check_health(struct aac_dev * aac)
1270 1370
1271 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED); 1371 printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1272 1372
1373 if (!check_reset || (aac->supplement_adapter_info.SupportedOptions2 &
1374 le32_to_cpu(AAC_OPTION_IGNORE_RESET)))
1375 goto out;
1273 host = aac->scsi_host_ptr; 1376 host = aac->scsi_host_ptr;
1274 spin_lock_irqsave(host->host_lock, flagv); 1377 if (aac->thread->pid != current->pid)
1275 BlinkLED = _aac_reset_adapter(aac); 1378 spin_lock_irqsave(host->host_lock, flagv);
1276 spin_unlock_irqrestore(host->host_lock, flagv); 1379 BlinkLED = _aac_reset_adapter(aac, 0);
1380 if (aac->thread->pid != current->pid)
1381 spin_unlock_irqrestore(host->host_lock, flagv);
1277 return BlinkLED; 1382 return BlinkLED;
1278 1383
1279out: 1384out:
@@ -1300,6 +1405,9 @@ int aac_command_thread(void *data)
1300 struct aac_fib_context *fibctx; 1405 struct aac_fib_context *fibctx;
1301 unsigned long flags; 1406 unsigned long flags;
1302 DECLARE_WAITQUEUE(wait, current); 1407 DECLARE_WAITQUEUE(wait, current);
1408 unsigned long next_jiffies = jiffies + HZ;
1409 unsigned long next_check_jiffies = next_jiffies;
1410 long difference = HZ;
1303 1411
1304 /* 1412 /*
1305 * We can only have one thread per adapter for AIF's. 1413 * We can only have one thread per adapter for AIF's.
@@ -1368,7 +1476,7 @@ int aac_command_thread(void *data)
1368 cpu_to_le32(AifCmdJobProgress))) { 1476 cpu_to_le32(AifCmdJobProgress))) {
1369 aac_handle_aif(dev, fib); 1477 aac_handle_aif(dev, fib);
1370 } 1478 }
1371 1479
1372 time_now = jiffies/HZ; 1480 time_now = jiffies/HZ;
1373 1481
1374 /* 1482 /*
@@ -1507,11 +1615,79 @@ int aac_command_thread(void *data)
1507 * There are no more AIF's 1615 * There are no more AIF's
1508 */ 1616 */
1509 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags); 1617 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1510 schedule(); 1618
1619 /*
1620 * Background activity
1621 */
1622 if ((time_before(next_check_jiffies,next_jiffies))
1623 && ((difference = next_check_jiffies - jiffies) <= 0)) {
1624 next_check_jiffies = next_jiffies;
1625 if (aac_check_health(dev) == 0) {
1626 difference = ((long)(unsigned)check_interval)
1627 * HZ;
1628 next_check_jiffies = jiffies + difference;
1629 } else if (!dev->queues)
1630 break;
1631 }
1632 if (!time_before(next_check_jiffies,next_jiffies)
1633 && ((difference = next_jiffies - jiffies) <= 0)) {
1634 struct timeval now;
1635 int ret;
1636
1637			/* Don't even try to talk to adapter if it's sick */
1638 ret = aac_check_health(dev);
1639 if (!ret && !dev->queues)
1640 break;
1641 next_check_jiffies = jiffies
1642 + ((long)(unsigned)check_interval)
1643 * HZ;
1644 do_gettimeofday(&now);
1645
1646 /* Synchronize our watches */
1647 if (((1000000 - (1000000 / HZ)) > now.tv_usec)
1648 && (now.tv_usec > (1000000 / HZ)))
1649 difference = (((1000000 - now.tv_usec) * HZ)
1650 + 500000) / 1000000;
1651 else if (ret == 0) {
1652 struct fib *fibptr;
1653
1654 if ((fibptr = aac_fib_alloc(dev))) {
1655 u32 * info;
1656
1657 aac_fib_init(fibptr);
1658
1659 info = (u32 *) fib_data(fibptr);
1660 if (now.tv_usec > 500000)
1661 ++now.tv_sec;
1662
1663 *info = cpu_to_le32(now.tv_sec);
1664
1665 (void)aac_fib_send(SendHostTime,
1666 fibptr,
1667 sizeof(*info),
1668 FsaNormal,
1669 1, 1,
1670 NULL,
1671 NULL);
1672 aac_fib_complete(fibptr);
1673 aac_fib_free(fibptr);
1674 }
1675 difference = (long)(unsigned)update_interval*HZ;
1676 } else {
1677 /* retry shortly */
1678 difference = 10 * HZ;
1679 }
1680 next_jiffies = jiffies + difference;
1681 if (time_before(next_check_jiffies,next_jiffies))
1682 difference = next_check_jiffies - jiffies;
1683 }
1684 if (difference <= 0)
1685 difference = 1;
1686 set_current_state(TASK_INTERRUPTIBLE);
1687 schedule_timeout(difference);
1511 1688
1512 if (kthread_should_stop()) 1689 if (kthread_should_stop())
1513 break; 1690 break;
1514 set_current_state(TASK_INTERRUPTIBLE);
1515 } 1691 }
1516 if (dev->queues) 1692 if (dev->queues)
1517 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); 1693 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
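The SendHostTime branch rounds the wall clock to the nearest whole second before shipping it to the firmware. That rounding in isolation, as a runnable sketch:

#include <stdio.h>

/* mirrors the "if (now.tv_usec > 500000) ++now.tv_sec;" step above */
static long nearest_second(long tv_sec, long tv_usec)
{
	return tv_usec > 500000 ? tv_sec + 1 : tv_sec;
}

int main(void)
{
	printf("%ld\n", nearest_second(1000, 600001));	/* 1001 */
	printf("%ld\n", nearest_second(1000, 499999));	/* 1000 */
	return 0;
}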
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 5c487ff096c7..d76e1a8cb93a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -39,10 +39,8 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/spinlock.h> 41#include <linux/spinlock.h>
42#include <linux/dma-mapping.h>
43#include <linux/syscalls.h> 42#include <linux/syscalls.h>
44#include <linux/delay.h> 43#include <linux/delay.h>
45#include <linux/smp_lock.h>
46#include <linux/kthread.h> 44#include <linux/kthread.h>
47#include <asm/semaphore.h> 45#include <asm/semaphore.h>
48 46
@@ -223,12 +221,12 @@ static struct aac_driver_ident aac_drivers[] = {
223 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/ 221 { aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
224 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ 222 { aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
225 { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/ 223 { aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
226 { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell PERC2/QC */ 224 { aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
227 { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */ 225 { aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
228 226
229 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */ 227 { aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Dell Catchall */
230 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */ 228 { aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Legend Catchall */
231 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec Catch All */ 229 { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
232 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ 230 { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
233 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */ 231 { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec NEMER/ARK Catch All */
234}; 232};
@@ -403,10 +401,6 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
403 401
404static int aac_slave_configure(struct scsi_device *sdev) 402static int aac_slave_configure(struct scsi_device *sdev)
405{ 403{
406 if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
407 sdev->skip_ms_page_8 = 1;
408 sdev->skip_ms_page_3f = 1;
409 }
410 if ((sdev->type == TYPE_DISK) && 404 if ((sdev->type == TYPE_DISK) &&
411 (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 405 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
412 if (expose_physicals == 0) 406 if (expose_physicals == 0)
@@ -450,6 +444,43 @@ static int aac_slave_configure(struct scsi_device *sdev)
450 return 0; 444 return 0;
451} 445}
452 446
447/**
448 * aac_change_queue_depth - alter queue depths
449 * @sdev: SCSI device we are considering
450 * @depth: desired queue depth
451 *
452 * Alters queue depths for target device based on the host adapter's
453 * total capacity and the queue depth supported by the target device.
454 */
455
456static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
457{
458 if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
459 (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
460 struct scsi_device * dev;
461 struct Scsi_Host *host = sdev->host;
462 unsigned num = 0;
463
464 __shost_for_each_device(dev, host) {
465 if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
466 (sdev_channel(dev) == CONTAINER_CHANNEL))
467 ++num;
468 ++num;
469 }
470 if (num >= host->can_queue)
471 num = host->can_queue - 1;
472 if (depth > (host->can_queue - num))
473 depth = host->can_queue - num;
474 if (depth > 256)
475 depth = 256;
476 else if (depth < 2)
477 depth = 2;
478 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
479 } else
480 scsi_adjust_queue_depth(sdev, 0, 1);
481 return sdev->queue_depth;
482}
483
453static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg) 484static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
454{ 485{
455 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; 486 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
@@ -548,6 +579,14 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
548 ssleep(1); 579 ssleep(1);
549 } 580 }
550 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME); 581 printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
582 /*
583 * This adapter needs a blind reset, only do so for Adapters that
584 * support a register, instead of a commanded, reset.
585 */
586 if ((aac->supplement_adapter_info.SupportedOptions2 &
587 le32_to_cpu(AAC_OPTION_MU_RESET|AAC_OPTION_IGNORE_RESET)) ==
588 le32_to_cpu(AAC_OPTION_MU_RESET))
589 aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
551 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */ 590 return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
552} 591}
553 592
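The gate above allows a blind register reset only when the firmware offers MU_RESET and has not asked to be left alone. The same test as a small helper (the helper is mine; cpu_to_le32 on the constant is the conventional spelling and is numerically equivalent to the le32_to_cpu form used above):

#include <linux/types.h>
#include <asm/byteorder.h>

/* assumes the AAC_OPTION_* bits from aacraid.h */
static bool blind_reset_allowed(__le32 options2)
{
	__le32 mask = cpu_to_le32(AAC_OPTION_MU_RESET | AAC_OPTION_IGNORE_RESET);

	/* MU_RESET must be offered and IGNORE_RESET must be clear */
	return (options2 & mask) == cpu_to_le32(AAC_OPTION_MU_RESET);
}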
@@ -731,15 +770,21 @@ static ssize_t aac_show_bios_version(struct class_device *class_dev,
731 return len; 770 return len;
732} 771}
733 772
734static ssize_t aac_show_serial_number(struct class_device *class_dev, 773ssize_t aac_show_serial_number(struct class_device *class_dev, char *buf)
735 char *buf)
736{ 774{
737 struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata; 775 struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
738 int len = 0; 776 int len = 0;
739 777
740 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) 778 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
741 len = snprintf(buf, PAGE_SIZE, "%x\n", 779 len = snprintf(buf, PAGE_SIZE, "%06X\n",
742 le32_to_cpu(dev->adapter_info.serial[0])); 780 le32_to_cpu(dev->adapter_info.serial[0]));
781 if (len &&
782 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
783 sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)+2-len],
784 buf, len))
785 len = snprintf(buf, PAGE_SIZE, "%.*s\n",
786 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
787 dev->supplement_adapter_info.MfgPcbaSerialNo);
743 return len; 788 return len;
744} 789}
745 790
@@ -755,6 +800,31 @@ static ssize_t aac_show_max_id(struct class_device *class_dev, char *buf)
755 class_to_shost(class_dev)->max_id); 800 class_to_shost(class_dev)->max_id);
756} 801}
757 802
803static ssize_t aac_store_reset_adapter(struct class_device *class_dev,
804 const char *buf, size_t count)
805{
806 int retval = -EACCES;
807
808 if (!capable(CAP_SYS_ADMIN))
809 return retval;
810 retval = aac_reset_adapter((struct aac_dev*)class_to_shost(class_dev)->hostdata, buf[0] == '!');
811 if (retval >= 0)
812 retval = count;
813 return retval;
814}
815
816static ssize_t aac_show_reset_adapter(struct class_device *class_dev,
817 char *buf)
818{
819 struct aac_dev *dev = (struct aac_dev*)class_to_shost(class_dev)->hostdata;
820 int len, tmp;
821
822 tmp = aac_adapter_check_health(dev);
823 if ((tmp == 0) && dev->in_reset)
824 tmp = -EBUSY;
825 len = snprintf(buf, PAGE_SIZE, "0x%x", tmp);
826 return len;
827}
758 828
759static struct class_device_attribute aac_model = { 829static struct class_device_attribute aac_model = {
760 .attr = { 830 .attr = {
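Once registered, the attribute appears as /sys/class/scsi_host/host<n>/reset_host: reading it reports the adapter health value (a negative code while a reset is pending), and a privileged write triggers aac_reset_adapter, with a leading '!' in the written string selecting the forced variant that skips the health check.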
@@ -812,6 +882,14 @@ static struct class_device_attribute aac_max_id = {
812 }, 882 },
813 .show = aac_show_max_id, 883 .show = aac_show_max_id,
814}; 884};
885static struct class_device_attribute aac_reset = {
886 .attr = {
887 .name = "reset_host",
888 .mode = S_IWUSR|S_IRUGO,
889 },
890 .store = aac_store_reset_adapter,
891 .show = aac_show_reset_adapter,
892};
815 893
816static struct class_device_attribute *aac_attrs[] = { 894static struct class_device_attribute *aac_attrs[] = {
817 &aac_model, 895 &aac_model,
@@ -822,6 +900,7 @@ static struct class_device_attribute *aac_attrs[] = {
822 &aac_serial_number, 900 &aac_serial_number,
823 &aac_max_channel, 901 &aac_max_channel,
824 &aac_max_id, 902 &aac_max_id,
903 &aac_reset,
825 NULL 904 NULL
826}; 905};
827 906
@@ -848,6 +927,7 @@ static struct scsi_host_template aac_driver_template = {
848 .bios_param = aac_biosparm, 927 .bios_param = aac_biosparm,
849 .shost_attrs = aac_attrs, 928 .shost_attrs = aac_attrs,
850 .slave_configure = aac_slave_configure, 929 .slave_configure = aac_slave_configure,
930 .change_queue_depth = aac_change_queue_depth,
851 .eh_abort_handler = aac_eh_abort, 931 .eh_abort_handler = aac_eh_abort,
852 .eh_host_reset_handler = aac_eh_reset, 932 .eh_host_reset_handler = aac_eh_reset,
853 .can_queue = AAC_NUM_IO_FIB, 933 .can_queue = AAC_NUM_IO_FIB,
@@ -1086,7 +1166,7 @@ static int __init aac_init(void)
1086{ 1166{
1087 int error; 1167 int error;
1088 1168
1089 printk(KERN_INFO "Adaptec %s driver (%s)\n", 1169 printk(KERN_INFO "Adaptec %s driver %s\n",
1090 AAC_DRIVERNAME, aac_driver_version); 1170 AAC_DRIVERNAME, aac_driver_version);
1091 1171
1092 error = pci_register_driver(&aac_pci_driver); 1172 error = pci_register_driver(&aac_pci_driver);
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ae978a373c56..ebc65b9fea92 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -464,21 +464,24 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)
464{ 464{
465 u32 var; 465 u32 var;
466 466
467 if (bled) 467 if (!(dev->supplement_adapter_info.SupportedOptions2 &
468 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", 468 le32_to_cpu(AAC_OPTION_MU_RESET)) || (bled >= 0) || (bled == -2)) {
469 dev->name, dev->id, bled); 469 if (bled)
470 else { 470 printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
471 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 471 dev->name, dev->id, bled);
472 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); 472 else {
473 if (!bled && (var != 0x00000001)) 473 bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
474 bled = -EINVAL; 474 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
475 } 475 if (!bled && (var != 0x00000001))
476 if (bled && (bled != -ETIMEDOUT)) 476 bled = -EINVAL;
477 bled = aac_adapter_sync_cmd(dev, IOP_RESET, 477 }
478 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL); 478 if (bled && (bled != -ETIMEDOUT))
479 bled = aac_adapter_sync_cmd(dev, IOP_RESET,
480 0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
479 481
480 if (bled && (bled != -ETIMEDOUT)) 482 if (bled && (bled != -ETIMEDOUT))
481 return -EINVAL; 483 return -EINVAL;
484 }
482 if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */ 485 if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */
483 rx_writel(dev, MUnit.reserved2, 3); 486 rx_writel(dev, MUnit.reserved2, 3);
484 msleep(5000); /* Delay 5 seconds */ 487 msleep(5000); /* Delay 5 seconds */
@@ -596,7 +599,7 @@ int _aac_rx_init(struct aac_dev *dev)
596 } 599 }
597 msleep(1); 600 msleep(1);
598 } 601 }
599 if (restart) 602 if (restart && aac_commit)
600 aac_commit = 1; 603 aac_commit = 1;
601 /* 604 /*
602 * Fill in the common function dispatch table. 605 * Fill in the common function dispatch table.
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 9b3303b64113..2b6689709e53 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -798,7 +798,6 @@
798#include <scsi/scsi_tcq.h> 798#include <scsi/scsi_tcq.h>
799#include <scsi/scsi.h> 799#include <scsi/scsi.h>
800#include <scsi/scsi_host.h> 800#include <scsi/scsi_host.h>
801#include "advansys.h"
802#ifdef CONFIG_PCI 801#ifdef CONFIG_PCI
803#include <linux/pci.h> 802#include <linux/pci.h>
804#endif /* CONFIG_PCI */ 803#endif /* CONFIG_PCI */
@@ -2014,7 +2013,7 @@ STATIC int AscSgListToQueue(int);
2014STATIC void AscEnableIsaDma(uchar); 2013STATIC void AscEnableIsaDma(uchar);
2015#endif /* CONFIG_ISA */ 2014#endif /* CONFIG_ISA */
2016STATIC ASC_DCNT AscGetMaxDmaCount(ushort); 2015STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
2017 2016static const char *advansys_info(struct Scsi_Host *shp);
2018 2017
2019/* 2018/*
2020 * --- Adv Library Constants and Macros 2019 * --- Adv Library Constants and Macros
@@ -3970,10 +3969,6 @@ STATIC ushort asc_bus[ASC_NUM_BUS] __initdata = {
3970 ASC_IS_PCI, 3969 ASC_IS_PCI,
3971}; 3970};
3972 3971
3973/*
3974 * Used with the LILO 'advansys' option to eliminate or
3975 * limit I/O port probing at boot time, cf. advansys_setup().
3976 */
3977STATIC int asc_iopflag = ASC_FALSE; 3972STATIC int asc_iopflag = ASC_FALSE;
3978STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 }; 3973STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 };
3979 3974
@@ -4055,10 +4050,6 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
4055#endif /* ADVANSYS_DEBUG */ 4050#endif /* ADVANSYS_DEBUG */
4056 4051
4057 4052
4058/*
4059 * --- Linux 'struct scsi_host_template' and advansys_setup() Functions
4060 */
4061
4062#ifdef CONFIG_PROC_FS 4053#ifdef CONFIG_PROC_FS
4063/* 4054/*
4064 * advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)] 4055 * advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
@@ -4080,7 +4071,7 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
4080 * if 'prtbuf' is too small it will not be overwritten. Instead the 4071 * if 'prtbuf' is too small it will not be overwritten. Instead the
4081 * user just won't get all the available statistics. 4072 * user just won't get all the available statistics.
4082 */ 4073 */
4083int 4074static int
4084advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start, 4075advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
4085 off_t offset, int length, int inout) 4076 off_t offset, int length, int inout)
4086{ 4077{
@@ -4296,7 +4287,7 @@ advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
4296 * it must not call SCSI mid-level functions including scsi_malloc() 4287 * it must not call SCSI mid-level functions including scsi_malloc()
4297 * and scsi_free(). 4288 * and scsi_free().
4298 */ 4289 */
4299int __init 4290static int __init
4300advansys_detect(struct scsi_host_template *tpnt) 4291advansys_detect(struct scsi_host_template *tpnt)
4301{ 4292{
4302 static int detect_called = ASC_FALSE; 4293 static int detect_called = ASC_FALSE;
@@ -5428,7 +5419,7 @@ advansys_detect(struct scsi_host_template *tpnt)
5428 * 5419 *
5429 * Release resources allocated for a single AdvanSys adapter. 5420 * Release resources allocated for a single AdvanSys adapter.
5430 */ 5421 */
5431int 5422static int
5432advansys_release(struct Scsi_Host *shp) 5423advansys_release(struct Scsi_Host *shp)
5433{ 5424{
5434 asc_board_t *boardp; 5425 asc_board_t *boardp;
@@ -5475,7 +5466,7 @@ advansys_release(struct Scsi_Host *shp)
5475 * Note: The information line should not exceed ASC_INFO_SIZE bytes, 5466 * Note: The information line should not exceed ASC_INFO_SIZE bytes,
5476 * otherwise the static 'info' array will be overrun. 5467 * otherwise the static 'info' array will be overrun.
5477 */ 5468 */
5478const char * 5469static const char *
5479advansys_info(struct Scsi_Host *shp) 5470advansys_info(struct Scsi_Host *shp)
5480{ 5471{
5481 static char info[ASC_INFO_SIZE]; 5472 static char info[ASC_INFO_SIZE];
@@ -5568,7 +5559,7 @@ advansys_info(struct Scsi_Host *shp)
5568 * This function always returns 0. Command return status is saved 5559 * This function always returns 0. Command return status is saved
5569 * in the 'scp' result field. 5560 * in the 'scp' result field.
5570 */ 5561 */
5571int 5562static int
5572advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) 5563advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
5573{ 5564{
5574 struct Scsi_Host *shp; 5565 struct Scsi_Host *shp;
@@ -5656,7 +5647,7 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
5656 * sleeping is allowed and no locking other than for host structures is 5647 * sleeping is allowed and no locking other than for host structures is
5657 * required. Returns SUCCESS or FAILED. 5648 * required. Returns SUCCESS or FAILED.
5658 */ 5649 */
5659int 5650static int
5660advansys_reset(struct scsi_cmnd *scp) 5651advansys_reset(struct scsi_cmnd *scp)
5661{ 5652{
5662 struct Scsi_Host *shp; 5653 struct Scsi_Host *shp;
@@ -5841,7 +5832,7 @@ advansys_reset(struct scsi_cmnd *scp)
5841 * ip[1]: sectors 5832 * ip[1]: sectors
5842 * ip[2]: cylinders 5833 * ip[2]: cylinders
5843 */ 5834 */
5844int 5835static int
5845advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev, 5836advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
5846 sector_t capacity, int ip[]) 5837 sector_t capacity, int ip[])
5847{ 5838{
@@ -5875,82 +5866,6 @@ advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
5875} 5866}
5876 5867
5877/* 5868/*
5878 * advansys_setup()
5879 *
5880 * This function is called from init/main.c at boot time.
5881 * It is passed LILO parameters that can be set from the
5882 * LILO command line or in /etc/lilo.conf.
5883 *
5884 * It is used by the AdvanSys driver to either disable I/O
5885 * port scanning or to limit scanning to 1 - 4 I/O ports.
5886 * Regardless of the option setting EISA and PCI boards
5887 * will still be searched for and detected. This option
5888 * only affects searching for ISA and VL boards.
5889 *
5890 * If ADVANSYS_DEBUG is defined the driver debug level may
5891 * be set using the 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port.
5892 *
5893 * Examples:
5894 * 1. Eliminate I/O port scanning:
5895 * boot: linux advansys=
5896 * or
5897 * boot: linux advansys=0x0
5898 * 2. Limit I/O port scanning to one I/O port:
5899 * boot: linux advansys=0x110
5900 * 3. Limit I/O port scanning to four I/O ports:
5901 * boot: linux advansys=0x110,0x210,0x230,0x330
5902 * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
5903 * set the driver debug level to 2.
5904 * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
5905 *
5906 * ints[0] - number of arguments
5907 * ints[1] - first argument
5908 * ints[2] - second argument
5909 * ...
5910 */
5911void __init
5912advansys_setup(char *str, int *ints)
5913{
5914 int i;
5915
5916 if (asc_iopflag == ASC_TRUE) {
5917 printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
5918 return;
5919 }
5920
5921 asc_iopflag = ASC_TRUE;
5922
5923 if (ints[0] > ASC_NUM_IOPORT_PROBE) {
5924#ifdef ADVANSYS_DEBUG
5925 if ((ints[0] == ASC_NUM_IOPORT_PROBE + 1) &&
5926 (ints[ASC_NUM_IOPORT_PROBE + 1] >> 4 == 0xdeb)) {
5927 asc_dbglvl = ints[ASC_NUM_IOPORT_PROBE + 1] & 0xf;
5928 } else {
5929#endif /* ADVANSYS_DEBUG */
5930 printk("AdvanSys SCSI: only %d I/O ports accepted\n",
5931 ASC_NUM_IOPORT_PROBE);
5932#ifdef ADVANSYS_DEBUG
5933 }
5934#endif /* ADVANSYS_DEBUG */
5935 }
5936
5937#ifdef ADVANSYS_DEBUG
5938 ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
5939 for (i = 1; i < ints[0]; i++) {
5940 ASC_DBG2(1, " ints[%d] 0x%x", i, ints[i]);
5941 }
5942 ASC_DBG(1, "\n");
5943#endif /* ADVANSYS_DEBUG */
5944
5945 for (i = 1; i <= ints[0] && i <= ASC_NUM_IOPORT_PROBE; i++) {
5946 asc_ioport[i-1] = ints[i];
5947 ASC_DBG2(1, "advansys_setup: asc_ioport[%d] 0x%x\n",
5948 i - 1, asc_ioport[i-1]);
5949 }
5950}
5951
5952
5953/*
5954 * --- Loadable Driver Support 5869 * --- Loadable Driver Support
5955 */ 5870 */
5956 5871
diff --git a/drivers/scsi/advansys.h b/drivers/scsi/advansys.h
deleted file mode 100644
index 8ee7fb16a725..000000000000
--- a/drivers/scsi/advansys.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
3 *
4 * Copyright (c) 1995-2000 Advanced System Products, Inc.
5 * Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
6 * All Rights Reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that redistributions of source
10 * code retain the above copyright notice and this comment without
11 * modification.
12 *
13 * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys)
14 * changed its name to ConnectCom Solutions, Inc.
15 *
16 */
17
18#ifndef _ADVANSYS_H
19#define _ADVANSYS_H
20
21/*
22 * struct scsi_host_template function prototypes.
23 */
24int advansys_detect(struct scsi_host_template *);
25int advansys_release(struct Scsi_Host *);
26const char *advansys_info(struct Scsi_Host *);
27int advansys_queuecommand(struct scsi_cmnd *, void (* done)(struct scsi_cmnd *));
28int advansys_reset(struct scsi_cmnd *);
29int advansys_biosparam(struct scsi_device *, struct block_device *,
30 sector_t, int[]);
31static int advansys_slave_configure(struct scsi_device *);
32
33/* init/main.c setup function */
34void advansys_setup(char *, int *);
35
36#endif /* _ADVANSYS_H */
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4b4d1233ce8a..85f2394ffc3e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -240,6 +240,7 @@
240#include <linux/io.h> 240#include <linux/io.h>
241#include <linux/blkdev.h> 241#include <linux/blkdev.h>
242#include <asm/system.h> 242#include <asm/system.h>
243#include <linux/completion.h>
243#include <linux/errno.h> 244#include <linux/errno.h>
244#include <linux/string.h> 245#include <linux/string.h>
245#include <linux/wait.h> 246#include <linux/wait.h>
@@ -253,7 +254,6 @@
253#include <linux/spinlock.h> 254#include <linux/spinlock.h>
254#include <linux/workqueue.h> 255#include <linux/workqueue.h>
255#include <linux/list.h> 256#include <linux/list.h>
256#include <asm/semaphore.h>
257#include <scsi/scsicam.h> 257#include <scsi/scsicam.h>
258 258
259#include "scsi.h" 259#include "scsi.h"
@@ -551,7 +551,7 @@ struct aha152x_hostdata {
551 */ 551 */
552struct aha152x_scdata { 552struct aha152x_scdata {
553 Scsi_Cmnd *next; /* next sc in queue */ 553 Scsi_Cmnd *next; /* next sc in queue */
554 struct semaphore *sem; /* semaphore to block on */ 554 struct completion *done;/* semaphore to block on */
555 unsigned char cmd_len; 555 unsigned char cmd_len;
556 unsigned char cmnd[MAX_COMMAND_SIZE]; 556 unsigned char cmnd[MAX_COMMAND_SIZE];
557 unsigned short use_sg; 557 unsigned short use_sg;
@@ -608,7 +608,7 @@ struct aha152x_scdata {
608 608
609#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble) 609#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble)
610#define SCNEXT(SCpnt) SCDATA(SCpnt)->next 610#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
611#define SCSEM(SCpnt) SCDATA(SCpnt)->sem 611#define SCSEM(SCpnt) SCDATA(SCpnt)->done
612 612
613#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset)) 613#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
614 614
@@ -969,7 +969,8 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
969/* 969/*
970 * Queue a command and setup interrupts for a free bus. 970 * Queue a command and setup interrupts for a free bus.
971 */ 971 */
972static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int phase, void (*done)(Scsi_Cmnd *)) 972static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
973 int phase, void (*done)(Scsi_Cmnd *))
973{ 974{
974 struct Scsi_Host *shpnt = SCpnt->device->host; 975 struct Scsi_Host *shpnt = SCpnt->device->host;
975 unsigned long flags; 976 unsigned long flags;
@@ -1013,7 +1014,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1013 } 1014 }
1014 1015
1015 SCNEXT(SCpnt) = NULL; 1016 SCNEXT(SCpnt) = NULL;
1016 SCSEM(SCpnt) = sem; 1017 SCSEM(SCpnt) = complete;
1017 1018
1018 /* setup scratch area 1019 /* setup scratch area
1019 SCp.ptr : buffer pointer 1020 SCp.ptr : buffer pointer
@@ -1084,9 +1085,9 @@ static void reset_done(Scsi_Cmnd *SCpnt)
1084 DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt)); 1085 DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt));
1085#endif 1086#endif
1086 if(SCSEM(SCpnt)) { 1087 if(SCSEM(SCpnt)) {
1087 up(SCSEM(SCpnt)); 1088 complete(SCSEM(SCpnt));
1088 } else { 1089 } else {
1089 printk(KERN_ERR "aha152x: reset_done w/o semaphore\n"); 1090 printk(KERN_ERR "aha152x: reset_done w/o completion\n");
1090 } 1091 }
1091} 1092}
1092 1093
@@ -1139,21 +1140,6 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
1139 return FAILED; 1140 return FAILED;
1140} 1141}
1141 1142
1142static void timer_expired(unsigned long p)
1143{
1144 Scsi_Cmnd *SCp = (Scsi_Cmnd *)p;
1145 struct semaphore *sem = SCSEM(SCp);
1146 struct Scsi_Host *shpnt = SCp->device->host;
1147 unsigned long flags;
1148
1149 /* remove command from issue queue */
1150 DO_LOCK(flags);
1151 remove_SC(&ISSUE_SC, SCp);
1152 DO_UNLOCK(flags);
1153
1154 up(sem);
1155}
1156
1157/* 1143/*
1158 * Reset a device 1144 * Reset a device
1159 * 1145 *
@@ -1161,14 +1147,14 @@ static void timer_expired(unsigned long p)
1161static int aha152x_device_reset(Scsi_Cmnd * SCpnt) 1147static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1162{ 1148{
1163 struct Scsi_Host *shpnt = SCpnt->device->host; 1149 struct Scsi_Host *shpnt = SCpnt->device->host;
1164 DECLARE_MUTEX_LOCKED(sem); 1150 DECLARE_COMPLETION(done);
1165 struct timer_list timer;
1166 int ret, issued, disconnected; 1151 int ret, issued, disconnected;
1167 unsigned char old_cmd_len = SCpnt->cmd_len; 1152 unsigned char old_cmd_len = SCpnt->cmd_len;
1168 unsigned short old_use_sg = SCpnt->use_sg; 1153 unsigned short old_use_sg = SCpnt->use_sg;
1169 void *old_buffer = SCpnt->request_buffer; 1154 void *old_buffer = SCpnt->request_buffer;
1170 unsigned old_bufflen = SCpnt->request_bufflen; 1155 unsigned old_bufflen = SCpnt->request_bufflen;
1171 unsigned long flags; 1156 unsigned long flags;
1157 unsigned long timeleft;
1172 1158
1173#if defined(AHA152X_DEBUG) 1159#if defined(AHA152X_DEBUG)
1174 if(HOSTDATA(shpnt)->debug & debug_eh) { 1160 if(HOSTDATA(shpnt)->debug & debug_eh) {
@@ -1192,15 +1178,15 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1192 SCpnt->request_buffer = NULL; 1178 SCpnt->request_buffer = NULL;
1193 SCpnt->request_bufflen = 0; 1179 SCpnt->request_bufflen = 0;
1194 1180
1195 init_timer(&timer); 1181 aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
1196 timer.data = (unsigned long) SCpnt;
1197 timer.expires = jiffies + 100*HZ; /* 10s */
1198 timer.function = (void (*)(unsigned long)) timer_expired;
1199 1182
1200 aha152x_internal_queue(SCpnt, &sem, resetting, reset_done); 1183 timeleft = wait_for_completion_timeout(&done, 100*HZ);
1201 add_timer(&timer); 1184 if (!timeleft) {
1202 down(&sem); 1185 /* remove command from issue queue */
1203 del_timer(&timer); 1186 DO_LOCK(flags);
1187 remove_SC(&ISSUE_SC, SCpnt);
1188 DO_UNLOCK(flags);
1189 }
1204 1190
1205 SCpnt->cmd_len = old_cmd_len; 1191 SCpnt->cmd_len = old_cmd_len;
1206 SCpnt->use_sg = old_use_sg; 1192 SCpnt->use_sg = old_use_sg;
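
The aha152x hunks above are the stock semaphore-to-completion conversion: a locked semaphore plus a kernel timer that up()s it on expiry becomes a single wait_for_completion_timeout(), with the timeout case handled inline by the waiter (note that the 100*HZ carried over from the old timer is 100 seconds of jiffies, despite the original /* 10s */ comment). A minimal, driver-neutral sketch of the pattern -- the names here are illustrative, not aha152x's:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static DECLARE_COMPLETION(op_done);	/* signalled by the done path */

	static void op_finished(void)
	{
		complete(&op_done);		/* replaces up(&sem) */
	}

	static int wait_for_op(void)
	{
		unsigned long timeleft;

		/* replaces init_timer()/add_timer()/down()/del_timer() */
		timeleft = wait_for_completion_timeout(&op_done, 10 * HZ);
		if (!timeleft)
			return -ETIMEDOUT;	/* undo any queued work here */
		return 0;
	}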
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index d7af9c63a04d..e4a4f3a965d9 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -271,20 +271,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
271 continue; 271 continue;
272 } 272 }
273 sgptr = (struct aha1740_sg *) SCtmp->host_scribble; 273 sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
274 if (SCtmp->use_sg) { 274 scsi_dma_unmap(SCtmp);
275 /* We used scatter-gather. 275
276 Do the unmapping dance. */
277 dma_unmap_sg (&edev->dev,
278 (struct scatterlist *) SCtmp->request_buffer,
279 SCtmp->use_sg,
280 SCtmp->sc_data_direction);
281 } else {
282 dma_unmap_single (&edev->dev,
283 sgptr->buf_dma_addr,
284 SCtmp->request_bufflen,
285 DMA_BIDIRECTIONAL);
286 }
287
288 /* Free the sg block */ 276 /* Free the sg block */
289 dma_free_coherent (&edev->dev, 277 dma_free_coherent (&edev->dev,
290 sizeof (struct aha1740_sg), 278 sizeof (struct aha1740_sg),
@@ -349,11 +337,9 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
349 unchar target = scmd_id(SCpnt); 337 unchar target = scmd_id(SCpnt);
350 struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host); 338 struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
351 unsigned long flags; 339 unsigned long flags;
352 void *buff = SCpnt->request_buffer;
353 int bufflen = SCpnt->request_bufflen;
354 dma_addr_t sg_dma; 340 dma_addr_t sg_dma;
355 struct aha1740_sg *sgptr; 341 struct aha1740_sg *sgptr;
356 int ecbno; 342 int ecbno, nseg;
357 DEB(int i); 343 DEB(int i);
358 344
359 if(*cmd == REQUEST_SENSE) { 345 if(*cmd == REQUEST_SENSE) {
@@ -423,24 +409,23 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
423 } 409 }
424 sgptr = (struct aha1740_sg *) SCpnt->host_scribble; 410 sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
425 sgptr->sg_dma_addr = sg_dma; 411 sgptr->sg_dma_addr = sg_dma;
426 412
427 if (SCpnt->use_sg) { 413 nseg = scsi_dma_map(SCpnt);
428 struct scatterlist * sgpnt; 414 BUG_ON(nseg < 0);
415 if (nseg) {
416 struct scatterlist *sg;
429 struct aha1740_chain * cptr; 417 struct aha1740_chain * cptr;
430 int i, count; 418 int i;
431 DEB(unsigned char * ptr); 419 DEB(unsigned char * ptr);
432 420
433 host->ecb[ecbno].sg = 1; /* SCSI Initiator Command 421 host->ecb[ecbno].sg = 1; /* SCSI Initiator Command
434 * w/scatter-gather*/ 422 * w/scatter-gather*/
435 sgpnt = (struct scatterlist *) SCpnt->request_buffer;
436 cptr = sgptr->sg_chain; 423 cptr = sgptr->sg_chain;
437 count = dma_map_sg (&host->edev->dev, sgpnt, SCpnt->use_sg, 424 scsi_for_each_sg(SCpnt, sg, nseg, i) {
438 SCpnt->sc_data_direction); 425 cptr[i].datalen = sg_dma_len (sg);
439 for(i=0; i < count; i++) { 426 cptr[i].dataptr = sg_dma_address (sg);
440 cptr[i].datalen = sg_dma_len (sgpnt + i);
441 cptr[i].dataptr = sg_dma_address (sgpnt + i);
442 } 427 }
443 host->ecb[ecbno].datalen = count*sizeof(struct aha1740_chain); 428 host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
444 host->ecb[ecbno].dataptr = sg_dma; 429 host->ecb[ecbno].dataptr = sg_dma;
445#ifdef DEBUG 430#ifdef DEBUG
446 printk("cptr %x: ",cptr); 431 printk("cptr %x: ",cptr);
@@ -448,11 +433,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
448 for(i=0;i<24;i++) printk("%02x ", ptr[i]); 433 for(i=0;i<24;i++) printk("%02x ", ptr[i]);
449#endif 434#endif
450 } else { 435 } else {
451 host->ecb[ecbno].datalen = bufflen; 436 host->ecb[ecbno].datalen = 0;
452 sgptr->buf_dma_addr = dma_map_single (&host->edev->dev, 437 host->ecb[ecbno].dataptr = 0;
453 buff, bufflen,
454 DMA_BIDIRECTIONAL);
455 host->ecb[ecbno].dataptr = sgptr->buf_dma_addr;
456 } 438 }
457 host->ecb[ecbno].lun = SCpnt->device->lun; 439 host->ecb[ecbno].lun = SCpnt->device->lun;
458 host->ecb[ecbno].ses = 1; /* Suppress underrun errors */ 440 host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
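
Most of the "convert to use the data buffer accessors" changes in this merge reduce to the same skeleton: scsi_dma_map() subsumes the old use_sg/request_buffer branching (the midlayer now presents every data transfer as a scatterlist), scsi_for_each_sg() walks the mapped entries, and scsi_dma_unmap() is the single teardown call. A hedged sketch of both halves; fill_hw_segment() stands in for driver-specific descriptor setup and is not a real API:

	#include <linux/scatterlist.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>

	static void fill_hw_segment(int idx, dma_addr_t addr, unsigned int len)
	{
		/* hypothetical: write one hardware SG descriptor */
	}

	static int build_sg_list(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int i, nseg;

		nseg = scsi_dma_map(cmd);	/* 0 = no data, <0 = mapping failure */
		if (nseg < 0)
			return SCSI_MLQUEUE_HOST_BUSY;	/* softer than the BUG_ON() above */

		scsi_for_each_sg(cmd, sg, nseg, i)
			fill_hw_segment(i, sg_dma_address(sg), sg_dma_len(sg));
		return 0;
	}

	/* completion path: one call covers what used to be two unmap cases */
	static void complete_io(struct scsi_cmnd *cmd)
	{
		scsi_dma_unmap(cmd);
		cmd->scsi_done(cmd);
	}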
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6054881f21f1..286ab83116f9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -376,21 +376,10 @@ static __inline void
376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
377{ 377{
378 struct scsi_cmnd *cmd; 378 struct scsi_cmnd *cmd;
379 int direction;
380 379
381 cmd = scb->io_ctx; 380 cmd = scb->io_ctx;
382 direction = cmd->sc_data_direction;
383 ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE); 381 ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
384 if (cmd->use_sg != 0) { 382 scsi_dma_unmap(cmd);
385 struct scatterlist *sg;
386
387 sg = (struct scatterlist *)cmd->request_buffer;
388 pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
389 } else if (cmd->request_bufflen != 0) {
390 pci_unmap_single(ahd->dev_softc,
391 scb->platform_data->buf_busaddr,
392 cmd->request_bufflen, direction);
393 }
394} 383}
395 384
396/******************************** Macros **************************************/ 385/******************************** Macros **************************************/
@@ -1422,6 +1411,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1422 u_int col_idx; 1411 u_int col_idx;
1423 uint16_t mask; 1412 uint16_t mask;
1424 unsigned long flags; 1413 unsigned long flags;
1414 int nseg;
1425 1415
1426 ahd_lock(ahd, &flags); 1416 ahd_lock(ahd, &flags);
1427 1417
@@ -1494,18 +1484,17 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1494 ahd_set_residual(scb, 0); 1484 ahd_set_residual(scb, 0);
1495 ahd_set_sense_residual(scb, 0); 1485 ahd_set_sense_residual(scb, 0);
1496 scb->sg_count = 0; 1486 scb->sg_count = 0;
1497 if (cmd->use_sg != 0) { 1487
1498 void *sg; 1488 nseg = scsi_dma_map(cmd);
1499 struct scatterlist *cur_seg; 1489 BUG_ON(nseg < 0);
1500 u_int nseg; 1490 if (nseg > 0) {
1501 int dir; 1491 void *sg = scb->sg_list;
1502 1492 struct scatterlist *cur_seg;
1503 cur_seg = (struct scatterlist *)cmd->request_buffer; 1493 int i;
1504 dir = cmd->sc_data_direction; 1494
1505 nseg = pci_map_sg(ahd->dev_softc, cur_seg,
1506 cmd->use_sg, dir);
1507 scb->platform_data->xfer_len = 0; 1495 scb->platform_data->xfer_len = 0;
1508 for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) { 1496
1497 scsi_for_each_sg(cmd, cur_seg, nseg, i) {
1509 dma_addr_t addr; 1498 dma_addr_t addr;
1510 bus_size_t len; 1499 bus_size_t len;
1511 1500
@@ -1513,22 +1502,8 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1513 len = sg_dma_len(cur_seg); 1502 len = sg_dma_len(cur_seg);
1514 scb->platform_data->xfer_len += len; 1503 scb->platform_data->xfer_len += len;
1515 sg = ahd_sg_setup(ahd, scb, sg, addr, len, 1504 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
1516 /*last*/nseg == 1); 1505 i == (nseg - 1));
1517 } 1506 }
1518 } else if (cmd->request_bufflen != 0) {
1519 void *sg;
1520 dma_addr_t addr;
1521 int dir;
1522
1523 sg = scb->sg_list;
1524 dir = cmd->sc_data_direction;
1525 addr = pci_map_single(ahd->dev_softc,
1526 cmd->request_buffer,
1527 cmd->request_bufflen, dir);
1528 scb->platform_data->xfer_len = cmd->request_bufflen;
1529 scb->platform_data->buf_busaddr = addr;
1530 sg = ahd_sg_setup(ahd, scb, sg, addr,
1531 cmd->request_bufflen, /*last*/TRUE);
1532 } 1507 }
1533 1508
1534 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); 1509 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index ad9761b237dc..853998be1474 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -781,7 +781,7 @@ int ahd_get_transfer_dir(struct scb *scb)
781static __inline 781static __inline
782void ahd_set_residual(struct scb *scb, u_long resid) 782void ahd_set_residual(struct scb *scb, u_long resid)
783{ 783{
784 scb->io_ctx->resid = resid; 784 scsi_set_resid(scb->io_ctx, resid);
785} 785}
786 786
787static __inline 787static __inline
@@ -793,7 +793,7 @@ void ahd_set_sense_residual(struct scb *scb, u_long resid)
793static __inline 793static __inline
794u_long ahd_get_residual(struct scb *scb) 794u_long ahd_get_residual(struct scb *scb)
795{ 795{
796 return (scb->io_ctx->resid); 796 return scsi_get_resid(scb->io_ctx);
797} 797}
798 798
799static __inline 799static __inline
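
The resid accessors complete the same encapsulation on the status side: drivers report an underrun through scsi_set_resid() instead of writing cmd->resid directly, so the field can move when the midlayer's command layout changes. A sketch of typical use, assuming an HBA that reports the byte count it actually moved:

	#include <scsi/scsi_cmnd.h>

	static void report_underrun(struct scsi_cmnd *cmd, unsigned int actual)
	{
		/* residual = what was requested minus what the HBA moved */
		scsi_set_resid(cmd, scsi_bufflen(cmd) - actual);
	}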
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 660f26e23a38..1803ab6fc21c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -402,18 +402,8 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
402 402
403 cmd = scb->io_ctx; 403 cmd = scb->io_ctx;
404 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); 404 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
405 if (cmd->use_sg != 0) { 405
406 struct scatterlist *sg; 406 scsi_dma_unmap(cmd);
407
408 sg = (struct scatterlist *)cmd->request_buffer;
409 pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
410 cmd->sc_data_direction);
411 } else if (cmd->request_bufflen != 0) {
412 pci_unmap_single(ahc->dev_softc,
413 scb->platform_data->buf_busaddr,
414 cmd->request_bufflen,
415 cmd->sc_data_direction);
416 }
417} 407}
418 408
419static __inline int 409static __inline int
@@ -1381,6 +1371,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1381 struct ahc_tmode_tstate *tstate; 1371 struct ahc_tmode_tstate *tstate;
1382 uint16_t mask; 1372 uint16_t mask;
1383 struct scb_tailq *untagged_q = NULL; 1373 struct scb_tailq *untagged_q = NULL;
1374 int nseg;
1384 1375
1385 /* 1376 /*
1386 * Schedule us to run later. The only reason we are not 1377 * Schedule us to run later. The only reason we are not
@@ -1472,23 +1463,21 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1472 ahc_set_residual(scb, 0); 1463 ahc_set_residual(scb, 0);
1473 ahc_set_sense_residual(scb, 0); 1464 ahc_set_sense_residual(scb, 0);
1474 scb->sg_count = 0; 1465 scb->sg_count = 0;
1475 if (cmd->use_sg != 0) { 1466
1467 nseg = scsi_dma_map(cmd);
1468 BUG_ON(nseg < 0);
1469 if (nseg > 0) {
1476 struct ahc_dma_seg *sg; 1470 struct ahc_dma_seg *sg;
1477 struct scatterlist *cur_seg; 1471 struct scatterlist *cur_seg;
1478 struct scatterlist *end_seg; 1472 int i;
1479 int nseg;
1480 1473
1481 cur_seg = (struct scatterlist *)cmd->request_buffer;
1482 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
1483 cmd->sc_data_direction);
1484 end_seg = cur_seg + nseg;
1485 /* Copy the segments into the SG list. */ 1474 /* Copy the segments into the SG list. */
1486 sg = scb->sg_list; 1475 sg = scb->sg_list;
1487 /* 1476 /*
1488 * The sg_count may be larger than nseg if 1477 * The sg_count may be larger than nseg if
1489 * a transfer crosses a 32bit page. 1478 * a transfer crosses a 32bit page.
1490 */ 1479 */
1491 while (cur_seg < end_seg) { 1480 scsi_for_each_sg(cmd, cur_seg, nseg, i) {
1492 dma_addr_t addr; 1481 dma_addr_t addr;
1493 bus_size_t len; 1482 bus_size_t len;
1494 int consumed; 1483 int consumed;
@@ -1499,7 +1488,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1499 sg, addr, len); 1488 sg, addr, len);
1500 sg += consumed; 1489 sg += consumed;
1501 scb->sg_count += consumed; 1490 scb->sg_count += consumed;
1502 cur_seg++;
1503 } 1491 }
1504 sg--; 1492 sg--;
1505 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); 1493 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
@@ -1516,33 +1504,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1516 */ 1504 */
1517 scb->hscb->dataptr = scb->sg_list->addr; 1505 scb->hscb->dataptr = scb->sg_list->addr;
1518 scb->hscb->datacnt = scb->sg_list->len; 1506 scb->hscb->datacnt = scb->sg_list->len;
1519 } else if (cmd->request_bufflen != 0) {
1520 struct ahc_dma_seg *sg;
1521 dma_addr_t addr;
1522
1523 sg = scb->sg_list;
1524 addr = pci_map_single(ahc->dev_softc,
1525 cmd->request_buffer,
1526 cmd->request_bufflen,
1527 cmd->sc_data_direction);
1528 scb->platform_data->buf_busaddr = addr;
1529 scb->sg_count = ahc_linux_map_seg(ahc, scb,
1530 sg, addr,
1531 cmd->request_bufflen);
1532 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
1533
1534 /*
1535 * Reset the sg list pointer.
1536 */
1537 scb->hscb->sgptr =
1538 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
1539
1540 /*
1541 * Copy the first SG into the "current"
1542 * data pointer area.
1543 */
1544 scb->hscb->dataptr = sg->addr;
1545 scb->hscb->datacnt = sg->len;
1546 } else { 1507 } else {
1547 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); 1508 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
1548 scb->hscb->dataptr = 0; 1509 scb->hscb->dataptr = 0;
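
The comment retained in the hunk above is why scb->sg_count and nseg can differ: ahc_linux_map_seg() may split one mapped segment at a 4GB boundary, since each hardware descriptor carries only a 32-bit address. A generic illustration of the split count, independent of the ahc descriptor layout:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* how many 32-bit-addressable chunks one DMA segment needs */
	static int chunks_for_segment(u64 addr, u64 len)
	{
		int n = 0;

		while (len) {
			/* bytes left before the next 4GB boundary */
			u64 span = 0x100000000ULL - (addr & 0xffffffffULL);
			u64 chunk = min(len, span);

			addr += chunk;
			len -= chunk;
			n++;
		}
		return n;
	}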
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 8fee7edc6eb3..b48dab447bde 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -751,7 +751,7 @@ int ahc_get_transfer_dir(struct scb *scb)
751static __inline 751static __inline
752void ahc_set_residual(struct scb *scb, u_long resid) 752void ahc_set_residual(struct scb *scb, u_long resid)
753{ 753{
754 scb->io_ctx->resid = resid; 754 scsi_set_resid(scb->io_ctx, resid);
755} 755}
756 756
757static __inline 757static __inline
@@ -763,7 +763,7 @@ void ahc_set_sense_residual(struct scb *scb, u_long resid)
763static __inline 763static __inline
764u_long ahc_get_residual(struct scb *scb) 764u_long ahc_get_residual(struct scb *scb)
765{ 765{
766 return (scb->io_ctx->resid); 766 return scsi_get_resid(scb->io_ctx);
767} 767}
768 768
769static __inline 769static __inline
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index a988d5abf702..4998bb850c49 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2690,17 +2690,8 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
2690 struct aic7xxx_scb *scbp; 2690 struct aic7xxx_scb *scbp;
2691 unsigned char queue_depth; 2691 unsigned char queue_depth;
2692 2692
2693 if (cmd->use_sg > 1) 2693 scsi_dma_unmap(cmd);
2694 {
2695 struct scatterlist *sg;
2696 2694
2697 sg = (struct scatterlist *)cmd->request_buffer;
2698 pci_unmap_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
2699 }
2700 else if (cmd->request_bufflen)
2701 pci_unmap_single(p->pdev, aic7xxx_mapping(cmd),
2702 cmd->request_bufflen,
2703 cmd->sc_data_direction);
2704 if (scb->flags & SCB_SENSE) 2695 if (scb->flags & SCB_SENSE)
2705 { 2696 {
2706 pci_unmap_single(p->pdev, 2697 pci_unmap_single(p->pdev,
@@ -3869,7 +3860,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
3869 * the mid layer didn't check residual data counts to see if the 3860 * the mid layer didn't check residual data counts to see if the
3870 * command needs retried. 3861 * command needs retried.
3871 */ 3862 */
3872 cmd->resid = scb->sg_length - actual; 3863 scsi_set_resid(cmd, scb->sg_length - actual);
3873 aic7xxx_status(cmd) = hscb->target_status; 3864 aic7xxx_status(cmd) = hscb->target_status;
3874 } 3865 }
3875 } 3866 }
@@ -6581,7 +6572,7 @@ aic7xxx_slave_alloc(struct scsi_device *SDptr)
6581 struct aic7xxx_host *p = (struct aic7xxx_host *)SDptr->host->hostdata; 6572 struct aic7xxx_host *p = (struct aic7xxx_host *)SDptr->host->hostdata;
6582 struct aic_dev_data *aic_dev; 6573 struct aic_dev_data *aic_dev;
6583 6574
6584 aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_ATOMIC | GFP_KERNEL); 6575 aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_KERNEL);
6585 if(!aic_dev) 6576 if(!aic_dev)
6586 return 1; 6577 return 1;
6587 /* 6578 /*
@@ -10137,6 +10128,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10137 struct scsi_device *sdptr = cmd->device; 10128 struct scsi_device *sdptr = cmd->device;
10138 unsigned char tindex = TARGET_INDEX(cmd); 10129 unsigned char tindex = TARGET_INDEX(cmd);
10139 struct request *req = cmd->request; 10130 struct request *req = cmd->request;
10131 int use_sg;
10140 10132
10141 mask = (0x01 << tindex); 10133 mask = (0x01 << tindex);
10142 hscb = scb->hscb; 10134 hscb = scb->hscb;
@@ -10209,8 +10201,10 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10209 memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len); 10201 memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len);
10210 hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd)); 10202 hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd));
10211 10203
10212 if (cmd->use_sg) 10204 use_sg = scsi_dma_map(cmd);
10213 { 10205 BUG_ON(use_sg < 0);
10206
10207 if (use_sg) {
10214 struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */ 10208 struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */
10215 10209
10216 /* 10210 /*
@@ -10219,11 +10213,11 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10219 * differences and the kernel SG list uses virtual addresses where 10213 * differences and the kernel SG list uses virtual addresses where
10220 * we need physical addresses. 10214 * we need physical addresses.
10221 */ 10215 */
10222 int i, use_sg; 10216 int i;
10223 10217
10224 sg = (struct scatterlist *)cmd->request_buffer;
10225 scb->sg_length = 0; 10218 scb->sg_length = 0;
10226 use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction); 10219
10220
10227 /* 10221 /*
10228 * Copy the segments into the SG array. NOTE!!! - We used to 10222 * Copy the segments into the SG array. NOTE!!! - We used to
10229 * have the first entry both in the data_pointer area and the first 10223 * have the first entry both in the data_pointer area and the first
@@ -10231,10 +10225,9 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10231 * entry in both places, but now we download the address of 10225 * entry in both places, but now we download the address of
10232 * scb->sg_list[1] instead of 0 to the sg pointer in the hscb. 10226 * scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
10233 */ 10227 */
10234 for (i = 0; i < use_sg; i++) 10228 scsi_for_each_sg(cmd, sg, use_sg, i) {
10235 { 10229 unsigned int len = sg_dma_len(sg);
10236 unsigned int len = sg_dma_len(sg+i); 10230 scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg));
10237 scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg+i));
10238 scb->sg_list[i].length = cpu_to_le32(len); 10231 scb->sg_list[i].length = cpu_to_le32(len);
10239 scb->sg_length += len; 10232 scb->sg_length += len;
10240 } 10233 }
@@ -10244,33 +10237,13 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10244 scb->sg_count = i; 10237 scb->sg_count = i;
10245 hscb->SG_segment_count = i; 10238 hscb->SG_segment_count = i;
10246 hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1])); 10239 hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1]));
10247 } 10240 } else {
10248 else
10249 {
10250 if (cmd->request_bufflen)
10251 {
10252 unsigned int address = pci_map_single(p->pdev, cmd->request_buffer,
10253 cmd->request_bufflen,
10254 cmd->sc_data_direction);
10255 aic7xxx_mapping(cmd) = address;
10256 scb->sg_list[0].address = cpu_to_le32(address);
10257 scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
10258 scb->sg_count = 1;
10259 scb->sg_length = cmd->request_bufflen;
10260 hscb->SG_segment_count = 1;
10261 hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[0]));
10262 hscb->data_count = scb->sg_list[0].length;
10263 hscb->data_pointer = scb->sg_list[0].address;
10264 }
10265 else
10266 {
10267 scb->sg_count = 0; 10241 scb->sg_count = 0;
10268 scb->sg_length = 0; 10242 scb->sg_length = 0;
10269 hscb->SG_segment_count = 0; 10243 hscb->SG_segment_count = 0;
10270 hscb->SG_list_pointer = 0; 10244 hscb->SG_list_pointer = 0;
10271 hscb->data_count = 0; 10245 hscb->data_count = 0;
10272 hscb->data_pointer = 0; 10246 hscb->data_pointer = 0;
10273 }
10274 } 10247 }
10275} 10248}
10276 10249
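
One aic7xxx_old hunk above also drops GFP_ATOMIC from a GFP_ATOMIC | GFP_KERNEL mask. The two flags are alternatives, not modifiers: GFP_KERNEL includes __GFP_WAIT and may sleep, while GFP_ATOMIC exists precisely for contexts that must not, so OR-ing them yields a sleeping allocation with a misleading name. slave_alloc() runs in process context, hence plain GFP_KERNEL; a self-contained sketch (the struct is a stand-in, not the driver's):

	#include <linux/slab.h>

	struct aic_dev_data_example { int placeholder; };

	static struct aic_dev_data_example *alloc_dev_data(void)
	{
		/* process context (slave_alloc) may sleep: GFP_KERNEL alone */
		return kmalloc(sizeof(struct aic_dev_data_example), GFP_KERNEL);
	}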
diff --git a/drivers/scsi/amiga7xx.c b/drivers/scsi/amiga7xx.c
deleted file mode 100644
index d5d3c4d5a253..000000000000
--- a/drivers/scsi/amiga7xx.c
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
3 * Amiga MacroSystemUS WarpEngine SCSI controller.
4 * Amiga Technologies A4000T SCSI controller.
5 * Amiga Technologies/DKB A4091 SCSI controller.
6 *
7 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
8 * plus modifications of the 53c7xx.c driver to support the Amiga.
9 */
10#include <linux/types.h>
11#include <linux/mm.h>
12#include <linux/blkdev.h>
13#include <linux/zorro.h>
14#include <linux/stat.h>
15
16#include <asm/setup.h>
17#include <asm/page.h>
18#include <asm/pgtable.h>
19#include <asm/amigaints.h>
20#include <asm/amigahw.h>
21#include <asm/dma.h>
22#include <asm/irq.h>
23
24#include "scsi.h"
25#include <scsi/scsi_host.h>
26#include "53c7xx.h"
27#include "amiga7xx.h"
28
29
30static int amiga7xx_register_one(struct scsi_host_template *tpnt,
31 unsigned long address)
32{
33 long long options;
34 int clock;
35
36 if (!request_mem_region(address, 0x1000, "ncr53c710"))
37 return 0;
38
39 address = (unsigned long)z_ioremap(address, 0x1000);
40 options = OPTION_MEMORY_MAPPED | OPTION_DEBUG_TEST1 | OPTION_INTFLY |
41 OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS |
42 OPTION_DISCONNECT;
43 clock = 50000000; /* 50 MHz SCSI Clock */
44 ncr53c7xx_init(tpnt, 0, 710, address, 0, IRQ_AMIGA_PORTS, DMA_NONE,
45 options, clock);
46 return 1;
47}
48
49
50#ifdef CONFIG_ZORRO
51
52static struct {
53 zorro_id id;
54 unsigned long offset;
55 int absolute; /* offset is absolute address */
56} amiga7xx_table[] = {
57 { .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS, .offset = 0xf40000,
58 .absolute = 1 },
59 { .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx, .offset = 0x40000 },
60 { .id = ZORRO_PROD_CBM_A4091_1, .offset = 0x800000 },
61 { .id = ZORRO_PROD_CBM_A4091_2, .offset = 0x800000 },
62 { .id = ZORRO_PROD_GVP_GFORCE_040_060, .offset = 0x40000 },
63 { 0 }
64};
65
66static int __init amiga7xx_zorro_detect(struct scsi_host_template *tpnt)
67{
68 int num = 0, i;
69 struct zorro_dev *z = NULL;
70 unsigned long address;
71
72 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
73 for (i = 0; amiga7xx_table[i].id; i++)
74 if (z->id == amiga7xx_table[i].id)
75 break;
76 if (!amiga7xx_table[i].id)
77 continue;
78 if (amiga7xx_table[i].absolute)
79 address = amiga7xx_table[i].offset;
80 else
81 address = z->resource.start + amiga7xx_table[i].offset;
82 num += amiga7xx_register_one(tpnt, address);
83 }
84 return num;
85}
86
87#endif /* CONFIG_ZORRO */
88
89
90int __init amiga7xx_detect(struct scsi_host_template *tpnt)
91{
92 static unsigned char called = 0;
93 int num = 0;
94
95 if (called || !MACH_IS_AMIGA)
96 return 0;
97
98 tpnt->proc_name = "Amiga7xx";
99
100 if (AMIGAHW_PRESENT(A4000_SCSI))
101 num += amiga7xx_register_one(tpnt, 0xdd0040);
102
103#ifdef CONFIG_ZORRO
104 num += amiga7xx_zorro_detect(tpnt);
105#endif
106
107 called = 1;
108 return num;
109}
110
111static int amiga7xx_release(struct Scsi_Host *shost)
112{
113 if (shost->irq)
114 free_irq(shost->irq, NULL);
115 if (shost->dma_channel != 0xff)
116 free_dma(shost->dma_channel);
117 if (shost->io_port && shost->n_io_port)
118 release_region(shost->io_port, shost->n_io_port);
119 scsi_unregister(shost);
120 return 0;
121}
122
123static struct scsi_host_template driver_template = {
124 .name = "Amiga NCR53c710 SCSI",
125 .detect = amiga7xx_detect,
126 .release = amiga7xx_release,
127 .queuecommand = NCR53c7xx_queue_command,
128 .abort = NCR53c7xx_abort,
129 .reset = NCR53c7xx_reset,
130 .can_queue = 24,
131 .this_id = 7,
132 .sg_tablesize = 63,
133 .cmd_per_lun = 3,
134 .use_clustering = DISABLE_CLUSTERING
135};
136
137
138#include "scsi_module.c"
diff --git a/drivers/scsi/amiga7xx.h b/drivers/scsi/amiga7xx.h
deleted file mode 100644
index 7cd63a996886..000000000000
--- a/drivers/scsi/amiga7xx.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef AMIGA7XX_H
2
3#include <linux/types.h>
4
5int amiga7xx_detect(struct scsi_host_template *);
6const char *NCR53c7x0_info(void);
7int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
8int NCR53c7xx_abort(Scsi_Cmnd *);
9int NCR53c7x0_release (struct Scsi_Host *);
10int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
11void NCR53c7x0_intr(int irq, void *dev_id);
12
13#ifndef CMD_PER_LUN
14#define CMD_PER_LUN 3
15#endif
16
17#ifndef CAN_QUEUE
18#define CAN_QUEUE 24
19#endif
20
21#include <scsi/scsicam.h>
22
23#endif /* AMIGA7XX_H */
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index aff96db9ccf6..f0b8bf4534f0 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,9 +48,10 @@ struct class_device_attribute;
48 48
49#define ARCMSR_MAX_OUTSTANDING_CMD 256 49#define ARCMSR_MAX_OUTSTANDING_CMD 256
50#define ARCMSR_MAX_FREECCB_NUM 288 50#define ARCMSR_MAX_FREECCB_NUM 288
51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.13" 51#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.14"
52#define ARCMSR_SCSI_INITIATOR_ID 255 52#define ARCMSR_SCSI_INITIATOR_ID 255
53#define ARCMSR_MAX_XFER_SECTORS 512 53#define ARCMSR_MAX_XFER_SECTORS 512
54#define ARCMSR_MAX_XFER_SECTORS_B 4096
54#define ARCMSR_MAX_TARGETID 17 55#define ARCMSR_MAX_TARGETID 17
55#define ARCMSR_MAX_TARGETLUN 8 56#define ARCMSR_MAX_TARGETLUN 8
56#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD 57#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
@@ -469,4 +470,3 @@ extern void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb);
469extern struct class_device_attribute *arcmsr_host_attrs[]; 470extern struct class_device_attribute *arcmsr_host_attrs[];
470extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb); 471extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb);
471void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb); 472void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
472
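
The new ARCMSR_MAX_XFER_SECTORS_B constant raises the per-command transfer ceiling from 512 to 4096 sectors, and the arcmsr_hba.c probe path below applies it only when the controller firmware reports V1.42 or newer. The check is a plain string compare, which works because firm_version is a fixed-format string; a self-contained sketch of the gate (the helper name and EX_ macros are ours, not the driver's):

	#include <linux/string.h>

	#define EX_MAX_XFER_SECTORS	512	/* pre-V1.42 firmware */
	#define EX_MAX_XFER_SECTORS_B	4096	/* V1.42 and later */

	static int pick_max_sectors(const char *firm_version)
	{
		/* "V1.42" compares lexically against fixed-width versions */
		if (strncmp(firm_version, "V1.42", 5) >= 0)
			return EX_MAX_XFER_SECTORS_B;
		return EX_MAX_XFER_SECTORS;
	}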
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 8b46158cc045..0ddfc21e9f7d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -57,6 +57,7 @@
57#include <linux/dma-mapping.h> 57#include <linux/dma-mapping.h>
58#include <linux/timer.h> 58#include <linux/timer.h>
59#include <linux/pci.h> 59#include <linux/pci.h>
60#include <linux/aer.h>
60#include <asm/dma.h> 61#include <asm/dma.h>
61#include <asm/io.h> 62#include <asm/io.h>
62#include <asm/system.h> 63#include <asm/system.h>
@@ -71,7 +72,7 @@
71#include "arcmsr.h" 72#include "arcmsr.h"
72 73
73MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>"); 74MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
74MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter"); 75MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
75MODULE_LICENSE("Dual BSD/GPL"); 76MODULE_LICENSE("Dual BSD/GPL");
76MODULE_VERSION(ARCMSR_DRIVER_VERSION); 77MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77 78
@@ -93,7 +94,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
93static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb); 94static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
94static const char *arcmsr_info(struct Scsi_Host *); 95static const char *arcmsr_info(struct Scsi_Host *);
95static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); 96static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
96 97static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
98 pci_channel_state_t state);
99static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
97static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth) 100static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
98{ 101{
99 if (queue_depth > ARCMSR_MAX_CMD_PERLUN) 102 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -104,7 +107,8 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_de
104 107
105static struct scsi_host_template arcmsr_scsi_host_template = { 108static struct scsi_host_template arcmsr_scsi_host_template = {
106 .module = THIS_MODULE, 109 .module = THIS_MODULE,
107 .name = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION, 110 .name = "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
111 ARCMSR_DRIVER_VERSION,
108 .info = arcmsr_info, 112 .info = arcmsr_info,
109 .queuecommand = arcmsr_queue_command, 113 .queuecommand = arcmsr_queue_command,
110 .eh_abort_handler = arcmsr_abort, 114 .eh_abort_handler = arcmsr_abort,
@@ -119,6 +123,10 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
119 .use_clustering = ENABLE_CLUSTERING, 123 .use_clustering = ENABLE_CLUSTERING,
120 .shost_attrs = arcmsr_host_attrs, 124 .shost_attrs = arcmsr_host_attrs,
121}; 125};
126static struct pci_error_handlers arcmsr_pci_error_handlers = {
127 .error_detected = arcmsr_pci_error_detected,
128 .slot_reset = arcmsr_pci_slot_reset,
129};
122 130
123static struct pci_device_id arcmsr_device_id_table[] = { 131static struct pci_device_id arcmsr_device_id_table[] = {
124 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, 132 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
@@ -144,7 +152,8 @@ static struct pci_driver arcmsr_pci_driver = {
144 .id_table = arcmsr_device_id_table, 152 .id_table = arcmsr_device_id_table,
145 .probe = arcmsr_probe, 153 .probe = arcmsr_probe,
146 .remove = arcmsr_remove, 154 .remove = arcmsr_remove,
147 .shutdown = arcmsr_shutdown 155 .shutdown = arcmsr_shutdown,
156 .err_handler = &arcmsr_pci_error_handlers,
148}; 157};
149 158
150static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id) 159static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
@@ -328,6 +337,8 @@ static int arcmsr_probe(struct pci_dev *pdev,
328 337
329 arcmsr_iop_init(acb); 338 arcmsr_iop_init(acb);
330 pci_set_drvdata(pdev, host); 339 pci_set_drvdata(pdev, host);
340 if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
341 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
331 342
332 error = scsi_add_host(host, &pdev->dev); 343 error = scsi_add_host(host, &pdev->dev);
333 if (error) 344 if (error)
@@ -338,6 +349,7 @@ static int arcmsr_probe(struct pci_dev *pdev,
338 goto out_free_sysfs; 349 goto out_free_sysfs;
339 350
340 scsi_scan_host(host); 351 scsi_scan_host(host);
352 pci_enable_pcie_error_reporting(pdev);
341 return 0; 353 return 0;
342 out_free_sysfs: 354 out_free_sysfs:
343 out_free_irq: 355 out_free_irq:
@@ -369,19 +381,9 @@ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
369 381
370static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 382static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
371{ 383{
372 struct AdapterControlBlock *acb = ccb->acb;
373 struct scsi_cmnd *pcmd = ccb->pcmd; 384 struct scsi_cmnd *pcmd = ccb->pcmd;
374 385
375 if (pcmd->use_sg != 0) { 386 scsi_dma_unmap(pcmd);
376 struct scatterlist *sl;
377
378 sl = (struct scatterlist *)pcmd->request_buffer;
379 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
380 }
381 else if (pcmd->request_bufflen != 0)
382 pci_unmap_single(acb->pdev,
383 pcmd->SCp.dma_handle,
384 pcmd->request_bufflen, pcmd->sc_data_direction);
385} 387}
386 388
387static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) 389static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
@@ -498,7 +500,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
498 500
499static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 501static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
500{ 502{
501 struct MessageUnit __iomem *reg=acb->pmu; 503 struct MessageUnit __iomem *reg = acb->pmu;
502 504
503 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); 505 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
504 if (arcmsr_wait_msgint_ready(acb)) 506 if (arcmsr_wait_msgint_ready(acb))
@@ -551,6 +553,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
551 int8_t *psge = (int8_t *)&arcmsr_cdb->u; 553 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
552 uint32_t address_lo, address_hi; 554 uint32_t address_lo, address_hi;
553 int arccdbsize = 0x30; 555 int arccdbsize = 0x30;
556 int nseg;
554 557
555 ccb->pcmd = pcmd; 558 ccb->pcmd = pcmd;
556 memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB)); 559 memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
@@ -561,20 +564,20 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
561 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len; 564 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
562 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb; 565 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
563 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 566 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
564 if (pcmd->use_sg) { 567
565 int length, sgcount, i, cdb_sgcount = 0; 568 nseg = scsi_dma_map(pcmd);
566 struct scatterlist *sl; 569 BUG_ON(nseg < 0);
567 570
568 /* Get Scatter Gather List from scsiport. */ 571 if (nseg) {
569 sl = (struct scatterlist *) pcmd->request_buffer; 572 int length, i, cdb_sgcount = 0;
570 sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg, 573 struct scatterlist *sg;
571 pcmd->sc_data_direction); 574
572 /* map stor port SG list to our iop SG List. */ 575 /* map stor port SG list to our iop SG List. */
573 for (i = 0; i < sgcount; i++) { 576 scsi_for_each_sg(pcmd, sg, nseg, i) {
574 /* Get the physical address of the current data pointer */ 577 /* Get the physical address of the current data pointer */
575 length = cpu_to_le32(sg_dma_len(sl)); 578 length = cpu_to_le32(sg_dma_len(sg));
576 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl))); 579 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
577 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl))); 580 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
578 if (address_hi == 0) { 581 if (address_hi == 0) {
579 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; 582 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
580 583
@@ -591,32 +594,12 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
591 psge += sizeof (struct SG64ENTRY); 594 psge += sizeof (struct SG64ENTRY);
592 arccdbsize += sizeof (struct SG64ENTRY); 595 arccdbsize += sizeof (struct SG64ENTRY);
593 } 596 }
594 sl++;
595 cdb_sgcount++; 597 cdb_sgcount++;
596 } 598 }
597 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount; 599 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
598 arcmsr_cdb->DataLength = pcmd->request_bufflen; 600 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
599 if ( arccdbsize > 256) 601 if ( arccdbsize > 256)
600 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; 602 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
601 } else if (pcmd->request_bufflen) {
602 dma_addr_t dma_addr;
603 dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
604 pcmd->request_bufflen, pcmd->sc_data_direction);
605 pcmd->SCp.dma_handle = dma_addr;
606 address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
607 address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
608 if (address_hi == 0) {
609 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
610 pdma_sg->address = address_lo;
611 pdma_sg->length = pcmd->request_bufflen;
612 } else {
613 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
614 pdma_sg->addresshigh = address_hi;
615 pdma_sg->address = address_lo;
616 pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
617 }
618 arcmsr_cdb->sgcount = 1;
619 arcmsr_cdb->DataLength = pcmd->request_bufflen;
620 } 603 }
621 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) { 604 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
622 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 605 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
@@ -747,7 +730,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
747 int id, lun; 730 int id, lun;
748 /* 731 /*
749 **************************************************************** 732 ****************************************************************
750 ** areca cdb command done 733 ** areca cdb command done
751 **************************************************************** 734 ****************************************************************
752 */ 735 */
753 while (1) { 736 while (1) {
@@ -758,20 +741,20 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
758 (flag_ccb << 5)); 741 (flag_ccb << 5));
759 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { 742 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
760 if (ccb->startdone == ARCMSR_CCB_ABORTED) { 743 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
761 struct scsi_cmnd *abortcmd=ccb->pcmd; 744 struct scsi_cmnd *abortcmd = ccb->pcmd;
762 if (abortcmd) { 745 if (abortcmd) {
763 abortcmd->result |= DID_ABORT >> 16; 746 abortcmd->result |= DID_ABORT >> 16;
764 arcmsr_ccb_complete(ccb, 1); 747 arcmsr_ccb_complete(ccb, 1);
765 printk(KERN_NOTICE 748 printk(KERN_NOTICE
766 "arcmsr%d: ccb='0x%p' isr got aborted command \n" 749 "arcmsr%d: ccb ='0x%p' isr got aborted command \n"
767 , acb->host->host_no, ccb); 750 , acb->host->host_no, ccb);
768 } 751 }
769 continue; 752 continue;
770 } 753 }
771 printk(KERN_NOTICE 754 printk(KERN_NOTICE
772 "arcmsr%d: isr get an illegal ccb command done acb='0x%p'" 755 "arcmsr%d: isr get an illegal ccb command done acb = '0x%p'"
773 "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x" 756 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
774 " ccboutstandingcount=%d \n" 757 " ccboutstandingcount = %d \n"
775 , acb->host->host_no 758 , acb->host->host_no
776 , acb 759 , acb
777 , ccb 760 , ccb
@@ -791,7 +774,7 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
791 switch(ccb->arcmsr_cdb.DeviceStatus) { 774 switch(ccb->arcmsr_cdb.DeviceStatus) {
792 case ARCMSR_DEV_SELECT_TIMEOUT: { 775 case ARCMSR_DEV_SELECT_TIMEOUT: {
793 acb->devstate[id][lun] = ARECA_RAID_GONE; 776 acb->devstate[id][lun] = ARECA_RAID_GONE;
794 ccb->pcmd->result = DID_TIME_OUT << 16; 777 ccb->pcmd->result = DID_NO_CONNECT << 16;
795 arcmsr_ccb_complete(ccb, 1); 778 arcmsr_ccb_complete(ccb, 1);
796 } 779 }
797 break; 780 break;
@@ -810,8 +793,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
810 break; 793 break;
811 default: 794 default:
812 printk(KERN_NOTICE 795 printk(KERN_NOTICE
813 "arcmsr%d: scsi id=%d lun=%d" 796 "arcmsr%d: scsi id = %d lun = %d"
814 " isr get command error done," 797 " isr get command error done, "
815 "but got unknown DeviceStatus = 0x%x \n" 798 "but got unknown DeviceStatus = 0x%x \n"
816 , acb->host->host_no 799 , acb->host->host_no
817 , id 800 , id
@@ -848,24 +831,21 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
848 struct CMD_MESSAGE_FIELD *pcmdmessagefld; 831 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
849 int retvalue = 0, transfer_len = 0; 832 int retvalue = 0, transfer_len = 0;
850 char *buffer; 833 char *buffer;
834 struct scatterlist *sg;
851 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 | 835 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
852 (uint32_t ) cmd->cmnd[6] << 16 | 836 (uint32_t ) cmd->cmnd[6] << 16 |
853 (uint32_t ) cmd->cmnd[7] << 8 | 837 (uint32_t ) cmd->cmnd[7] << 8 |
854 (uint32_t ) cmd->cmnd[8]; 838 (uint32_t ) cmd->cmnd[8];
855 /* 4 bytes: Areca io control code */ 839 /* 4 bytes: Areca io control code */
856 if (cmd->use_sg) {
857 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
858 840
859 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 841 sg = scsi_sglist(cmd);
860 if (cmd->use_sg > 1) { 842 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
861 retvalue = ARCMSR_MESSAGE_FAIL; 843 if (scsi_sg_count(cmd) > 1) {
862 goto message_out; 844 retvalue = ARCMSR_MESSAGE_FAIL;
863 } 845 goto message_out;
864 transfer_len += sg->length;
865 } else {
866 buffer = cmd->request_buffer;
867 transfer_len = cmd->request_bufflen;
868 } 846 }
847 transfer_len += sg->length;
848
869 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 849 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
870 retvalue = ARCMSR_MESSAGE_FAIL; 850 retvalue = ARCMSR_MESSAGE_FAIL;
871 goto message_out; 851 goto message_out;
@@ -1057,12 +1037,9 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
1057 retvalue = ARCMSR_MESSAGE_FAIL; 1037 retvalue = ARCMSR_MESSAGE_FAIL;
1058 } 1038 }
1059 message_out: 1039 message_out:
1060 if (cmd->use_sg) { 1040 sg = scsi_sglist(cmd);
1061 struct scatterlist *sg; 1041 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1062 1042
1063 sg = (struct scatterlist *) cmd->request_buffer;
1064 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1065 }
1066 return retvalue; 1043 return retvalue;
1067} 1044}
1068 1045
@@ -1085,6 +1062,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1085 case INQUIRY: { 1062 case INQUIRY: {
1086 unsigned char inqdata[36]; 1063 unsigned char inqdata[36];
1087 char *buffer; 1064 char *buffer;
1065 struct scatterlist *sg;
1088 1066
1089 if (cmd->device->lun) { 1067 if (cmd->device->lun) {
1090 cmd->result = (DID_TIME_OUT << 16); 1068 cmd->result = (DID_TIME_OUT << 16);
@@ -1096,7 +1074,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1096 inqdata[1] = 0; 1074 inqdata[1] = 0;
1097 /* rem media bit & Dev Type Modifier */ 1075 /* rem media bit & Dev Type Modifier */
1098 inqdata[2] = 0; 1076 inqdata[2] = 0;
1099 /* ISO,ECMA,& ANSI versions */ 1077 /* ISO, ECMA, & ANSI versions */
1100 inqdata[4] = 31; 1078 inqdata[4] = 31;
1101 /* length of additional data */ 1079 /* length of additional data */
1102 strncpy(&inqdata[8], "Areca ", 8); 1080 strncpy(&inqdata[8], "Areca ", 8);
@@ -1104,21 +1082,14 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1104 strncpy(&inqdata[16], "RAID controller ", 16); 1082 strncpy(&inqdata[16], "RAID controller ", 16);
1105 /* Product Identification */ 1083 /* Product Identification */
1106 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1084 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1107 if (cmd->use_sg) {
1108 struct scatterlist *sg;
1109 1085
1110 sg = (struct scatterlist *) cmd->request_buffer; 1086 sg = scsi_sglist(cmd);
1111 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1087 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1112 } else { 1088
1113 buffer = cmd->request_buffer;
1114 }
1115 memcpy(buffer, inqdata, sizeof(inqdata)); 1089 memcpy(buffer, inqdata, sizeof(inqdata));
1116 if (cmd->use_sg) { 1090 sg = scsi_sglist(cmd);
1117 struct scatterlist *sg; 1091 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1118 1092
1119 sg = (struct scatterlist *) cmd->request_buffer;
1120 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1121 }
1122 cmd->scsi_done(cmd); 1093 cmd->scsi_done(cmd);
1123 } 1094 }
1124 break; 1095 break;
@@ -1153,7 +1124,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1153 , acb->host->host_no); 1124 , acb->host->host_no);
1154 return SCSI_MLQUEUE_HOST_BUSY; 1125 return SCSI_MLQUEUE_HOST_BUSY;
1155 } 1126 }
1156 if(target == 16) { 1127 if (target == 16) {
1157 /* virtual device for iop message transfer */ 1128 /* virtual device for iop message transfer */
1158 arcmsr_handle_virtual_command(acb, cmd); 1129 arcmsr_handle_virtual_command(acb, cmd);
1159 return 0; 1130 return 0;
@@ -1166,7 +1137,7 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1166 printk(KERN_NOTICE 1137 printk(KERN_NOTICE
1167 "arcmsr%d: block 'read/write'" 1138 "arcmsr%d: block 'read/write'"
1168 "command with gone raid volume" 1139 "command with gone raid volume"
1169 " Cmd=%2x, TargetId=%d, Lun=%d \n" 1140 " Cmd = %2x, TargetId = %d, Lun = %d \n"
1170 , acb->host->host_no 1141 , acb->host->host_no
1171 , cmd->cmnd[0] 1142 , cmd->cmnd[0]
1172 , target, lun); 1143 , target, lun);
@@ -1257,7 +1228,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1257 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || 1228 if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
1258 (ccb == poll_ccb)) { 1229 (ccb == poll_ccb)) {
1259 printk(KERN_NOTICE 1230 printk(KERN_NOTICE
1260 "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'" 1231 "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
1261 " poll command abort successfully \n" 1232 " poll command abort successfully \n"
1262 , acb->host->host_no 1233 , acb->host->host_no
1263 , ccb->pcmd->device->id 1234 , ccb->pcmd->device->id
@@ -1270,8 +1241,8 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1270 } 1241 }
1271 printk(KERN_NOTICE 1242 printk(KERN_NOTICE
1272 "arcmsr%d: polling get an illegal ccb" 1243 "arcmsr%d: polling get an illegal ccb"
1273 " command done ccb='0x%p'" 1244 " command done ccb ='0x%p'"
1274 "ccboutstandingcount=%d \n" 1245 "ccboutstandingcount = %d \n"
1275 , acb->host->host_no 1246 , acb->host->host_no
1276 , ccb 1247 , ccb
1277 , atomic_read(&acb->ccboutstandingcount)); 1248 , atomic_read(&acb->ccboutstandingcount));
@@ -1288,7 +1259,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1288 switch(ccb->arcmsr_cdb.DeviceStatus) { 1259 switch(ccb->arcmsr_cdb.DeviceStatus) {
1289 case ARCMSR_DEV_SELECT_TIMEOUT: { 1260 case ARCMSR_DEV_SELECT_TIMEOUT: {
1290 acb->devstate[id][lun] = ARECA_RAID_GONE; 1261 acb->devstate[id][lun] = ARECA_RAID_GONE;
1291 ccb->pcmd->result = DID_TIME_OUT << 16; 1262 ccb->pcmd->result = DID_NO_CONNECT << 16;
1292 arcmsr_ccb_complete(ccb, 1); 1263 arcmsr_ccb_complete(ccb, 1);
1293 } 1264 }
1294 break; 1265 break;
@@ -1307,7 +1278,7 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1307 break; 1278 break;
1308 default: 1279 default:
1309 printk(KERN_NOTICE 1280 printk(KERN_NOTICE
1310 "arcmsr%d: scsi id=%d lun=%d" 1281 "arcmsr%d: scsi id = %d lun = %d"
1311 " polling and getting command error done" 1282 " polling and getting command error done"
1312 "but got unknown DeviceStatus = 0x%x \n" 1283 "but got unknown DeviceStatus = 0x%x \n"
1313 , acb->host->host_no 1284 , acb->host->host_no
@@ -1322,6 +1293,94 @@ static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1322 } 1293 }
1323 } 1294 }
1324} 1295}
1296static void arcmsr_done4_abort_postqueue(struct AdapterControlBlock *acb)
1297{
1298 int i = 0, found = 0;
1299 int id, lun;
1300 uint32_t flag_ccb, outbound_intstatus;
1301 struct MessageUnit __iomem *reg = acb->pmu;
1302 struct CommandControlBlock *ccb;
1303 /*clear and abort all outbound posted Q*/
1304
1305 while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
1306(i++ < 256)){
1307 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
1308(flag_ccb << 5));
1309 if (ccb){
1310 if ((ccb->acb != acb)||(ccb->startdone != \
1311ARCMSR_CCB_START)){
1312 printk(KERN_NOTICE "arcmsr%d: polling get \
1313an illegal ccb" "command done ccb = '0x%p'""ccboutstandingcount = %d \n",
1314 acb->host->host_no, ccb,
1315 atomic_read(&acb->ccboutstandingcount));
1316 continue;
1317 }
1318
1319 id = ccb->pcmd->device->id;
1320 lun = ccb->pcmd->device->lun;
1321 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)){
1322 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1323 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1324 ccb->pcmd->result = DID_OK << 16;
1325 arcmsr_ccb_complete(ccb, 1);
1326 }
1327 else {
1328 switch(ccb->arcmsr_cdb.DeviceStatus) {
1329 case ARCMSR_DEV_SELECT_TIMEOUT: {
1330 acb->devstate[id][lun] = ARECA_RAID_GONE;
1331 ccb->pcmd->result = DID_NO_CONNECT << 16;
1332 arcmsr_ccb_complete(ccb, 1);
1333 }
1334 break;
1335
1336 case ARCMSR_DEV_ABORTED:
1337
1338 case ARCMSR_DEV_INIT_FAIL: {
1339 acb->devstate[id][lun] =
1340 ARECA_RAID_GONE;
1341 ccb->pcmd->result =
1342 DID_BAD_TARGET << 16;
1343 arcmsr_ccb_complete(ccb, 1);
1344 }
1345 break;
1346
1347 case ARCMSR_DEV_CHECK_CONDITION: {
1348 acb->devstate[id][lun] =
1349 ARECA_RAID_GOOD;
1350 arcmsr_report_sense_info(ccb);
1351 arcmsr_ccb_complete(ccb, 1);
1352 }
1353 break;
1354
1355 default:
1356 printk(KERN_NOTICE
1357 "arcmsr%d: scsi id = %d \
1358 lun = %d""polling and \
1359 getting command error \
1360 done""but got unknown \
1361 DeviceStatus = 0x%x \n",
1362 acb->host->host_no, id,
1363 lun, ccb->arcmsr_cdb.DeviceStatus);
1364 acb->devstate[id][lun] =
1365 ARECA_RAID_GONE;
1366 ccb->pcmd->result =
1367 DID_BAD_TARGET << 16;
1368 arcmsr_ccb_complete(ccb, 1);
1369 break;
1370 }
1371 }
1372 found = 1;
1373 }
1374 }
1375 if (found){
1376 outbound_intstatus = readl(&reg->outbound_intstatus) & \
1377 acb->outbound_int_enable;
1378 writel(outbound_intstatus, &reg->outbound_intstatus);
1379 /*clear interrupt*/
1380 }
1381 return;
1382}
1383
1325 1384
1326static void arcmsr_iop_init(struct AdapterControlBlock *acb) 1385static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1327{ 1386{
@@ -1355,7 +1414,6 @@ static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1355 1414
1356static void arcmsr_iop_reset(struct AdapterControlBlock *acb) 1415static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1357{ 1416{
1358 struct MessageUnit __iomem *reg = acb->pmu;
1359 struct CommandControlBlock *ccb; 1417 struct CommandControlBlock *ccb;
1360 uint32_t intmask_org; 1418 uint32_t intmask_org;
1361 int i = 0; 1419 int i = 0;
@@ -1368,21 +1426,17 @@ static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1368 /* disable all outbound interrupt */ 1426 /* disable all outbound interrupt */
1369 intmask_org = arcmsr_disable_outbound_ints(acb); 1427 intmask_org = arcmsr_disable_outbound_ints(acb);
1370 /* clear all outbound posted Q */ 1428 /* clear all outbound posted Q */
1371 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++) 1429 arcmsr_done4_abort_postqueue(acb);
1372 readl(&reg->outbound_queueport);
1373 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 1430 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1374 ccb = acb->pccb_pool[i]; 1431 ccb = acb->pccb_pool[i];
1375 if ((ccb->startdone == ARCMSR_CCB_START) || 1432 if (ccb->startdone == ARCMSR_CCB_START) {
1376 (ccb->startdone == ARCMSR_CCB_ABORTED)) {
1377 ccb->startdone = ARCMSR_CCB_ABORTED; 1433 ccb->startdone = ARCMSR_CCB_ABORTED;
1378 ccb->pcmd->result = DID_ABORT << 16;
1379 arcmsr_ccb_complete(ccb, 1);
1380 } 1434 }
1381 } 1435 }
1382 /* enable all outbound interrupt */ 1436 /* enable all outbound interrupt */
1383 arcmsr_enable_outbound_ints(acb, intmask_org); 1437 arcmsr_enable_outbound_ints(acb, intmask_org);
1384 } 1438 }
1385 atomic_set(&acb->ccboutstandingcount, 0); 1439
1386} 1440}
1387 1441
1388static int arcmsr_bus_reset(struct scsi_cmnd *cmd) 1442static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
@@ -1428,10 +1482,9 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
1428 int i = 0; 1482 int i = 0;
1429 1483
1430 printk(KERN_NOTICE 1484 printk(KERN_NOTICE
1431 "arcmsr%d: abort device command of scsi id=%d lun=%d \n", 1485 "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
1432 acb->host->host_no, cmd->device->id, cmd->device->lun); 1486 acb->host->host_no, cmd->device->id, cmd->device->lun);
1433 acb->num_aborts++; 1487 acb->num_aborts++;
1434
1435 /* 1488 /*
1436 ************************************************ 1489 ************************************************
1437 ** the all interrupt service routine is locked 1490 ** the all interrupt service routine is locked
@@ -1486,10 +1539,306 @@ static const char *arcmsr_info(struct Scsi_Host *host)
1486 type = "X-TYPE"; 1539 type = "X-TYPE";
1487 break; 1540 break;
1488 } 1541 }
1489 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s", 1542 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
1490 type, raid6 ? "( RAID6 capable)" : "", 1543 type, raid6 ? "( RAID6 capable)" : "",
1491 ARCMSR_DRIVER_VERSION); 1544 ARCMSR_DRIVER_VERSION);
1492 return buf; 1545 return buf;
1493} 1546}
1494 1547
1548static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
1549{
1550 struct Scsi_Host *host;
1551 struct AdapterControlBlock *acb;
1552 uint8_t bus, dev_fun;
1553 int error;
1554
1555 error = pci_enable_device(pdev);
1556 if (error)
1557 return PCI_ERS_RESULT_DISCONNECT;
1558 pci_set_master(pdev);
1559
1560 host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof \
1561(struct AdapterControlBlock));
1562 if (!host)
1563 return PCI_ERS_RESULT_DISCONNECT;
1564 acb = (struct AdapterControlBlock *)host->hostdata;
1565 memset(acb, 0, sizeof (struct AdapterControlBlock));
1566
1567 error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1568 if (error) {
1569 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1570 if (error) {
1571 printk(KERN_WARNING
1572 "scsi%d: No suitable DMA mask available\n",
1573 host->host_no);
1574 return PCI_ERS_RESULT_DISCONNECT;
1575 }
1576 }
1577 bus = pdev->bus->number;
1578 dev_fun = pdev->devfn;
1579 acb = (struct AdapterControlBlock *) host->hostdata;
1580 memset(acb, 0, sizeof(struct AdapterControlBlock));
1581 acb->pdev = pdev;
1582 acb->host = host;
1583 host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
1584 host->max_lun = ARCMSR_MAX_TARGETLUN;
1585 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
1586 host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
1587 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
1588 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
1589 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
1590 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
1591 host->unique_id = (bus << 8) | dev_fun;
1592 host->irq = pdev->irq;
1593 error = pci_request_regions(pdev, "arcmsr");
1594 if (error)
1595 return PCI_ERS_RESULT_DISCONNECT;
1596
1597 acb->pmu = ioremap(pci_resource_start(pdev, 0),
1598 pci_resource_len(pdev, 0));
1599 if (!acb->pmu) {
1600 printk(KERN_NOTICE "arcmsr%d: memory"
1601 " mapping region fail \n", acb->host->host_no);
1602 return PCI_ERS_RESULT_DISCONNECT;
1603 }
1604 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1605 ACB_F_MESSAGE_RQBUFFER_CLEARED |
1606 ACB_F_MESSAGE_WQBUFFER_READED);
1607 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
1608 INIT_LIST_HEAD(&acb->ccb_free_list);
1609
1610 error = arcmsr_alloc_ccb_pool(acb);
1611 if (error)
1612 return PCI_ERS_RESULT_DISCONNECT;
1613
1614 error = request_irq(pdev->irq, arcmsr_do_interrupt,
1615 IRQF_DISABLED | IRQF_SHARED, "arcmsr", acb);
1616 if (error)
1617 return PCI_ERS_RESULT_DISCONNECT;
1618
1619 arcmsr_iop_init(acb);
1620 if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
1621 host->max_sectors = ARCMSR_MAX_XFER_SECTORS_B;
1622
1623 pci_set_drvdata(pdev, host);
1624
1625 error = scsi_add_host(host, &pdev->dev);
1626 if (error)
1627 return PCI_ERS_RESULT_DISCONNECT;
1628
1629 error = arcmsr_alloc_sysfs_attr(acb);
1630 if (error)
1631 return PCI_ERS_RESULT_DISCONNECT;
1632
1633 scsi_scan_host(host);
1634 return PCI_ERS_RESULT_RECOVERED;
1635}
1636
1637static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
1638{
1639 struct Scsi_Host *host = pci_get_drvdata(pdev);
1640 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1641 struct MessageUnit __iomem *reg = acb->pmu;
1642 struct CommandControlBlock *ccb;
1643 /*clear and abort all outbound posted Q*/
1644 int i = 0, found = 0;
1645 int id, lun;
1646 uint32_t flag_ccb, outbound_intstatus;
1647
1648 while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
1649 (i++ < 256)){
1650 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset
1651 + (flag_ccb << 5));
1652 if (ccb){
1653 if ((ccb->acb != acb)||(ccb->startdone !=
1654 ARCMSR_CCB_START)){
1655 printk(KERN_NOTICE "arcmsr%d: polling \
1656 get an illegal ccb"" command done ccb = '0x%p'"
1657 "ccboutstandingcount = %d \n",
1658 acb->host->host_no, ccb,
1659 atomic_read(&acb->ccboutstandingcount));
1660 continue;
1661 }
1495 1662
1663 id = ccb->pcmd->device->id;
1664 lun = ccb->pcmd->device->lun;
1665 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
1666 if (acb->devstate[id][lun] ==
1667 ARECA_RAID_GONE)
1668 acb->devstate[id][lun] =
1669 ARECA_RAID_GOOD;
1670 ccb->pcmd->result = DID_OK << 16;
1671 arcmsr_ccb_complete(ccb, 1);
1672 }
1673 else {
1674 switch(ccb->arcmsr_cdb.DeviceStatus) {
1675 case ARCMSR_DEV_SELECT_TIMEOUT: {
1676 acb->devstate[id][lun] =
1677 ARECA_RAID_GONE;
1678 ccb->pcmd->result =
1679 DID_NO_CONNECT << 16;
1680 arcmsr_ccb_complete(ccb, 1);
1681 }
1682 break;
1683
1684 case ARCMSR_DEV_ABORTED:
1685
1686 case ARCMSR_DEV_INIT_FAIL: {
1687 acb->devstate[id][lun] =
1688 ARECA_RAID_GONE;
1689 ccb->pcmd->result =
1690 DID_BAD_TARGET << 16;
1691 arcmsr_ccb_complete(ccb, 1);
1692 }
1693 break;
1694
1695 case ARCMSR_DEV_CHECK_CONDITION: {
1696 acb->devstate[id][lun] =
1697 ARECA_RAID_GOOD;
1698 arcmsr_report_sense_info(ccb);
1699 arcmsr_ccb_complete(ccb, 1);
1700 }
1701 break;
1702
1703				default:
1704					printk(KERN_NOTICE
1705						"arcmsr%d: scsi"
1706						" id = %d lun = %d"
1707						" polling and"
1708						" getting command"
1709						" error done"
1710						" but got unknown"
1711						" DeviceStatus = 0x%x \n",
1712						acb->host->host_no,
1713						id, lun,
1714						ccb->arcmsr_cdb.DeviceStatus);
1715 acb->devstate[id][lun] =
1716 ARECA_RAID_GONE;
1717 ccb->pcmd->result =
1718 DID_BAD_TARGET << 16;
1719 arcmsr_ccb_complete(ccb, 1);
1720 break;
1721 }
1722 }
1723 found = 1;
1724 }
1725 }
1726 if (found){
1727 outbound_intstatus = readl(&reg->outbound_intstatus) &
1728 acb->outbound_int_enable;
1729 writel(outbound_intstatus, &reg->outbound_intstatus);
1730 /*clear interrupt*/
1731 }
1732 return;
1733}
1734
1735
1736static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
1737{
1738 struct Scsi_Host *host = pci_get_drvdata(pdev);
1739 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1740 struct MessageUnit __iomem *reg = acb->pmu;
1741 struct CommandControlBlock *ccb;
1742 /*clear and abort all outbound posted Q*/
1743 int i = 0, found = 0;
1744 int id, lun;
1745 uint32_t flag_ccb, outbound_intstatus;
1746
1747 while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) &&
1748 (i++ < 256)){
1749 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
1750 (flag_ccb << 5));
1751 if (ccb){
1752 if ((ccb->acb != acb)||(ccb->startdone !=
1753 ARCMSR_CCB_START)){
1754 printk(KERN_NOTICE
1755 "arcmsr%d: polling get an illegal ccb"
1756 " command done ccb = '0x%p'"
1757 "ccboutstandingcount = %d \n",
1758 acb->host->host_no, ccb,
1759 atomic_read(&acb->ccboutstandingcount));
1760 continue;
1761 }
1762
1763 id = ccb->pcmd->device->id;
1764 lun = ccb->pcmd->device->lun;
1765 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
1766 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1767 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1768 ccb->pcmd->result = DID_OK << 16;
1769 arcmsr_ccb_complete(ccb, 1);
1770 }
1771 else {
1772 switch(ccb->arcmsr_cdb.DeviceStatus) {
1773 case ARCMSR_DEV_SELECT_TIMEOUT: {
1774 acb->devstate[id][lun] =
1775 ARECA_RAID_GONE;
1776 ccb->pcmd->result =
1777 DID_NO_CONNECT << 16;
1778 arcmsr_ccb_complete(ccb, 1);
1779 }
1780 break;
1781
1782 case ARCMSR_DEV_ABORTED:
1783
1784 case ARCMSR_DEV_INIT_FAIL: {
1785 acb->devstate[id][lun] =
1786 ARECA_RAID_GONE;
1787 ccb->pcmd->result =
1788 DID_BAD_TARGET << 16;
1789 arcmsr_ccb_complete(ccb, 1);
1790 }
1791 break;
1792
1793 case ARCMSR_DEV_CHECK_CONDITION: {
1794 acb->devstate[id][lun] =
1795 ARECA_RAID_GOOD;
1796 arcmsr_report_sense_info(ccb);
1797 arcmsr_ccb_complete(ccb, 1);
1798 }
1799 break;
1800
1801 default:
1802				printk(KERN_NOTICE "arcmsr%d:"
1803					" scsi id = %d lun = %d"
1804					" polling and"
1805					" getting command error done"
1806					" but got unknown"
1807					" DeviceStatus = 0x%x \n",
1808					acb->host->host_no,
1809					id, lun, ccb->arcmsr_cdb.DeviceStatus);
1810 acb->devstate[id][lun] =
1811 ARECA_RAID_GONE;
1812 ccb->pcmd->result =
1813 DID_BAD_TARGET << 16;
1814 arcmsr_ccb_complete(ccb, 1);
1815 break;
1816 }
1817 }
1818 found = 1;
1819 }
1820 }
1821 if (found){
1822 outbound_intstatus = readl(&reg->outbound_intstatus) &
1823 acb->outbound_int_enable;
1824 writel(outbound_intstatus, &reg->outbound_intstatus);
1825 /*clear interrupt*/
1826 }
1827 return;
1828}
1829
1830static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
1831 pci_channel_state_t state)
1832{
1833 switch (state) {
1834 case pci_channel_io_frozen:
1835 arcmsr_pci_ers_need_reset_forepart(pdev);
1836 return PCI_ERS_RESULT_NEED_RESET;
1837 case pci_channel_io_perm_failure:
1838 arcmsr_pci_ers_disconnect_forepart(pdev);
1839 return PCI_ERS_RESULT_DISCONNECT;
1840 break;
1841 default:
1842 return PCI_ERS_RESULT_NEED_RESET;
1843 }
1844}
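
These AER callbacks only take effect once they are wired into the driver's struct pci_driver. A minimal sketch of that hook-up, assuming the driver's existing probe/remove/id-table symbols (the wiring itself is not shown in this hunk):

	/* Sketch: attaching the PCI error handlers defined above */
	static struct pci_error_handlers arcmsr_pci_error_handlers = {
		.error_detected	= arcmsr_pci_error_detected,
		.slot_reset	= arcmsr_pci_slot_reset,
	};

	static struct pci_driver arcmsr_pci_driver = {
		.name		= "arcmsr",
		.id_table	= arcmsr_device_id_table,
		.probe		= arcmsr_probe,
		.remove		= arcmsr_remove,
		.err_handler	= &arcmsr_pci_error_handlers,
	};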
diff --git a/drivers/scsi/bvme6000.c b/drivers/scsi/bvme6000.c
deleted file mode 100644
index 599b400a3c43..000000000000
--- a/drivers/scsi/bvme6000.c
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
3 *
4 * Based on work by Alan Hourihane
5 */
6#include <linux/types.h>
7#include <linux/mm.h>
8#include <linux/blkdev.h>
9#include <linux/zorro.h>
10
11#include <asm/setup.h>
12#include <asm/page.h>
13#include <asm/pgtable.h>
14#include <asm/bvme6000hw.h>
15#include <asm/irq.h>
16
17#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "53c7xx.h"
20#include "bvme6000.h"
21
22#include<linux/stat.h>
23
24
25int bvme6000_scsi_detect(struct scsi_host_template *tpnt)
26{
27 static unsigned char called = 0;
28 int clock;
29 long long options;
30
31 if (called)
32 return 0;
33 if (!MACH_IS_BVME6000)
34 return 0;
35
36 tpnt->proc_name = "BVME6000";
37
38 options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
39
 40 clock = 40000000; /* 40MHz SCSI Clock */
41
42 ncr53c7xx_init(tpnt, 0, 710, (unsigned long)BVME_NCR53C710_BASE,
43 0, BVME_IRQ_SCSI, DMA_NONE,
44 options, clock);
45 called = 1;
46 return 1;
47}
48
49static int bvme6000_scsi_release(struct Scsi_Host *shost)
50{
51 if (shost->irq)
52 free_irq(shost->irq, NULL);
53 if (shost->dma_channel != 0xff)
54 free_dma(shost->dma_channel);
55 if (shost->io_port && shost->n_io_port)
56 release_region(shost->io_port, shost->n_io_port);
57 scsi_unregister(shost);
58 return 0;
59}
60
61static struct scsi_host_template driver_template = {
62 .name = "BVME6000 NCR53c710 SCSI",
63 .detect = bvme6000_scsi_detect,
64 .release = bvme6000_scsi_release,
65 .queuecommand = NCR53c7xx_queue_command,
66 .abort = NCR53c7xx_abort,
67 .reset = NCR53c7xx_reset,
68 .can_queue = 24,
69 .this_id = 7,
70 .sg_tablesize = 63,
71 .cmd_per_lun = 3,
72 .use_clustering = DISABLE_CLUSTERING
73};
74
75
76#include "scsi_module.c"
diff --git a/drivers/scsi/bvme6000.h b/drivers/scsi/bvme6000.h
deleted file mode 100644
index ea3e4b2b9220..000000000000
--- a/drivers/scsi/bvme6000.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef BVME6000_SCSI_H
2#define BVME6000_SCSI_H
3
4#include <linux/types.h>
5
6int bvme6000_scsi_detect(struct scsi_host_template *);
7const char *NCR53c7x0_info(void);
8int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
9int NCR53c7xx_abort(Scsi_Cmnd *);
10int NCR53c7x0_release (struct Scsi_Host *);
11int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
12void NCR53c7x0_intr(int irq, void *dev_id);
13
14#ifndef CMD_PER_LUN
15#define CMD_PER_LUN 3
16#endif
17
18#ifndef CAN_QUEUE
19#define CAN_QUEUE 24
20#endif
21
22#include <scsi/scsicam.h>
23
24#endif /* BVME6000_SCSI_H */
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
new file mode 100644
index 000000000000..012cdea7946d
--- /dev/null
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -0,0 +1,135 @@
1/*
2 * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
3 *
4 * Based on work by Alan Hourihane and Kars de Jong
5 *
6 * Rewritten to use 53c700.c by Richard Hirst <richard@sleepie.demon.co.uk>
7 */
8
9#include <linux/module.h>
10#include <linux/blkdev.h>
11#include <linux/device.h>
12#include <linux/platform_device.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <asm/bvme6000hw.h>
16#include <scsi/scsi_host.h>
17#include <scsi/scsi_device.h>
18#include <scsi/scsi_transport.h>
19#include <scsi/scsi_transport_spi.h>
20
21#include "53c700.h"
22
23MODULE_AUTHOR("Richard Hirst <richard@sleepie.demon.co.uk>");
24MODULE_DESCRIPTION("BVME6000 NCR53C710 driver");
25MODULE_LICENSE("GPL");
26
27static struct scsi_host_template bvme6000_scsi_driver_template = {
28 .name = "BVME6000 NCR53c710 SCSI",
29 .proc_name = "BVME6000",
30 .this_id = 7,
31 .module = THIS_MODULE,
32};
33
34static struct platform_device *bvme6000_scsi_device;
35
36static __devinit int
37bvme6000_probe(struct device *dev)
38{
39 struct Scsi_Host * host = NULL;
40 struct NCR_700_Host_Parameters *hostdata;
41
42 if (!MACH_IS_BVME6000)
43 goto out;
44
45 hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
46 if (hostdata == NULL) {
47 printk(KERN_ERR "bvme6000-scsi: "
48 "Failed to allocate host data\n");
49 goto out;
50 }
51 memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
52
53 /* Fill in the required pieces of hostdata */
54 hostdata->base = (void __iomem *)BVME_NCR53C710_BASE;
55 hostdata->clock = 40; /* XXX - depends on the CPU clock! */
56 hostdata->chip710 = 1;
57 hostdata->dmode_extra = DMODE_FC2;
58 hostdata->dcntl_extra = EA_710;
59 hostdata->ctest7_extra = CTEST7_TT1;
60
61 /* and register the chip */
62 host = NCR_700_detect(&bvme6000_scsi_driver_template, hostdata, dev);
63 if (!host) {
64 printk(KERN_ERR "bvme6000-scsi: No host detected; "
65 "board configuration problem?\n");
66 goto out_free;
67 }
68 host->base = BVME_NCR53C710_BASE;
69 host->this_id = 7;
70 host->irq = BVME_IRQ_SCSI;
71 if (request_irq(BVME_IRQ_SCSI, NCR_700_intr, 0, "bvme6000-scsi",
72 host)) {
73 printk(KERN_ERR "bvme6000-scsi: request_irq failed\n");
74 goto out_put_host;
75 }
76
77 scsi_scan_host(host);
78
79 return 0;
80
81 out_put_host:
82 scsi_host_put(host);
83 out_free:
84 kfree(hostdata);
85 out:
86 return -ENODEV;
87}
88
89static __devexit int
90bvme6000_device_remove(struct device *dev)
91{
92 struct Scsi_Host *host = dev_to_shost(dev);
93 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
94
95 scsi_remove_host(host);
96 NCR_700_release(host);
97 kfree(hostdata);
98 free_irq(host->irq, host);
99
100 return 0;
101}
102
103static struct device_driver bvme6000_scsi_driver = {
104 .name = "bvme6000-scsi",
105 .bus = &platform_bus_type,
106 .probe = bvme6000_probe,
107 .remove = __devexit_p(bvme6000_device_remove),
108};
109
110static int __init bvme6000_scsi_init(void)
111{
112 int err;
113
114 err = driver_register(&bvme6000_scsi_driver);
115 if (err)
116 return err;
117
118 bvme6000_scsi_device = platform_device_register_simple("bvme6000-scsi",
119 -1, NULL, 0);
120 if (IS_ERR(bvme6000_scsi_device)) {
121 driver_unregister(&bvme6000_scsi_driver);
122 return PTR_ERR(bvme6000_scsi_device);
123 }
124
125 return 0;
126}
127
128static void __exit bvme6000_scsi_exit(void)
129{
130 platform_device_unregister(bvme6000_scsi_device);
131 driver_unregister(&bvme6000_scsi_driver);
132}
133
134module_init(bvme6000_scsi_init);
135module_exit(bvme6000_scsi_exit);
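
One possible tidy-up for the probe routine above, sketched here but not part of the patch: kzalloc() was already available in this era and collapses the kmalloc()/memset() pair into a single zeroed allocation.

	/* Sketch: equivalent zeroed allocation in one call */
	hostdata = kzalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
	if (!hostdata) {
		printk(KERN_ERR "bvme6000-scsi: "
			"Failed to allocate host data\n");
		goto out;
	}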
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 564ea90ed3a0..7b8a3457b696 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -979,6 +979,7 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
979static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, 979static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
980 struct ScsiReqBlk *srb) 980 struct ScsiReqBlk *srb)
981{ 981{
982 int nseg;
982 enum dma_data_direction dir = cmd->sc_data_direction; 983 enum dma_data_direction dir = cmd->sc_data_direction;
983 dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n", 984 dprintkdbg(DBG_0, "build_srb: (pid#%li) <%02i-%i>\n",
984 cmd->pid, dcb->target_id, dcb->target_lun); 985 cmd->pid, dcb->target_id, dcb->target_lun);
@@ -1000,27 +1001,30 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
1000 srb->scsi_phase = PH_BUS_FREE; /* initial phase */ 1001 srb->scsi_phase = PH_BUS_FREE; /* initial phase */
1001 srb->end_message = 0; 1002 srb->end_message = 0;
1002 1003
1003 if (dir == PCI_DMA_NONE || !cmd->request_buffer) { 1004 nseg = scsi_dma_map(cmd);
1005 BUG_ON(nseg < 0);
1006
1007 if (dir == PCI_DMA_NONE || !nseg) {
1004 dprintkdbg(DBG_0, 1008 dprintkdbg(DBG_0,
1005 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n", 1009 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
1006 cmd->bufflen, cmd->request_buffer, 1010 scsi_bufflen(cmd), scsi_sglist(cmd), scsi_sg_count(cmd),
1007 cmd->use_sg, srb->segment_x[0].address); 1011 srb->segment_x[0].address);
1008 } else if (cmd->use_sg) { 1012 } else {
1009 int i; 1013 int i;
1010 u32 reqlen = cmd->request_bufflen; 1014 u32 reqlen = scsi_bufflen(cmd);
1011 struct scatterlist *sl = (struct scatterlist *) 1015 struct scatterlist *sg;
1012 cmd->request_buffer;
1013 struct SGentry *sgp = srb->segment_x; 1016 struct SGentry *sgp = srb->segment_x;
1014 srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg, 1017
1015 dir); 1018 srb->sg_count = nseg;
1019
1016 dprintkdbg(DBG_0, 1020 dprintkdbg(DBG_0,
1017 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n", 1021 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
1018 reqlen, cmd->request_buffer, cmd->use_sg, 1022 reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
1019 srb->sg_count); 1023 srb->sg_count);
1020 1024
1021 for (i = 0; i < srb->sg_count; i++) { 1025 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
1022 u32 busaddr = (u32)sg_dma_address(&sl[i]); 1026 u32 busaddr = (u32)sg_dma_address(sg);
1023 u32 seglen = (u32)sl[i].length; 1027 u32 seglen = (u32)sg->length;
1024 sgp[i].address = busaddr; 1028 sgp[i].address = busaddr;
1025 sgp[i].length = seglen; 1029 sgp[i].length = seglen;
1026 srb->total_xfer_length += seglen; 1030 srb->total_xfer_length += seglen;
@@ -1050,23 +1054,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
1050 1054
1051 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n", 1055 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
1052 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN); 1056 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
1053 } else {
1054 srb->total_xfer_length = cmd->request_bufflen;
1055 srb->sg_count = 1;
1056 srb->segment_x[0].address =
1057 pci_map_single(dcb->acb->dev, cmd->request_buffer,
1058 srb->total_xfer_length, dir);
1059
1060 /* Fixup for WIDE padding - make sure length is even */
1061 if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
1062 srb->total_xfer_length++;
1063
1064 srb->segment_x[0].length = srb->total_xfer_length;
1065
1066 dprintkdbg(DBG_0,
1067 "build_srb: [1] len=%d buf=%p use_sg=%d map=%08x\n",
1068 srb->total_xfer_length, cmd->request_buffer,
1069 cmd->use_sg, srb->segment_x[0].address);
1070 } 1057 }
1071 1058
1072 srb->request_length = srb->total_xfer_length; 1059 srb->request_length = srb->total_xfer_length;
@@ -2128,7 +2115,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2128 /*clear_fifo(acb, "DOP1"); */ 2115 /*clear_fifo(acb, "DOP1"); */
2129 /* KG: What is this supposed to be useful for? WIDE padding stuff? */ 2116 /* KG: What is this supposed to be useful for? WIDE padding stuff? */
2130 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC 2117 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
2131 && srb->cmd->request_bufflen % 2) { 2118 && scsi_bufflen(srb->cmd) % 2) {
2132 d_left_counter = 0; 2119 d_left_counter = 0;
2133 dprintkl(KERN_INFO, 2120 dprintkl(KERN_INFO,
2134 "data_out_phase0: Discard 1 byte (0x%02x)\n", 2121 "data_out_phase0: Discard 1 byte (0x%02x)\n",
@@ -2159,7 +2146,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2159 sg_update_list(srb, d_left_counter); 2146 sg_update_list(srb, d_left_counter);
2160 /* KG: Most ugly hack! Apparently, this works around a chip bug */ 2147 /* KG: Most ugly hack! Apparently, this works around a chip bug */
2161 if ((srb->segment_x[srb->sg_index].length == 2148 if ((srb->segment_x[srb->sg_index].length ==
2162 diff && srb->cmd->use_sg) 2149 diff && scsi_sg_count(srb->cmd))
2163 || ((oldxferred & ~PAGE_MASK) == 2150 || ((oldxferred & ~PAGE_MASK) ==
2164 (PAGE_SIZE - diff)) 2151 (PAGE_SIZE - diff))
2165 ) { 2152 ) {
@@ -2289,19 +2276,15 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2289 unsigned char *virt, *base = NULL; 2276 unsigned char *virt, *base = NULL;
2290 unsigned long flags = 0; 2277 unsigned long flags = 0;
2291 size_t len = left_io; 2278 size_t len = left_io;
2279 size_t offset = srb->request_length - left_io;
2280
2281 local_irq_save(flags);
2282 /* Assumption: it's inside one page as it's at most 4 bytes and
2283 I just assume it's on a 4-byte boundary */
2284 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2285 srb->sg_count, &offset, &len);
2286 virt = base + offset;
2292 2287
2293 if (srb->cmd->use_sg) {
2294 size_t offset = srb->request_length - left_io;
2295 local_irq_save(flags);
2296 /* Assumption: it's inside one page as it's at most 4 bytes and
2297 I just assume it's on a 4-byte boundary */
2298 base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
2299 srb->sg_count, &offset, &len);
2300 virt = base + offset;
2301 } else {
2302 virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
2303 len = left_io;
2304 }
2305 left_io -= len; 2288 left_io -= len;
2306 2289
2307 while (len) { 2290 while (len) {
@@ -2341,10 +2324,8 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2341 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0); 2324 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2342 } 2325 }
2343 2326
2344 if (srb->cmd->use_sg) { 2327 scsi_kunmap_atomic_sg(base);
2345 scsi_kunmap_atomic_sg(base); 2328 local_irq_restore(flags);
2346 local_irq_restore(flags);
2347 }
2348 } 2329 }
2349 /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */ 2330 /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
2350 /*srb->total_xfer_length = 0; */ 2331 /*srb->total_xfer_length = 0; */
@@ -2455,7 +2436,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2455 */ 2436 */
2456 srb->state |= SRB_DATA_XFER; 2437 srb->state |= SRB_DATA_XFER;
2457 DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0); 2438 DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
2458 if (srb->cmd->use_sg) { /* with S/G */ 2439 if (scsi_sg_count(srb->cmd)) { /* with S/G */
2459 io_dir |= DMACMD_SG; 2440 io_dir |= DMACMD_SG;
2460 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR, 2441 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2461 srb->sg_bus_addr + 2442 srb->sg_bus_addr +
@@ -2513,18 +2494,14 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2513 unsigned char *virt, *base = NULL; 2494 unsigned char *virt, *base = NULL;
2514 unsigned long flags = 0; 2495 unsigned long flags = 0;
2515 size_t len = left_io; 2496 size_t len = left_io;
2497 size_t offset = srb->request_length - left_io;
2498
2499 local_irq_save(flags);
2500 /* Again, max 4 bytes */
2501 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2502 srb->sg_count, &offset, &len);
2503 virt = base + offset;
2516 2504
2517 if (srb->cmd->use_sg) {
2518 size_t offset = srb->request_length - left_io;
2519 local_irq_save(flags);
2520 /* Again, max 4 bytes */
2521 base = scsi_kmap_atomic_sg((struct scatterlist *)srb->cmd->request_buffer,
2522 srb->sg_count, &offset, &len);
2523 virt = base + offset;
2524 } else {
2525 virt = srb->cmd->request_buffer + srb->cmd->request_bufflen - left_io;
2526 len = left_io;
2527 }
2528 left_io -= len; 2505 left_io -= len;
2529 2506
2530 while (len--) { 2507 while (len--) {
@@ -2536,10 +2513,8 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
2536 sg_subtract_one(srb); 2513 sg_subtract_one(srb);
2537 } 2514 }
2538 2515
2539 if (srb->cmd->use_sg) { 2516 scsi_kunmap_atomic_sg(base);
2540 scsi_kunmap_atomic_sg(base); 2517 local_irq_restore(flags);
2541 local_irq_restore(flags);
2542 }
2543 } 2518 }
2544 if (srb->dcb->sync_period & WIDE_SYNC) { 2519 if (srb->dcb->sync_period & WIDE_SYNC) {
2545 if (ln % 2) { 2520 if (ln % 2) {
@@ -3295,7 +3270,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3295{ 3270{
3296 struct scsi_cmnd *cmd = srb->cmd; 3271 struct scsi_cmnd *cmd = srb->cmd;
3297 enum dma_data_direction dir = cmd->sc_data_direction; 3272 enum dma_data_direction dir = cmd->sc_data_direction;
3298 if (cmd->use_sg && dir != PCI_DMA_NONE) { 3273
3274 if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
3299 /* unmap DC395x SG list */ 3275 /* unmap DC395x SG list */
3300 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n", 3276 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3301 srb->sg_bus_addr, SEGMENTX_LEN); 3277 srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3303,16 +3279,9 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3303 SEGMENTX_LEN, 3279 SEGMENTX_LEN,
3304 PCI_DMA_TODEVICE); 3280 PCI_DMA_TODEVICE);
3305 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n", 3281 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3306 cmd->use_sg, cmd->request_buffer); 3282 scsi_sg_count(cmd), scsi_bufflen(cmd));
3307 /* unmap the sg segments */ 3283 /* unmap the sg segments */
3308 pci_unmap_sg(acb->dev, 3284 scsi_dma_unmap(cmd);
3309 (struct scatterlist *)cmd->request_buffer,
3310 cmd->use_sg, dir);
3311 } else if (cmd->request_buffer && dir != PCI_DMA_NONE) {
3312 dprintkdbg(DBG_SG, "pci_unmap_srb: buffer=%08x(%05x)\n",
3313 srb->segment_x[0].address, cmd->request_bufflen);
3314 pci_unmap_single(acb->dev, srb->segment_x[0].address,
3315 cmd->request_bufflen, dir);
3316 } 3285 }
3317} 3286}
3318 3287
@@ -3352,8 +3321,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3352 dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid, 3321 dprintkdbg(DBG_1, "srb_done: (pid#%li) <%02i-%i>\n", srb->cmd->pid,
3353 srb->cmd->device->id, srb->cmd->device->lun); 3322 srb->cmd->device->id, srb->cmd->device->lun);
3354 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n", 3323 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3355 srb, cmd->use_sg, srb->sg_index, srb->sg_count, 3324 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
3356 cmd->request_buffer); 3325 scsi_sglist(cmd));
3357 status = srb->target_status; 3326 status = srb->target_status;
3358 if (srb->flag & AUTO_REQSENSE) { 3327 if (srb->flag & AUTO_REQSENSE) {
3359 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n"); 3328 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
@@ -3482,16 +3451,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3482 } 3451 }
3483 } 3452 }
3484 3453
3485 if (dir != PCI_DMA_NONE) { 3454 if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
3486 if (cmd->use_sg) 3455 pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
3487 pci_dma_sync_sg_for_cpu(acb->dev, 3456 scsi_sg_count(cmd), dir);
3488 (struct scatterlist *)cmd-> 3457
3489 request_buffer, cmd->use_sg, dir);
3490 else if (cmd->request_buffer)
3491 pci_dma_sync_single_for_cpu(acb->dev,
3492 srb->segment_x[0].address,
3493 cmd->request_bufflen, dir);
3494 }
3495 ckc_only = 0; 3458 ckc_only = 0;
3496/* Check Error Conditions */ 3459/* Check Error Conditions */
3497 ckc_e: 3460 ckc_e:
@@ -3500,19 +3463,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3500 unsigned char *base = NULL; 3463 unsigned char *base = NULL;
3501 struct ScsiInqData *ptr; 3464 struct ScsiInqData *ptr;
3502 unsigned long flags = 0; 3465 unsigned long flags = 0;
3466 struct scatterlist* sg = scsi_sglist(cmd);
3467 size_t offset = 0, len = sizeof(struct ScsiInqData);
3503 3468
3504 if (cmd->use_sg) { 3469 local_irq_save(flags);
3505 struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer; 3470 base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
3506 size_t offset = 0, len = sizeof(struct ScsiInqData); 3471 ptr = (struct ScsiInqData *)(base + offset);
3507
3508 local_irq_save(flags);
3509 base = scsi_kmap_atomic_sg(sg, cmd->use_sg, &offset, &len);
3510 ptr = (struct ScsiInqData *)(base + offset);
3511 } else
3512 ptr = (struct ScsiInqData *)(cmd->request_buffer);
3513 3472
3514 if (!ckc_only && (cmd->result & RES_DID) == 0 3473 if (!ckc_only && (cmd->result & RES_DID) == 0
3515 && cmd->cmnd[2] == 0 && cmd->request_bufflen >= 8 3474 && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
3516 && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2) 3475 && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
3517 dcb->inquiry7 = ptr->Flags; 3476 dcb->inquiry7 = ptr->Flags;
3518 3477
@@ -3527,14 +3486,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3527 } 3486 }
3528 } 3487 }
3529 3488
3530 if (cmd->use_sg) { 3489 scsi_kunmap_atomic_sg(base);
3531 scsi_kunmap_atomic_sg(base); 3490 local_irq_restore(flags);
3532 local_irq_restore(flags);
3533 }
3534 } 3491 }
3535 3492
3536 /* Here is the info for Doug Gilbert's sg3 ... */ 3493 /* Here is the info for Doug Gilbert's sg3 ... */
3537 cmd->resid = srb->total_xfer_length; 3494 scsi_set_resid(cmd, srb->total_xfer_length);
3538 /* This may be interpreted by sb. or not ... */ 3495 /* This may be interpreted by sb. or not ... */
3539 cmd->SCp.this_residual = srb->total_xfer_length; 3496 cmd->SCp.this_residual = srb->total_xfer_length;
3540 cmd->SCp.buffers_residual = 0; 3497 cmd->SCp.buffers_residual = 0;
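
The dc395x conversion above follows the recipe used throughout this merge: direct use_sg/request_buffer/request_bufflen access gives way to the scsi data buffer accessors. A generic sketch of the pattern (my_fill_sg_entry() is a hypothetical per-driver helper; error handling abbreviated):

	#include <scsi/scsi_cmnd.h>

	static int my_map_command(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int i, nseg;

		/* maps the whole request: 0 means no data, <0 means failure */
		nseg = scsi_dma_map(cmd);
		if (nseg < 0)
			return -ENOMEM;

		scsi_for_each_sg(cmd, sg, nseg, i)
			my_fill_sg_entry(i, sg_dma_address(sg), sg_dma_len(sg));

		return 0;
	}

	static void my_complete_command(struct scsi_cmnd *cmd, u32 residual)
	{
		scsi_dma_unmap(cmd);		/* pairs with scsi_dma_map() */
		scsi_set_resid(cmd, residual);	/* replaces cmd->resid = ... */
	}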
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 8c7d2bbf9b1a..2e2362d787ca 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2078,12 +2078,13 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2078 u32 *lenptr; 2078 u32 *lenptr;
2079 int direction; 2079 int direction;
2080 int scsidir; 2080 int scsidir;
2081 int nseg;
2081 u32 len; 2082 u32 len;
2082 u32 reqlen; 2083 u32 reqlen;
2083 s32 rcode; 2084 s32 rcode;
2084 2085
2085 memset(msg, 0 , sizeof(msg)); 2086 memset(msg, 0 , sizeof(msg));
2086 len = cmd->request_bufflen; 2087 len = scsi_bufflen(cmd);
2087 direction = 0x00000000; 2088 direction = 0x00000000;
2088 2089
2089 scsidir = 0x00000000; // DATA NO XFER 2090 scsidir = 0x00000000; // DATA NO XFER
@@ -2140,21 +2141,21 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2140 lenptr=mptr++; /* Remember me - fill in when we know */ 2141 lenptr=mptr++; /* Remember me - fill in when we know */
2141 reqlen = 14; // SINGLE SGE 2142 reqlen = 14; // SINGLE SGE
2142 /* Now fill in the SGList and command */ 2143 /* Now fill in the SGList and command */
2143 if(cmd->use_sg) {
2144 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
2145 int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg,
2146 cmd->sc_data_direction);
2147 2144
2145 nseg = scsi_dma_map(cmd);
2146 BUG_ON(nseg < 0);
2147 if (nseg) {
2148 struct scatterlist *sg;
2148 2149
2149 len = 0; 2150 len = 0;
2150 for(i = 0 ; i < sg_count; i++) { 2151 scsi_for_each_sg(cmd, sg, nseg, i) {
2151 *mptr++ = direction|0x10000000|sg_dma_len(sg); 2152 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2152 len+=sg_dma_len(sg); 2153 len+=sg_dma_len(sg);
2153 *mptr++ = sg_dma_address(sg); 2154 *mptr++ = sg_dma_address(sg);
2154 sg++; 2155 /* Make this an end of list */
2156 if (i == nseg - 1)
2157 mptr[-2] = direction|0xD0000000|sg_dma_len(sg);
2155 } 2158 }
2156 /* Make this an end of list */
2157 mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1);
2158 reqlen = mptr - msg; 2159 reqlen = mptr - msg;
2159 *lenptr = len; 2160 *lenptr = len;
2160 2161
@@ -2163,16 +2164,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
2163 len, cmd->underflow); 2164 len, cmd->underflow);
2164 } 2165 }
2165 } else { 2166 } else {
2166 *lenptr = len = cmd->request_bufflen; 2167 *lenptr = len = 0;
2167 if(len == 0) { 2168 reqlen = 12;
2168 reqlen = 12;
2169 } else {
2170 *mptr++ = 0xD0000000|direction|cmd->request_bufflen;
2171 *mptr++ = pci_map_single(pHba->pDev,
2172 cmd->request_buffer,
2173 cmd->request_bufflen,
2174 cmd->sc_data_direction);
2175 }
2176 } 2169 }
2177 2170
2178 /* Stick the headers on */ 2171 /* Stick the headers on */
@@ -2232,7 +2225,7 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2232 hba_status = detailed_status >> 8; 2225 hba_status = detailed_status >> 8;
2233 2226
2234 // calculate resid for sg 2227 // calculate resid for sg
2235 cmd->resid = cmd->request_bufflen - readl(reply+5); 2228 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+5));
2236 2229
2237 pHba = (adpt_hba*) cmd->device->host->hostdata[0]; 2230 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2238 2231
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 2d38025861a5..a83e9f150b97 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1609,8 +1609,9 @@ static int eata2x_detect(struct scsi_host_template *tpnt)
1609 1609
1610static void map_dma(unsigned int i, struct hostdata *ha) 1610static void map_dma(unsigned int i, struct hostdata *ha)
1611{ 1611{
1612 unsigned int k, count, pci_dir; 1612 unsigned int k, pci_dir;
1613 struct scatterlist *sgpnt; 1613 int count;
1614 struct scatterlist *sg;
1614 struct mscp *cpp; 1615 struct mscp *cpp;
1615 struct scsi_cmnd *SCpnt; 1616 struct scsi_cmnd *SCpnt;
1616 1617
@@ -1625,38 +1626,19 @@ static void map_dma(unsigned int i, struct hostdata *ha)
1625 1626
1626 cpp->sense_len = sizeof SCpnt->sense_buffer; 1627 cpp->sense_len = sizeof SCpnt->sense_buffer;
1627 1628
1628 if (!SCpnt->use_sg) { 1629 count = scsi_dma_map(SCpnt);
1629 1630 BUG_ON(count < 0);
1630 /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */ 1631 scsi_for_each_sg(SCpnt, sg, count, k) {
1631 if (!SCpnt->request_bufflen) 1632 cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
1632 pci_dir = PCI_DMA_BIDIRECTIONAL; 1633 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
1633
1634 if (SCpnt->request_buffer)
1635 cpp->data_address = H2DEV(pci_map_single(ha->pdev,
1636 SCpnt->
1637 request_buffer,
1638 SCpnt->
1639 request_bufflen,
1640 pci_dir));
1641
1642 cpp->data_len = H2DEV(SCpnt->request_bufflen);
1643 return;
1644 }
1645
1646 sgpnt = (struct scatterlist *)SCpnt->request_buffer;
1647 count = pci_map_sg(ha->pdev, sgpnt, SCpnt->use_sg, pci_dir);
1648
1649 for (k = 0; k < count; k++) {
1650 cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k]));
1651 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k]));
1652 } 1634 }
1653 1635
1654 cpp->sg = 1; 1636 cpp->sg = 1;
1655 cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist, 1637 cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist,
1656 SCpnt->use_sg * 1638 scsi_sg_count(SCpnt) *
1657 sizeof(struct sg_list), 1639 sizeof(struct sg_list),
1658 pci_dir)); 1640 pci_dir));
1659 cpp->data_len = H2DEV((SCpnt->use_sg * sizeof(struct sg_list))); 1641 cpp->data_len = H2DEV((scsi_sg_count(SCpnt) * sizeof(struct sg_list)));
1660} 1642}
1661 1643
1662static void unmap_dma(unsigned int i, struct hostdata *ha) 1644static void unmap_dma(unsigned int i, struct hostdata *ha)
@@ -1673,9 +1655,7 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
1673 pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr), 1655 pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
1674 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1656 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1675 1657
1676 if (SCpnt->use_sg) 1658 scsi_dma_unmap(SCpnt);
1677 pci_unmap_sg(ha->pdev, SCpnt->request_buffer, SCpnt->use_sg,
1678 pci_dir);
1679 1659
1680 if (!DEV2H(cpp->data_len)) 1660 if (!DEV2H(cpp->data_len))
1681 pci_dir = PCI_DMA_BIDIRECTIONAL; 1661 pci_dir = PCI_DMA_BIDIRECTIONAL;
@@ -1700,9 +1680,9 @@ static void sync_dma(unsigned int i, struct hostdata *ha)
1700 DEV2H(cpp->sense_len), 1680 DEV2H(cpp->sense_len),
1701 PCI_DMA_FROMDEVICE); 1681 PCI_DMA_FROMDEVICE);
1702 1682
1703 if (SCpnt->use_sg) 1683 if (scsi_sg_count(SCpnt))
1704 pci_dma_sync_sg_for_cpu(ha->pdev, SCpnt->request_buffer, 1684 pci_dma_sync_sg_for_cpu(ha->pdev, scsi_sglist(SCpnt),
1705 SCpnt->use_sg, pci_dir); 1685 scsi_sg_count(SCpnt), pci_dir);
1706 1686
1707 if (!DEV2H(cpp->data_len)) 1687 if (!DEV2H(cpp->data_len))
1708 pci_dir = PCI_DMA_BIDIRECTIONAL; 1688 pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 71caf2ded6ba..77b06a983fa7 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -324,17 +324,14 @@ static void esp_reset_esp(struct esp *esp)
324static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) 324static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
325{ 325{
326 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); 326 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
327 struct scatterlist *sg = cmd->request_buffer; 327 struct scatterlist *sg = scsi_sglist(cmd);
328 int dir = cmd->sc_data_direction; 328 int dir = cmd->sc_data_direction;
329 int total, i; 329 int total, i;
330 330
331 if (dir == DMA_NONE) 331 if (dir == DMA_NONE)
332 return; 332 return;
333 333
334 BUG_ON(cmd->use_sg == 0); 334 spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
335
336 spriv->u.num_sg = esp->ops->map_sg(esp, sg,
337 cmd->use_sg, dir);
338 spriv->cur_residue = sg_dma_len(sg); 335 spriv->cur_residue = sg_dma_len(sg);
339 spriv->cur_sg = sg; 336 spriv->cur_sg = sg;
340 337
@@ -407,8 +404,7 @@ static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
407 if (dir == DMA_NONE) 404 if (dir == DMA_NONE)
408 return; 405 return;
409 406
410 esp->ops->unmap_sg(esp, cmd->request_buffer, 407 esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
411 spriv->u.num_sg, dir);
412} 408}
413 409
414static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) 410static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -921,7 +917,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
921static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 917static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
922{ 918{
923 struct scsi_device *dev = cmd->device; 919 struct scsi_device *dev = cmd->device;
924 struct esp *esp = host_to_esp(dev->host); 920 struct esp *esp = shost_priv(dev->host);
925 struct esp_cmd_priv *spriv; 921 struct esp_cmd_priv *spriv;
926 struct esp_cmd_entry *ent; 922 struct esp_cmd_entry *ent;
927 923
@@ -2357,7 +2353,7 @@ EXPORT_SYMBOL(scsi_esp_unregister);
2357 2353
2358static int esp_slave_alloc(struct scsi_device *dev) 2354static int esp_slave_alloc(struct scsi_device *dev)
2359{ 2355{
2360 struct esp *esp = host_to_esp(dev->host); 2356 struct esp *esp = shost_priv(dev->host);
2361 struct esp_target_data *tp = &esp->target[dev->id]; 2357 struct esp_target_data *tp = &esp->target[dev->id];
2362 struct esp_lun_data *lp; 2358 struct esp_lun_data *lp;
2363 2359
@@ -2381,7 +2377,7 @@ static int esp_slave_alloc(struct scsi_device *dev)
2381 2377
2382static int esp_slave_configure(struct scsi_device *dev) 2378static int esp_slave_configure(struct scsi_device *dev)
2383{ 2379{
2384 struct esp *esp = host_to_esp(dev->host); 2380 struct esp *esp = shost_priv(dev->host);
2385 struct esp_target_data *tp = &esp->target[dev->id]; 2381 struct esp_target_data *tp = &esp->target[dev->id];
2386 int goal_tags, queue_depth; 2382 int goal_tags, queue_depth;
2387 2383
@@ -2423,7 +2419,7 @@ static void esp_slave_destroy(struct scsi_device *dev)
2423 2419
2424static int esp_eh_abort_handler(struct scsi_cmnd *cmd) 2420static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2425{ 2421{
2426 struct esp *esp = host_to_esp(cmd->device->host); 2422 struct esp *esp = shost_priv(cmd->device->host);
2427 struct esp_cmd_entry *ent, *tmp; 2423 struct esp_cmd_entry *ent, *tmp;
2428 struct completion eh_done; 2424 struct completion eh_done;
2429 unsigned long flags; 2425 unsigned long flags;
@@ -2539,7 +2535,7 @@ out_failure:
2539 2535
2540static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) 2536static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2541{ 2537{
2542 struct esp *esp = host_to_esp(cmd->device->host); 2538 struct esp *esp = shost_priv(cmd->device->host);
2543 struct completion eh_reset; 2539 struct completion eh_reset;
2544 unsigned long flags; 2540 unsigned long flags;
2545 2541
@@ -2575,7 +2571,7 @@ static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2575/* All bets are off, reset the entire device. */ 2571/* All bets are off, reset the entire device. */
2576static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) 2572static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2577{ 2573{
2578 struct esp *esp = host_to_esp(cmd->device->host); 2574 struct esp *esp = shost_priv(cmd->device->host);
2579 unsigned long flags; 2575 unsigned long flags;
2580 2576
2581 spin_lock_irqsave(esp->host->host_lock, flags); 2577 spin_lock_irqsave(esp->host->host_lock, flags);
@@ -2615,7 +2611,7 @@ EXPORT_SYMBOL(scsi_esp_template);
2615 2611
2616static void esp_get_signalling(struct Scsi_Host *host) 2612static void esp_get_signalling(struct Scsi_Host *host)
2617{ 2613{
2618 struct esp *esp = host_to_esp(host); 2614 struct esp *esp = shost_priv(host);
2619 enum spi_signal_type type; 2615 enum spi_signal_type type;
2620 2616
2621 if (esp->flags & ESP_FLAG_DIFFERENTIAL) 2617 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
@@ -2629,7 +2625,7 @@ static void esp_get_signalling(struct Scsi_Host *host)
2629static void esp_set_offset(struct scsi_target *target, int offset) 2625static void esp_set_offset(struct scsi_target *target, int offset)
2630{ 2626{
2631 struct Scsi_Host *host = dev_to_shost(target->dev.parent); 2627 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2632 struct esp *esp = host_to_esp(host); 2628 struct esp *esp = shost_priv(host);
2633 struct esp_target_data *tp = &esp->target[target->id]; 2629 struct esp_target_data *tp = &esp->target[target->id];
2634 2630
2635 tp->nego_goal_offset = offset; 2631 tp->nego_goal_offset = offset;
@@ -2639,7 +2635,7 @@ static void esp_set_offset(struct scsi_target *target, int offset)
2639static void esp_set_period(struct scsi_target *target, int period) 2635static void esp_set_period(struct scsi_target *target, int period)
2640{ 2636{
2641 struct Scsi_Host *host = dev_to_shost(target->dev.parent); 2637 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2642 struct esp *esp = host_to_esp(host); 2638 struct esp *esp = shost_priv(host);
2643 struct esp_target_data *tp = &esp->target[target->id]; 2639 struct esp_target_data *tp = &esp->target[target->id];
2644 2640
2645 tp->nego_goal_period = period; 2641 tp->nego_goal_period = period;
@@ -2649,7 +2645,7 @@ static void esp_set_period(struct scsi_target *target, int period)
2649static void esp_set_width(struct scsi_target *target, int width) 2645static void esp_set_width(struct scsi_target *target, int width)
2650{ 2646{
2651 struct Scsi_Host *host = dev_to_shost(target->dev.parent); 2647 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2652 struct esp *esp = host_to_esp(host); 2648 struct esp *esp = shost_priv(host);
2653 struct esp_target_data *tp = &esp->target[target->id]; 2649 struct esp_target_data *tp = &esp->target[target->id];
2654 2650
2655 tp->nego_goal_width = (width ? 1 : 0); 2651 tp->nego_goal_width = (width ? 1 : 0);
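
The host_to_esp() to shost_priv() conversion above (and the macro removal in the header hunk below) leans on the generic helper from <scsi/scsi_host.h>, which returns the private area scsi_host_alloc() reserves. A minimal sketch:

	#include <scsi/scsi_host.h>

	static struct esp *example_to_esp(struct Scsi_Host *host)
	{
		/* shost_priv() returns &host->hostdata[0]; the cast
		 * macro it replaces open-coded the same thing */
		return shost_priv(host);
	}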
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8d4a6690401f..d5576d54ce76 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -517,8 +517,6 @@ struct esp {
517 struct sbus_dma *dma; 517 struct sbus_dma *dma;
518}; 518};
519 519
520#define host_to_esp(host) ((struct esp *)(host)->hostdata)
521
522/* A front-end driver for the ESP chip should do the following in 520/* A front-end driver for the ESP chip should do the following in
523 * it's device probe routine: 521 * it's device probe routine:
524 * 1) Allocate the host and private area using scsi_host_alloc() 522 * 1) Allocate the host and private area using scsi_host_alloc()
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 5d4ea6f77953..36169d597e98 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -410,6 +410,8 @@ static irqreturn_t do_fdomain_16x0_intr( int irq, void *dev_id );
410static char * fdomain = NULL; 410static char * fdomain = NULL;
411module_param(fdomain, charp, 0); 411module_param(fdomain, charp, 0);
412 412
413#ifndef PCMCIA
414
413static unsigned long addresses[] = { 415static unsigned long addresses[] = {
414 0xc8000, 416 0xc8000,
415 0xca000, 417 0xca000,
@@ -426,6 +428,8 @@ static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
426 428
427static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 }; 429static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
428 430
431#endif /* !PCMCIA */
432
429/* 433/*
430 434
431 READ THIS BEFORE YOU ADD A SIGNATURE! 435 READ THIS BEFORE YOU ADD A SIGNATURE!
@@ -458,6 +462,8 @@ static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
458 462
459*/ 463*/
460 464
465#ifndef PCMCIA
466
461static struct signature { 467static struct signature {
462 const char *signature; 468 const char *signature;
463 int sig_offset; 469 int sig_offset;
@@ -503,6 +509,8 @@ static struct signature {
503 509
504#define SIGNATURE_COUNT ARRAY_SIZE(signatures) 510#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
505 511
512#endif /* !PCMCIA */
513
506static void print_banner( struct Scsi_Host *shpnt ) 514static void print_banner( struct Scsi_Host *shpnt )
507{ 515{
508 if (!shpnt) return; /* This won't ever happen */ 516 if (!shpnt) return; /* This won't ever happen */
@@ -633,6 +641,8 @@ static int fdomain_test_loopback( void )
633 return 0; 641 return 0;
634} 642}
635 643
644#ifndef PCMCIA
645
636/* fdomain_get_irq assumes that we have a valid MCA ID for a 646/* fdomain_get_irq assumes that we have a valid MCA ID for a
637 TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the 647 TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
638 bios_base matches these ports. If someone was unlucky enough to have 648 bios_base matches these ports. If someone was unlucky enough to have
@@ -667,7 +677,6 @@ static int fdomain_get_irq( int base )
667 677
668static int fdomain_isa_detect( int *irq, int *iobase ) 678static int fdomain_isa_detect( int *irq, int *iobase )
669{ 679{
670#ifndef PCMCIA
671 int i, j; 680 int i, j;
672 int base = 0xdeadbeef; 681 int base = 0xdeadbeef;
673 int flag = 0; 682 int flag = 0;
@@ -786,11 +795,22 @@ found:
786 *iobase = base; 795 *iobase = base;
787 796
788 return 1; /* success */ 797 return 1; /* success */
789#else
790 return 0;
791#endif
792} 798}
793 799
800#else /* PCMCIA */
801
802static int fdomain_isa_detect( int *irq, int *iobase )
803{
804 if (irq)
805 *irq = 0;
806 if (iobase)
807 *iobase = 0;
808 return 0;
809}
810
811#endif /* !PCMCIA */
812
813
794/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int* 814/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
795 iobase) This function gets the Interrupt Level and I/O base address from 815 iobase) This function gets the Interrupt Level and I/O base address from
796 the PCI configuration registers. */ 816 the PCI configuration registers. */
@@ -1345,16 +1365,15 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
1345 1365
1346#if ERRORS_ONLY 1366#if ERRORS_ONLY
1347 if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) { 1367 if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
1348 if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) { 1368 char *buf = scsi_sglist(current_SC);
1369 if ((unsigned char)(*(buf + 2)) & 0x0f) {
1349 unsigned char key; 1370 unsigned char key;
1350 unsigned char code; 1371 unsigned char code;
1351 unsigned char qualifier; 1372 unsigned char qualifier;
1352 1373
1353 key = (unsigned char)(*((char *)current_SC->request_buffer + 2)) 1374 key = (unsigned char)(*(buf + 2)) & 0x0f;
1354 & 0x0f; 1375 code = (unsigned char)(*(buf + 12));
1355 code = (unsigned char)(*((char *)current_SC->request_buffer + 12)); 1376 qualifier = (unsigned char)(*(buf + 13));
1356 qualifier = (unsigned char)(*((char *)current_SC->request_buffer
1357 + 13));
1358 1377
1359 if (key != UNIT_ATTENTION 1378 if (key != UNIT_ATTENTION
1360 && !(key == NOT_READY 1379 && !(key == NOT_READY
@@ -1405,8 +1424,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
1405 printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n", 1424 printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
1406 SCpnt->target, 1425 SCpnt->target,
1407 *(unsigned char *)SCpnt->cmnd, 1426 *(unsigned char *)SCpnt->cmnd,
1408 SCpnt->use_sg, 1427 scsi_sg_count(SCpnt),
1409 SCpnt->request_bufflen ); 1428 scsi_bufflen(SCpnt));
1410#endif 1429#endif
1411 1430
1412 fdomain_make_bus_idle(); 1431 fdomain_make_bus_idle();
@@ -1416,20 +1435,19 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
1416 1435
1417 /* Initialize static data */ 1436 /* Initialize static data */
1418 1437
1419 if (current_SC->use_sg) { 1438 if (scsi_sg_count(current_SC)) {
1420 current_SC->SCp.buffer = 1439 current_SC->SCp.buffer = scsi_sglist(current_SC);
1421 (struct scatterlist *)current_SC->request_buffer; 1440 current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
1422 current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset; 1441 + current_SC->SCp.buffer->offset;
1423 current_SC->SCp.this_residual = current_SC->SCp.buffer->length; 1442 current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
1424 current_SC->SCp.buffers_residual = current_SC->use_sg - 1; 1443 current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
1425 } else { 1444 } else {
1426 current_SC->SCp.ptr = (char *)current_SC->request_buffer; 1445 current_SC->SCp.ptr = 0;
1427 current_SC->SCp.this_residual = current_SC->request_bufflen; 1446 current_SC->SCp.this_residual = 0;
1428 current_SC->SCp.buffer = NULL; 1447 current_SC->SCp.buffer = NULL;
1429 current_SC->SCp.buffers_residual = 0; 1448 current_SC->SCp.buffers_residual = 0;
1430 } 1449 }
1431 1450
1432
1433 current_SC->SCp.Status = 0; 1451 current_SC->SCp.Status = 0;
1434 current_SC->SCp.Message = 0; 1452 current_SC->SCp.Message = 0;
1435 current_SC->SCp.have_data_in = 0; 1453 current_SC->SCp.have_data_in = 0;
@@ -1472,8 +1490,8 @@ static void print_info(struct scsi_cmnd *SCpnt)
1472 SCpnt->SCp.phase, 1490 SCpnt->SCp.phase,
1473 SCpnt->device->id, 1491 SCpnt->device->id,
1474 *(unsigned char *)SCpnt->cmnd, 1492 *(unsigned char *)SCpnt->cmnd,
1475 SCpnt->use_sg, 1493 scsi_sg_count(SCpnt),
1476 SCpnt->request_bufflen ); 1494 scsi_bufflen(SCpnt));
1477 printk( "sent_command = %d, have_data_in = %d, timeout = %d\n", 1495 printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
1478 SCpnt->SCp.sent_command, 1496 SCpnt->SCp.sent_command,
1479 SCpnt->SCp.have_data_in, 1497 SCpnt->SCp.have_data_in,
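
The fdomain reshuffle above swaps in-function #ifndef PCMCIA guards for whole-function alternatives, so each configuration compiles a complete, readable body. The shape of the pattern in a generic sketch:

	#ifndef PCMCIA

	static int my_isa_detect(int *irq, int *iobase)
	{
		/* full ISA probe: BIOS signatures, port scan, IRQ lookup... */
		return 1;	/* found */
	}

	#else /* PCMCIA */

	static int my_isa_detect(int *irq, int *iobase)
	{
		/* PCMCIA build: no ISA probing, report nothing found */
		if (irq)
			*irq = 0;
		if (iobase)
			*iobase = 0;
		return 0;
	}

	#endif /* !PCMCIA */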
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 60446b88f721..d0b95ce0ba00 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -876,7 +876,7 @@ static int __init gdth_search_pci(gdth_pci_str *pcistr)
876/* Vortex only makes RAID controllers. 876/* Vortex only makes RAID controllers.
877 * We do not really want to specify all 550 ids here, so wildcard match. 877 * We do not really want to specify all 550 ids here, so wildcard match.
878 */ 878 */
879static struct pci_device_id gdthtable[] __attribute_used__ = { 879static struct pci_device_id gdthtable[] __maybe_unused = {
880 {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID}, 880 {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID},
881 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID}, 881 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID},
882 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID}, 882 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID},
@@ -1955,7 +1955,7 @@ static int __init gdth_search_drives(int hanum)
1955 for (j = 0; j < 12; ++j) 1955 for (j = 0; j < 12; ++j)
1956 rtc[j] = CMOS_READ(j); 1956 rtc[j] = CMOS_READ(j);
1957 } while (rtc[0] != CMOS_READ(0)); 1957 } while (rtc[0] != CMOS_READ(0));
1958 spin_lock_irqrestore(&rtc_lock, flags); 1958 spin_unlock_irqrestore(&rtc_lock, flags);
1959 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0], 1959 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
1960 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8])); 1960 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
1961 /* 3. send to controller firmware */ 1961 /* 3. send to controller firmware */
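
The gdth hunk above is a lock-pairing fix: the RTC read section saved IRQ state with spin_lock_irqsave() but never dropped the lock afterwards, leaving rtc_lock held. The required pairing, sketched generically:

	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	/* ... read the RTC registers ... */
	spin_unlock_irqrestore(&rtc_lock, flags);	/* must mirror the save */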
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index bec83cbee59a..0e579ca45814 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -339,20 +339,8 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
339 339
340 scp = hba->reqs[tag].scp; 340 scp = hba->reqs[tag].scp;
341 341
342 if (HPT_SCP(scp)->mapped) { 342 if (HPT_SCP(scp)->mapped)
343 if (scp->use_sg) 343 scsi_dma_unmap(scp);
344 pci_unmap_sg(hba->pcidev,
345 (struct scatterlist *)scp->request_buffer,
346 scp->use_sg,
347 scp->sc_data_direction
348 );
349 else
350 pci_unmap_single(hba->pcidev,
351 HPT_SCP(scp)->dma_handle,
352 scp->request_bufflen,
353 scp->sc_data_direction
354 );
355 }
356 344
357 switch (le32_to_cpu(req->header.result)) { 345 switch (le32_to_cpu(req->header.result)) {
358 case IOP_RESULT_SUCCESS: 346 case IOP_RESULT_SUCCESS:
@@ -448,43 +436,26 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
448{ 436{
449 struct Scsi_Host *host = scp->device->host; 437 struct Scsi_Host *host = scp->device->host;
450 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; 438 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
451 struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer; 439 struct scatterlist *sg;
452 440 int idx, nseg;
453 /* 441
454 * though we'll not get non-use_sg fields anymore, 442 nseg = scsi_dma_map(scp);
455 * keep use_sg checking anyway 443 BUG_ON(nseg < 0);
456 */ 444 if (!nseg)
457 if (scp->use_sg) { 445 return 0;
458 int idx;
459
460 HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
461 sglist, scp->use_sg,
462 scp->sc_data_direction);
463 HPT_SCP(scp)->mapped = 1;
464 BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
465
466 for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
467 psg[idx].pci_address =
468 cpu_to_le64(sg_dma_address(&sglist[idx]));
469 psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
470 psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
471 cpu_to_le32(1) : 0;
472 }
473 446
474 return HPT_SCP(scp)->sgcnt; 447 HPT_SCP(scp)->sgcnt = nseg;
475 } else { 448 HPT_SCP(scp)->mapped = 1;
476 HPT_SCP(scp)->dma_handle = pci_map_single( 449
477 hba->pcidev, 450 BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
478 scp->request_buffer, 451
479 scp->request_bufflen, 452 scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
480 scp->sc_data_direction 453 psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
481 ); 454 psg[idx].size = cpu_to_le32(sg_dma_len(sg));
482 HPT_SCP(scp)->mapped = 1; 455 psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
483 psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle); 456 cpu_to_le32(1) : 0;
484 psg->size = cpu_to_le32(scp->request_bufflen);
485 psg->eot = cpu_to_le32(1);
486 return 1;
487 } 457 }
458 return HPT_SCP(scp)->sgcnt;
488} 459}
489 460
490static int hptiop_queuecommand(struct scsi_cmnd *scp, 461static int hptiop_queuecommand(struct scsi_cmnd *scp,
@@ -529,9 +500,8 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
529 req = (struct hpt_iop_request_scsi_command *)_req->req_virt; 500 req = (struct hpt_iop_request_scsi_command *)_req->req_virt;
530 501
531 /* build S/G table */ 502 /* build S/G table */
532 if (scp->request_bufflen) 503 sg_count = hptiop_buildsgl(scp, req->sg_list);
533 sg_count = hptiop_buildsgl(scp, req->sg_list); 504 if (!sg_count)
534 else
535 HPT_SCP(scp)->mapped = 0; 505 HPT_SCP(scp)->mapped = 0;
536 506
537 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); 507 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
@@ -540,7 +510,7 @@ static int hptiop_queuecommand(struct scsi_cmnd *scp,
540 req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT | 510 req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
541 (u32)_req->index); 511 (u32)_req->index);
542 req->header.context_hi32 = 0; 512 req->header.context_hi32 = 0;
543 req->dataxfer_length = cpu_to_le32(scp->request_bufflen); 513 req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
544 req->channel = scp->device->channel; 514 req->channel = scp->device->channel;
545 req->target = scp->device->id; 515 req->target = scp->device->id;
546 req->lun = scp->device->lun; 516 req->lun = scp->device->lun;
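
Both the hptiop loop above and the dpt_i2o one earlier must tag the final scatter-gather element from inside scsi_for_each_sg(), since the accessor loop hides the raw list pointer the old code walked. Two equivalent shapes of the last-entry test, sketched with hypothetical descriptor fields:

	/* set a flag field on the last descriptor (hptiop style) */
	scsi_for_each_sg(cmd, sg, nseg, i)
		desc[i].last = (i == nseg - 1);

	/* or patch the word already written once the loop knows it is on
	 * the last element (dpt_i2o style; mptr points past the entry) */
	scsi_for_each_sg(cmd, sg, nseg, i) {
		*mptr++ = sg_dma_len(sg);
		*mptr++ = sg_dma_address(sg);
		if (i == nseg - 1)
			mptr[-2] |= END_OF_LIST_FLAG;	/* hypothetical flag */
	}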
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 0e57fb6964d5..4275d1b04ced 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -31,14 +31,21 @@
31#include <linux/mca.h> 31#include <linux/mca.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/mca-legacy.h>
35 34
36#include <asm/system.h> 35#include <asm/system.h>
37#include <asm/io.h> 36#include <asm/io.h>
38 37
39#include "scsi.h" 38#include "scsi.h"
40#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
41#include "ibmmca.h" 40
41/* Common forward declarations for all Linux-versions: */
42static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
43static int ibmmca_abort (Scsi_Cmnd *);
44static int ibmmca_host_reset (Scsi_Cmnd *);
45static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
46static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
47
48
42 49
43/* current version of this driver-source: */ 50/* current version of this driver-source: */
44#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac" 51#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
@@ -65,11 +72,11 @@
 #define IM_DEBUG_CMD_DEVICE	TYPE_TAPE
 
 /* relative addresses of hardware registers on a subsystem */
-#define IM_CMD_REG(hi)	(hosts[(hi)]->io_port)	/*Command Interface, (4 bytes long) */
-#define IM_ATTN_REG(hi)	(hosts[(hi)]->io_port+4)	/*Attention (1 byte) */
-#define IM_CTR_REG(hi)	(hosts[(hi)]->io_port+5)	/*Basic Control (1 byte) */
-#define IM_INTR_REG(hi)	(hosts[(hi)]->io_port+6)	/*Interrupt Status (1 byte, r/o) */
-#define IM_STAT_REG(hi)	(hosts[(hi)]->io_port+7)	/*Basic Status (1 byte, read only) */
+#define IM_CMD_REG(h)	((h)->io_port)	/*Command Interface, (4 bytes long) */
+#define IM_ATTN_REG(h)	((h)->io_port+4)	/*Attention (1 byte) */
+#define IM_CTR_REG(h)	((h)->io_port+5)	/*Basic Control (1 byte) */
+#define IM_INTR_REG(h)	((h)->io_port+6)	/*Interrupt Status (1 byte, r/o) */
+#define IM_STAT_REG(h)	((h)->io_port+7)	/*Basic Status (1 byte, read only) */
 
 /* basic I/O-port of first adapter */
 #define IM_IO_PORT	0x3540
@@ -266,30 +273,36 @@ static int global_adapter_speed = 0; /* full speed by default */
 	if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
 	outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
 
-/*list of supported subsystems */
-struct subsys_list_struct {
-	unsigned short mca_id;
-	char *description;
-};
-
 /* types of different supported hardware that goes to hostdata special */
 #define IBM_SCSI2_FW		0
 #define IBM_7568_WCACHE		1
 #define IBM_EXP_UNIT		2
 #define IBM_SCSI_WCACHE		3
 #define IBM_SCSI		4
+#define IBM_INTEGSCSI		5
 
 /* other special flags for hostdata structure */
 #define FORCED_DETECTION	100
 #define INTEGRATED_SCSI		101
 
 /* List of possible IBM-SCSI-adapters */
-static struct subsys_list_struct subsys_list[] = {
-	{0x8efc, "IBM SCSI-2 F/W Adapter"},	/* special = 0 */
-	{0x8efd, "IBM 7568 Industrial Computer SCSI Adapter w/Cache"},	/* special = 1 */
-	{0x8ef8, "IBM Expansion Unit SCSI Controller"},	/* special = 2 */
-	{0x8eff, "IBM SCSI Adapter w/Cache"},	/* special = 3 */
-	{0x8efe, "IBM SCSI Adapter"},	/* special = 4 */
+static short ibmmca_id_table[] = {
+	0x8efc,
+	0x8efd,
+	0x8ef8,
+	0x8eff,
+	0x8efe,
+	/* No entry for integrated SCSI, that's part of the register */
+	0
+};
+
+static const char *ibmmca_description[] = {
+	"IBM SCSI-2 F/W Adapter",	/* special = 0 */
+	"IBM 7568 Industrial Computer SCSI Adapter w/Cache",	/* special = 1 */
+	"IBM Expansion Unit SCSI Controller",	/* special = 2 */
+	"IBM SCSI Adapter w/Cache",	/* special = 3 */
+	"IBM SCSI Adapter",	/* special = 4 */
+	"IBM Integrated SCSI Controller",	/* special = 5 */
 };
 
 /* Max number of logical devices (can be up from 0 to 14). 15 is the address
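
Splitting the old subsys_list into a bare, zero-terminated ibmmca_id_table plus a parallel ibmmca_description[] is what a conversion to the MCA bus driver model wants: matching is done on the short POS-id array alone, and the description is looked up by index afterwards. Roughly, as a sketch only (the registration details below are assumptions about how such a table is typically hooked up, not part of this hunk):

	static struct mca_driver ibmmca_driver = {
		.id_table = ibmmca_id_table,	/* matched against the adapter's POS id */
		.driver = {
			.name  = "ibmmca",
			.bus   = &mca_bus_type,
			.probe = ibmmca_probe,	/* hypothetical probe; receives the matched device */
		},
	};

	/* module init would then call mca_register_driver(&ibmmca_driver); */
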
@@ -375,30 +388,30 @@ struct ibmmca_hostdata {
 };
 
 /* macros to access host data structure */
-#define subsystem_pun(hi) (hosts[(hi)]->this_id)
-#define subsystem_maxid(hi) (hosts[(hi)]->max_id)
-#define ld(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_ld)
-#define get_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_ldn)
-#define get_scsi(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_scsi)
-#define local_checking_phase_flag(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_local_checking_phase_flag)
-#define got_interrupt(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_got_interrupt)
-#define stat_result(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_stat_result)
-#define reset_status(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_reset_status)
-#define last_scsi_command(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_command)
-#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type)
-#define last_scsi_blockcount(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_blockcount)
-#define last_scsi_logical_block(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_logical_block)
-#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type)
-#define next_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_next_ldn)
-#define IBM_DS(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_IBM_DS)
-#define special(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_special)
-#define subsystem_connector_size(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_connector_size)
-#define adapter_speed(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_adapter_speed)
-#define pos2(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[2])
-#define pos3(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[3])
-#define pos4(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[4])
-#define pos5(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[5])
-#define pos6(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[6])
+#define subsystem_pun(h) ((h)->this_id)
+#define subsystem_maxid(h) ((h)->max_id)
+#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
+#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
+#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
+#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
+#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
+#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
+#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
+#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
+#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
+#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
+#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
+#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
+#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
+#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
+#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
+#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
+#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
+#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
+#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
+#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
+#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
+#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
 
 /* Define a arbitrary number as subsystem-marker-type. This number is, as
    described in the ANSI-SCSI-standard, not occupied by other device-types. */
@@ -459,11 +472,6 @@ MODULE_LICENSE("GPL");
 /*counter of concurrent disk read/writes, to turn on/off disk led */
 static int disk_rw_in_progress = 0;
 
-/* host information */
-static int found = 0;
-static struct Scsi_Host *hosts[IM_MAX_HOSTS + 1] = {
-	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
-};
 static unsigned int pos[8];	/* whole pos register-line for diagnosis */
 /* Taking into account the additions, made by ZP Gu.
  * This selects now the preset value from the configfile and
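
Dropping the static hosts[]/found globals is the structural core of this conversion: every helper now receives the struct Scsi_Host it should operate on, and all per-adapter state lives in that host's hostdata area, which the accessor macros earlier in the file reach by cast. A sketch of the idea (the inline helper is illustrative; the driver itself uses the macros directly):

	/* Per-adapter state hangs off the Scsi_Host instead of a global table. */
	static inline struct ibmmca_hostdata *ibmmca_priv(struct Scsi_Host *shpnt)
	{
		return (struct ibmmca_hostdata *)shpnt->hostdata;
	}

	/* e.g. reset_status(h) above expands to ibmmca_priv(h)->_reset_status;
	   the hostdata area is sized at scsi_host_alloc()/scsi_register() time. */
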
@@ -474,70 +482,68 @@ static char ibm_ansi_order = 1;
 static char ibm_ansi_order = 0;
 #endif
 
-static void issue_cmd(int, unsigned long, unsigned char);
+static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
 static void internal_done(Scsi_Cmnd * cmd);
-static void check_devices(int, int);
-static int immediate_assign(int, unsigned int, unsigned int, unsigned int, unsigned int);
-static int immediate_feature(int, unsigned int, unsigned int);
+static void check_devices(struct Scsi_Host *, int);
+static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
+static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
 #ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(int, unsigned int);
+static int immediate_reset(struct Scsi_Host *, unsigned int);
 #endif
-static int device_inquiry(int, int);
-static int read_capacity(int, int);
-static int get_pos_info(int);
+static int device_inquiry(struct Scsi_Host *, int);
+static int read_capacity(struct Scsi_Host *, int);
+static int get_pos_info(struct Scsi_Host *);
 static char *ti_p(int);
 static char *ti_l(int);
 static char *ibmrate(unsigned int, int);
 static int probe_display(int);
-static int probe_bus_mode(int);
-static int device_exists(int, int, int *, int *);
-static struct Scsi_Host *ibmmca_register(struct scsi_host_template *, int, int, int, char *);
+static int probe_bus_mode(struct Scsi_Host *);
+static int device_exists(struct Scsi_Host *, int, int *, int *);
 static int option_setup(char *);
 /* local functions needed for proc_info */
-static int ldn_access_load(int, int);
-static int ldn_access_total_read_write(int);
+static int ldn_access_load(struct Scsi_Host *, int);
+static int ldn_access_total_read_write(struct Scsi_Host *);
 
 static irqreturn_t interrupt_handler(int irq, void *dev_id)
 {
-	int host_index, ihost_index;
 	unsigned int intr_reg;
 	unsigned int cmd_result;
 	unsigned int ldn;
+	unsigned long flags;
 	Scsi_Cmnd *cmd;
 	int lastSCSI;
-	struct Scsi_Host *dev = dev_id;
+	struct device *dev = dev_id;
+	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
 
-	spin_lock(dev->host_lock);
-	/* search for one adapter-response on shared interrupt */
-	for (host_index = 0; hosts[host_index] && !(inb(IM_STAT_REG(host_index)) & IM_INTR_REQUEST); host_index++);
-	/* return if some other device on this IRQ caused the interrupt */
-	if (!hosts[host_index]) {
-		spin_unlock(dev->host_lock);
+	spin_lock_irqsave(shpnt->host_lock, flags);
+
+	if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
+		spin_unlock_irqrestore(shpnt->host_lock, flags);
 		return IRQ_NONE;
 	}
 
 	/* the reset-function already did all the job, even ints got
 	   renabled on the subsystem, so just return */
-	if ((reset_status(host_index) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(host_index) == IM_RESET_FINISHED_OK_NO_INT)) {
-		reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS;
-		spin_unlock(dev->host_lock);
+	if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
+		reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
+		spin_unlock_irqrestore(shpnt->host_lock, flags);
 		return IRQ_HANDLED;
 	}
 
 	/*must wait for attention reg not busy, then send EOI to subsystem */
 	while (1) {
-		if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
 			break;
 		cpu_relax();
 	}
-	ihost_index = host_index;
+
 	/*get command result and logical device */
-	intr_reg = (unsigned char) (inb(IM_INTR_REG(ihost_index)));
+	intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
 	cmd_result = intr_reg & 0xf0;
 	ldn = intr_reg & 0x0f;
 	/* get the last_scsi_command here */
-	lastSCSI = last_scsi_command(ihost_index)[ldn];
-	outb(IM_EOI | ldn, IM_ATTN_REG(ihost_index));
+	lastSCSI = last_scsi_command(shpnt)[ldn];
+	outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
 
 	/*these should never happen (hw fails, or a local programming bug) */
 	if (!global_command_error_excuse) {
@@ -547,38 +553,38 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 			case IM_SOFTWARE_SEQUENCING_ERROR:
 			case IM_CMD_ERROR:
 				printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
-				printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(ihost_index)[ldn].scb.enable);
-				if (ld(ihost_index)[ldn].cmd)
-					printk("%ld/%ld,", (long) (ld(ihost_index)[ldn].cmd->request_bufflen), (long) (ld(ihost_index)[ldn].scb.sys_buf_length));
+				printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
+				if (ld(shpnt)[ldn].cmd)
+					printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
 				else
 					printk("none,");
-				if (ld(ihost_index)[ldn].cmd)
-					printk("Blocksize=%d", ld(ihost_index)[ldn].scb.u2.blk.length);
+				if (ld(shpnt)[ldn].cmd)
+					printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
 				else
 					printk("Blocksize=none");
-				printk(", host=0x%x, ldn=0x%x\n", ihost_index, ldn);
-				if (ld(ihost_index)[ldn].cmd) {
-					printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u2.blk.count);
-					printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u1.log_blk_adr);
+				printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
+				if (ld(shpnt)[ldn].cmd) {
+					printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
+					printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
 				}
 				printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
 				/* if errors appear, enter this section to give detailed info */
 				printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
-				printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(ihost_index)[ldn]);
-				printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(ihost_index)));
-				printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(ihost_index)));
+				printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
+				printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
+				printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
 				printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
-				printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(ihost_index)));
-				if ((last_scsi_type(ihost_index)[ldn] == IM_SCB) || (last_scsi_type(ihost_index)[ldn] == IM_LONG_SCB)) {
-					printk(KERN_ERR " SCB-Command.................: %x\n", ld(ihost_index)[ldn].scb.command);
-					printk(KERN_ERR " SCB-Enable..................: %x\n", ld(ihost_index)[ldn].scb.enable);
-					printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(ihost_index)[ldn].scb.u1.log_blk_adr);
-					printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_adr);
-					printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_length);
-					printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(ihost_index)[ldn].scb.tsb_adr);
-					printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(ihost_index)[ldn].scb.scb_chain_adr);
-					printk(KERN_ERR " SCB-block count.............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.count);
-					printk(KERN_ERR " SCB-block length............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.length);
+				printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
+				if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
+					printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
+					printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
+					printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
+					printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
+					printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
+					printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
+					printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
+					printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
+					printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
 				}
 				printk(KERN_ERR " Send this report to the maintainer.\n");
 				panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
@@ -600,72 +606,73 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 		}
 	}
 	/* if no panic appeared, increase the interrupt-counter */
-	IBM_DS(ihost_index).total_interrupts++;
+	IBM_DS(shpnt).total_interrupts++;
 	/*only for local checking phase */
-	if (local_checking_phase_flag(ihost_index)) {
-		stat_result(ihost_index) = cmd_result;
-		got_interrupt(ihost_index) = 1;
-		reset_status(ihost_index) = IM_RESET_FINISHED_OK;
-		last_scsi_command(ihost_index)[ldn] = NO_SCSI;
-		spin_unlock(dev->host_lock);
+	if (local_checking_phase_flag(shpnt)) {
+		stat_result(shpnt) = cmd_result;
+		got_interrupt(shpnt) = 1;
+		reset_status(shpnt) = IM_RESET_FINISHED_OK;
+		last_scsi_command(shpnt)[ldn] = NO_SCSI;
+		spin_unlock_irqrestore(shpnt->host_lock, flags);
 		return IRQ_HANDLED;
 	}
 	/* handling of commands coming from upper level of scsi driver */
-	if (last_scsi_type(ihost_index)[ldn] == IM_IMM_CMD) {
+	if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
 		/* verify ldn, and may handle rare reset immediate command */
-		if ((reset_status(ihost_index) == IM_RESET_IN_PROGRESS) && (last_scsi_command(ihost_index)[ldn] == IM_RESET_IMM_CMD)) {
+		if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
 			if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
 				disk_rw_in_progress = 0;
 				PS2_DISK_LED_OFF();
-				reset_status(ihost_index) = IM_RESET_FINISHED_FAIL;
+				reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
 			} else {
 				/*reset disk led counter, turn off disk led */
 				disk_rw_in_progress = 0;
 				PS2_DISK_LED_OFF();
-				reset_status(ihost_index) = IM_RESET_FINISHED_OK;
+				reset_status(shpnt) = IM_RESET_FINISHED_OK;
 			}
-			stat_result(ihost_index) = cmd_result;
-			last_scsi_command(ihost_index)[ldn] = NO_SCSI;
-			last_scsi_type(ihost_index)[ldn] = 0;
-			spin_unlock(dev->host_lock);
+			stat_result(shpnt) = cmd_result;
+			last_scsi_command(shpnt)[ldn] = NO_SCSI;
+			last_scsi_type(shpnt)[ldn] = 0;
+			spin_unlock_irqrestore(shpnt->host_lock, flags);
 			return IRQ_HANDLED;
-		} else if (last_scsi_command(ihost_index)[ldn] == IM_ABORT_IMM_CMD) {
+		} else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
 			/* react on SCSI abort command */
 #ifdef IM_DEBUG_PROBE
 			printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
 #endif
 			disk_rw_in_progress = 0;
 			PS2_DISK_LED_OFF();
-			cmd = ld(ihost_index)[ldn].cmd;
-			ld(ihost_index)[ldn].cmd = NULL;
+			cmd = ld(shpnt)[ldn].cmd;
+			ld(shpnt)[ldn].cmd = NULL;
 			if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
 				cmd->result = DID_NO_CONNECT << 16;
 			else
 				cmd->result = DID_ABORT << 16;
-			stat_result(ihost_index) = cmd_result;
-			last_scsi_command(ihost_index)[ldn] = NO_SCSI;
-			last_scsi_type(ihost_index)[ldn] = 0;
+			stat_result(shpnt) = cmd_result;
+			last_scsi_command(shpnt)[ldn] = NO_SCSI;
+			last_scsi_type(shpnt)[ldn] = 0;
 			if (cmd->scsi_done)
 				(cmd->scsi_done) (cmd);	/* should be the internal_done */
-			spin_unlock(dev->host_lock);
+			spin_unlock_irqrestore(shpnt->host_lock, flags);
 			return IRQ_HANDLED;
 		} else {
 			disk_rw_in_progress = 0;
 			PS2_DISK_LED_OFF();
-			reset_status(ihost_index) = IM_RESET_FINISHED_OK;
-			stat_result(ihost_index) = cmd_result;
-			last_scsi_command(ihost_index)[ldn] = NO_SCSI;
-			spin_unlock(dev->host_lock);
+			reset_status(shpnt) = IM_RESET_FINISHED_OK;
+			stat_result(shpnt) = cmd_result;
+			last_scsi_command(shpnt)[ldn] = NO_SCSI;
+			spin_unlock_irqrestore(shpnt->host_lock, flags);
 			return IRQ_HANDLED;
 		}
 	}
-	last_scsi_command(ihost_index)[ldn] = NO_SCSI;
-	last_scsi_type(ihost_index)[ldn] = 0;
-	cmd = ld(ihost_index)[ldn].cmd;
-	ld(ihost_index)[ldn].cmd = NULL;
+	last_scsi_command(shpnt)[ldn] = NO_SCSI;
+	last_scsi_type(shpnt)[ldn] = 0;
+	cmd = ld(shpnt)[ldn].cmd;
+	ld(shpnt)[ldn].cmd = NULL;
 #ifdef IM_DEBUG_TIMEOUT
 	if (cmd) {
 		if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
+			spin_unlock_irqsave(shpnt->host_lock, flags);
 			printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
 			return IRQ_HANDLED;
 		}
@@ -674,15 +681,15 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 	/*if no command structure, just return, else clear cmd */
 	if (!cmd)
 	{
-		spin_unlock(dev->host_lock);
+		spin_unlock_irqrestore(shpnt->host_lock, flags);
 		return IRQ_HANDLED;
 	}
 
 #ifdef IM_DEBUG_INT
-	printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(ihost_index)[ldn].tsb.dev_status, ld(ihost_index)[ldn].tsb.cmd_status, ld(ihost_index)[ldn].tsb.dev_error, ld(ihost_index)[ldn].tsb.cmd_error);
+	printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
 #endif
 	/*if this is end of media read/write, may turn off PS/2 disk led */
-	if ((ld(ihost_index)[ldn].device_type != TYPE_NO_LUN) && (ld(ihost_index)[ldn].device_type != TYPE_NO_DEVICE)) {
+	if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
 		/* only access this, if there was a valid device addressed */
 		if (--disk_rw_in_progress == 0)
 			PS2_DISK_LED_OFF();
@@ -693,8 +700,8 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 	 * adapters do not support CMD_TERMINATED, TASK_SET_FULL and
 	 * ACA_ACTIVE as returning statusbyte information. (ML) */
 	if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
-		cmd->result = (unsigned char) (ld(ihost_index)[ldn].tsb.dev_status & 0x1e);
-		IBM_DS(ihost_index).total_errors++;
+		cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
+		IBM_DS(shpnt).total_errors++;
 	} else
 		cmd->result = 0;
 	/* write device status into cmd->result, and call done function */
@@ -705,24 +712,25 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
 	cmd->result |= DID_OK << 16;
 	if (cmd->scsi_done)
 		(cmd->scsi_done) (cmd);
-	spin_unlock(dev->host_lock);
+	spin_unlock_irqrestore(shpnt->host_lock, flags);
 	return IRQ_HANDLED;
 }
 
-static void issue_cmd(int host_index, unsigned long cmd_reg, unsigned char attn_reg)
+static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
+		      unsigned char attn_reg)
 {
 	unsigned long flags;
 	/* must wait for attention reg not busy */
 	while (1) {
-		spin_lock_irqsave(hosts[host_index]->host_lock, flags);
-		if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY))
+		spin_lock_irqsave(shpnt->host_lock, flags);
+		if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
 			break;
-		spin_unlock_irqrestore(hosts[host_index]->host_lock, flags);
+		spin_unlock_irqrestore(shpnt->host_lock, flags);
 	}
 	/* write registers and enable system interrupts */
-	outl(cmd_reg, IM_CMD_REG(host_index));
-	outb(attn_reg, IM_ATTN_REG(host_index));
-	spin_unlock_irqrestore(hosts[host_index]->host_lock, flags);
+	outl(cmd_reg, IM_CMD_REG(shpnt));
+	outb(attn_reg, IM_ATTN_REG(shpnt));
+	spin_unlock_irqrestore(shpnt->host_lock, flags);
 }
 
 static void internal_done(Scsi_Cmnd * cmd)
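
The reworked interrupt_handler above shows the standard shape of a handler on a potentially shared IRQ line: the dev_id it was registered with is the adapter's struct device, the Scsi_Host is recovered via dev_get_drvdata(), and the handler returns IRQ_NONE when its own status register shows nothing pending. A stripped-down sketch under those assumptions (my_irq and my_pending are hypothetical names):

	static irqreturn_t my_irq(int irq, void *dev_id)
	{
		/* dev_id was registered as the adapter's struct device,
		   with dev_set_drvdata(dev, shpnt) done at probe time */
		struct Scsi_Host *shpnt = dev_get_drvdata(dev_id);
		unsigned long flags;

		spin_lock_irqsave(shpnt->host_lock, flags);
		if (!my_pending(shpnt)) {	/* some other device raised the line */
			spin_unlock_irqrestore(shpnt->host_lock, flags);
			return IRQ_NONE;
		}
		/* ... acknowledge the subsystem and complete the command ... */
		spin_unlock_irqrestore(shpnt->host_lock, flags);
		return IRQ_HANDLED;
	}

	/* registered roughly as: request_irq(irq, my_irq, IRQF_SHARED, "my", dev); */
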
@@ -732,34 +740,34 @@ static void internal_done(Scsi_Cmnd * cmd)
 }
 
 /* SCSI-SCB-command for device_inquiry */
-static int device_inquiry(int host_index, int ldn)
+static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
 {
 	int retr;
 	struct im_scb *scb;
 	struct im_tsb *tsb;
 	unsigned char *buf;
 
-	scb = &(ld(host_index)[ldn].scb);
-	tsb = &(ld(host_index)[ldn].tsb);
-	buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
-	ld(host_index)[ldn].tsb.dev_status = 0;	/* prepare statusblock */
+	scb = &(ld(shpnt)[ldn].scb);
+	tsb = &(ld(shpnt)[ldn].tsb);
+	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
+	ld(shpnt)[ldn].tsb.dev_status = 0;	/* prepare statusblock */
 	for (retr = 0; retr < 3; retr++) {
 		/* fill scb with inquiry command */
 		scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
 		scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
-		last_scsi_command(host_index)[ldn] = IM_DEVICE_INQUIRY_CMD;
-		last_scsi_type(host_index)[ldn] = IM_SCB;
+		last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
+		last_scsi_type(shpnt)[ldn] = IM_SCB;
 		scb->sys_buf_adr = isa_virt_to_bus(buf);
 		scb->sys_buf_length = 255;	/* maximum bufferlength gives max info */
 		scb->tsb_adr = isa_virt_to_bus(tsb);
 		/* issue scb to passed ldn, and busy wait for interrupt */
-		got_interrupt(host_index) = 0;
-		issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
-		while (!got_interrupt(host_index))
+		got_interrupt(shpnt) = 0;
+		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+		while (!got_interrupt(shpnt))
 			barrier();
 
 		/*if command successful, break */
-		if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
 			return 1;
 	}
 	/*if all three retries failed, return "no device at this ldn" */
@@ -769,34 +777,34 @@ static int device_inquiry(int host_index, int ldn)
 		return 1;
 }
 
-static int read_capacity(int host_index, int ldn)
+static int read_capacity(struct Scsi_Host *shpnt, int ldn)
 {
 	int retr;
 	struct im_scb *scb;
 	struct im_tsb *tsb;
 	unsigned char *buf;
 
-	scb = &(ld(host_index)[ldn].scb);
-	tsb = &(ld(host_index)[ldn].tsb);
-	buf = (unsigned char *) (&(ld(host_index)[ldn].buf));
-	ld(host_index)[ldn].tsb.dev_status = 0;
+	scb = &(ld(shpnt)[ldn].scb);
+	tsb = &(ld(shpnt)[ldn].tsb);
+	buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
+	ld(shpnt)[ldn].tsb.dev_status = 0;
 	for (retr = 0; retr < 3; retr++) {
 		/*fill scb with read capacity command */
 		scb->command = IM_READ_CAPACITY_CMD;
 		scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
-		last_scsi_command(host_index)[ldn] = IM_READ_CAPACITY_CMD;
-		last_scsi_type(host_index)[ldn] = IM_SCB;
+		last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
+		last_scsi_type(shpnt)[ldn] = IM_SCB;
 		scb->sys_buf_adr = isa_virt_to_bus(buf);
 		scb->sys_buf_length = 8;
 		scb->tsb_adr = isa_virt_to_bus(tsb);
 		/*issue scb to passed ldn, and busy wait for interrupt */
-		got_interrupt(host_index) = 0;
-		issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn);
-		while (!got_interrupt(host_index))
+		got_interrupt(shpnt) = 0;
+		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
+		while (!got_interrupt(shpnt))
 			barrier();
 
 		/*if got capacity, get block length and return one device found */
-		if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
 			return 1;
 	}
 	/*if all three retries failed, return "no device at this ldn" */
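
device_inquiry(), read_capacity() and get_pos_info() all share one shape: fill the SCB, record the command in last_scsi_command()/last_scsi_type(), clear got_interrupt(), issue, then busy-wait until the interrupt handler sets the flag, retrying up to three times. Condensed into one hypothetical helper (a sketch of the pattern visible above, not code from the patch):

	static int issue_and_wait(struct Scsi_Host *shpnt, int ldn, struct im_scb *scb)
	{
		int retr;

		for (retr = 0; retr < 3; retr++) {
			got_interrupt(shpnt) = 0;
			issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
			while (!got_interrupt(shpnt))	/* interrupt_handler sets this */
				barrier();
			if (stat_result(shpnt) == IM_SCB_CMD_COMPLETED ||
			    stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES)
				return 1;	/* success */
		}
		return 0;	/* all three attempts failed */
	}
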
@@ -806,39 +814,39 @@ static int read_capacity(int host_index, int ldn)
 		return 1;
 }
 
-static int get_pos_info(int host_index)
+static int get_pos_info(struct Scsi_Host *shpnt)
 {
 	int retr;
 	struct im_scb *scb;
 	struct im_tsb *tsb;
 	unsigned char *buf;
 
-	scb = &(ld(host_index)[MAX_LOG_DEV].scb);
-	tsb = &(ld(host_index)[MAX_LOG_DEV].tsb);
-	buf = (unsigned char *) (&(ld(host_index)[MAX_LOG_DEV].buf));
-	ld(host_index)[MAX_LOG_DEV].tsb.dev_status = 0;
+	scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
+	tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
+	buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
+	ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
 	for (retr = 0; retr < 3; retr++) {
 		/*fill scb with get_pos_info command */
 		scb->command = IM_GET_POS_INFO_CMD;
 		scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
-		last_scsi_command(host_index)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
-		last_scsi_type(host_index)[MAX_LOG_DEV] = IM_SCB;
+		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
+		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
 		scb->sys_buf_adr = isa_virt_to_bus(buf);
-		if (special(host_index) == IBM_SCSI2_FW)
+		if (special(shpnt) == IBM_SCSI2_FW)
 			scb->sys_buf_length = 256;	/* get all info from F/W adapter */
 		else
 			scb->sys_buf_length = 18;	/* get exactly 18 bytes for other SCSI */
 		scb->tsb_adr = isa_virt_to_bus(tsb);
 		/*issue scb to ldn=15, and busy wait for interrupt */
-		got_interrupt(host_index) = 0;
-		issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
+		got_interrupt(shpnt) = 0;
+		issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
 
 		/* FIXME: timeout */
-		while (!got_interrupt(host_index))
+		while (!got_interrupt(shpnt))
 			barrier();
 
 		/*if got POS-stuff, get block length and return one device found */
-		if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
+		if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
 			return 1;
 	}
 	/* if all three retries failed, return "no device at this ldn" */
@@ -851,14 +859,16 @@ static int get_pos_info(int host_index)
 /* SCSI-immediate-command for assign. This functions maps/unmaps specific
  ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
  subsystem and for dynamical remapping od ldns. */
-static int immediate_assign(int host_index, unsigned int pun, unsigned int lun, unsigned int ldn, unsigned int operation)
+static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
+			    unsigned int lun, unsigned int ldn,
+			    unsigned int operation)
 {
 	int retr;
 	unsigned long imm_cmd;
 
 	for (retr = 0; retr < 3; retr++) {
 		/* select mutation level of the SCSI-adapter */
-		switch (special(host_index)) {
+		switch (special(shpnt)) {
 		case IBM_SCSI2_FW:
 			imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
 			imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -867,7 +877,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
 			imm_cmd |= (unsigned long) ((ldn & 15) << 16);
 			break;
 		default:
-			imm_cmd = inl(IM_CMD_REG(host_index));
+			imm_cmd = inl(IM_CMD_REG(shpnt));
 			imm_cmd &= (unsigned long) (0xF8000000);	/* keep reserved bits */
 			imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
 			imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -876,15 +886,15 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
 			imm_cmd |= (unsigned long) ((ldn & 15) << 16);
 			break;
 		}
-		last_scsi_command(host_index)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
-		last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
-		got_interrupt(host_index) = 0;
-		issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
-		while (!got_interrupt(host_index))
+		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
+		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
+		got_interrupt(shpnt) = 0;
+		issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
+		while (!got_interrupt(shpnt))
 			barrier();
 
 		/*if command successful, break */
-		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
 	if (retr >= 3)
@@ -893,7 +903,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
 		return 1;
 }
 
-static int immediate_feature(int host_index, unsigned int speed, unsigned int timeout)
+static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
 {
 	int retr;
 	unsigned long imm_cmd;
@@ -903,16 +913,16 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
 		imm_cmd = IM_FEATURE_CTR_IMM_CMD;
 		imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
 		imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
-		last_scsi_command(host_index)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
-		last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD;
-		got_interrupt(host_index) = 0;
+		last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
+		last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
+		got_interrupt(shpnt) = 0;
 		/* we need to run into command errors in order to probe for the
 		 * right speed! */
 		global_command_error_excuse = 1;
-		issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
+		issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
 
 		/* FIXME: timeout */
-		while (!got_interrupt(host_index))
+		while (!got_interrupt(shpnt))
 			barrier();
 		if (global_command_error_excuse == CMD_FAIL) {
 			global_command_error_excuse = 0;
@@ -920,7 +930,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
 		} else
 			global_command_error_excuse = 0;
 		/*if command successful, break */
-		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
 	if (retr >= 3)
@@ -930,35 +940,35 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
 }
 
 #ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
-static int immediate_reset(int host_index, unsigned int ldn)
+static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
 {
 	int retries;
 	int ticks;
 	unsigned long imm_command;
 
 	for (retries = 0; retries < 3; retries++) {
-		imm_command = inl(IM_CMD_REG(host_index));
+		imm_command = inl(IM_CMD_REG(shpnt));
 		imm_command &= (unsigned long) (0xFFFF0000);	/* keep reserved bits */
 		imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
-		last_scsi_command(host_index)[ldn] = IM_RESET_IMM_CMD;
-		last_scsi_type(host_index)[ldn] = IM_IMM_CMD;
-		got_interrupt(host_index) = 0;
-		reset_status(host_index) = IM_RESET_IN_PROGRESS;
-		issue_cmd(host_index, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
+		last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
+		last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
+		got_interrupt(shpnt) = 0;
+		reset_status(shpnt) = IM_RESET_IN_PROGRESS;
+		issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
 		ticks = IM_RESET_DELAY * HZ;
-		while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks) {
+		while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
 			udelay((1 + 999 / HZ) * 1000);
 			barrier();
 		}
 		/* if reset did not complete, just complain */
 		if (!ticks) {
 			printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
-			reset_status(host_index) = IM_RESET_FINISHED_OK;
+			reset_status(shpnt) = IM_RESET_FINISHED_OK;
 			/* did not work, finish */
 			return 1;
 		}
 		/*if command successful, break */
-		if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED)
+		if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
 			return 1;
 	}
 	if (retries >= 3)
@@ -1060,35 +1070,35 @@ static int probe_display(int what)
 		return 0;
 }
 
-static int probe_bus_mode(int host_index)
+static int probe_bus_mode(struct Scsi_Host *shpnt)
 {
 	struct im_pos_info *info;
 	int num_bus = 0;
 	int ldn;
 
-	info = (struct im_pos_info *) (&(ld(host_index)[MAX_LOG_DEV].buf));
-	if (get_pos_info(host_index)) {
+	info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
+	if (get_pos_info(shpnt)) {
 		if (info->connector_size & 0xf000)
-			subsystem_connector_size(host_index) = 16;
+			subsystem_connector_size(shpnt) = 16;
 		else
-			subsystem_connector_size(host_index) = 32;
+			subsystem_connector_size(shpnt) = 32;
 		num_bus |= (info->pos_4b & 8) >> 3;
 		for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
-			if ((special(host_index) == IBM_SCSI_WCACHE) || (special(host_index) == IBM_7568_WCACHE)) {
+			if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
 				if (!((info->cache_stat >> ldn) & 1))
-					ld(host_index)[ldn].cache_flag = 0;
+					ld(shpnt)[ldn].cache_flag = 0;
 			}
 			if (!((info->retry_stat >> ldn) & 1))
-				ld(host_index)[ldn].retry_flag = 0;
+				ld(shpnt)[ldn].retry_flag = 0;
 		}
 #ifdef IM_DEBUG_PROBE
 		printk("IBM MCA SCSI: SCSI-Cache bits: ");
 		for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
-			printk("%d", ld(host_index)[ldn].cache_flag);
+			printk("%d", ld(shpnt)[ldn].cache_flag);
 		}
 		printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
 		for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
-			printk("%d", ld(host_index)[ldn].retry_flag);
+			printk("%d", ld(shpnt)[ldn].retry_flag);
 		}
 		printk("\n");
 #endif
@@ -1097,7 +1107,7 @@ static int probe_bus_mode(int host_index)
 }
 
 /* probing scsi devices */
-static void check_devices(int host_index, int adaptertype)
+static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
 {
 	int id, lun, ldn, ticks;
 	int count_devices;	/* local counter for connected device */
@@ -1108,24 +1118,24 @@ static void check_devices(int host_index, int adaptertype)
 	/* assign default values to certain variables */
 	ticks = 0;
 	count_devices = 0;
-	IBM_DS(host_index).dyn_flag = 0;	/* normally no need for dynamical ldn management */
-	IBM_DS(host_index).total_errors = 0;	/* set errorcounter to 0 */
-	next_ldn(host_index) = 7;	/* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
+	IBM_DS(shpnt).dyn_flag = 0;	/* normally no need for dynamical ldn management */
+	IBM_DS(shpnt).total_errors = 0;	/* set errorcounter to 0 */
+	next_ldn(shpnt) = 7;	/* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
 
 	/* initialize the very important driver-informational arrays/structs */
-	memset(ld(host_index), 0, sizeof(ld(host_index)));
+	memset(ld(shpnt), 0, sizeof(ld(shpnt)));
 	for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
-		last_scsi_command(host_index)[ldn] = NO_SCSI;	/* emptify last SCSI-command storage */
-		last_scsi_type(host_index)[ldn] = 0;
-		ld(host_index)[ldn].cache_flag = 1;
-		ld(host_index)[ldn].retry_flag = 1;
+		last_scsi_command(shpnt)[ldn] = NO_SCSI;	/* emptify last SCSI-command storage */
+		last_scsi_type(shpnt)[ldn] = 0;
+		ld(shpnt)[ldn].cache_flag = 1;
+		ld(shpnt)[ldn].retry_flag = 1;
 	}
-	memset(get_ldn(host_index), TYPE_NO_DEVICE, sizeof(get_ldn(host_index)));	/* this is essential ! */
-	memset(get_scsi(host_index), TYPE_NO_DEVICE, sizeof(get_scsi(host_index)));	/* this is essential ! */
+	memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt)));	/* this is essential ! */
+	memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt)));	/* this is essential ! */
 	for (lun = 0; lun < 8; lun++) {
 		/* mark the adapter at its pun on all luns */
-		get_scsi(host_index)[subsystem_pun(host_index)][lun] = TYPE_IBM_SCSI_ADAPTER;
-		get_ldn(host_index)[subsystem_pun(host_index)][lun] = MAX_LOG_DEV;	/* make sure, the subsystem
+		get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
+		get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV;	/* make sure, the subsystem
 										   ldn is active for all
 										   luns. */
 	}
@@ -1134,9 +1144,9 @@ static void check_devices(int host_index, int adaptertype)
 	/* monitor connected on model XX95. */
 
 	/* STEP 1: */
-	adapter_speed(host_index) = global_adapter_speed;
-	speedrun = adapter_speed(host_index);
-	while (immediate_feature(host_index, speedrun, adapter_timeout) == 2) {
+	adapter_speed(shpnt) = global_adapter_speed;
+	speedrun = adapter_speed(shpnt);
+	while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
 		probe_display(1);
 		if (speedrun == 7)
 			panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
@@ -1144,30 +1154,30 @@ static void check_devices(int host_index, int adaptertype)
 		if (speedrun > 7)
 			speedrun = 7;
 	}
-	adapter_speed(host_index) = speedrun;
+	adapter_speed(shpnt) = speedrun;
 	/* Get detailed information about the current adapter, necessary for
 	 * device operations: */
-	num_bus = probe_bus_mode(host_index);
+	num_bus = probe_bus_mode(shpnt);
 
 	/* num_bus contains only valid data for the F/W adapter! */
 	if (adaptertype == IBM_SCSI2_FW) {	/* F/W SCSI adapter: */
 		/* F/W adapter PUN-space extension evaluation: */
 		if (num_bus) {
 			printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
-			subsystem_maxid(host_index) = 16;
+			subsystem_maxid(shpnt) = 16;
 		} else {
 			printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
-			subsystem_maxid(host_index) = 8;
+			subsystem_maxid(shpnt) = 8;
 		}
 		printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
 	} else	/* all other IBM SCSI adapters: */
 		printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
 
 	/* assign correct PUN device space */
-	max_pun = subsystem_maxid(host_index);
+	max_pun = subsystem_maxid(shpnt);
 
 #ifdef IM_DEBUG_PROBE
-	printk("IBM MCA SCSI: Current SCSI-host index: %d\n", host_index);
+	printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
 	printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
 #else
 	printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
@@ -1177,7 +1187,7 @@ static void check_devices(int host_index, int adaptertype)
 #ifdef IM_DEBUG_PROBE
 		printk(".");
 #endif
-		immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN);	/* remove ldn (wherever) */
+		immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);	/* remove ldn (wherever) */
 	}
 	lun = 0;	/* default lun is 0 */
 #ifndef IM_DEBUG_PROBE
@@ -1196,18 +1206,18 @@ static void check_devices(int host_index, int adaptertype)
 #ifdef IM_DEBUG_PROBE
 			printk(".");
 #endif
-		if (id != subsystem_pun(host_index)) {
+		if (id != subsystem_pun(shpnt)) {
 			/* if pun is not the adapter: */
 			/* set ldn=0 to pun,lun */
-			immediate_assign(host_index, id, lun, PROBE_LDN, SET_LDN);
-			if (device_inquiry(host_index, PROBE_LDN)) {	/* probe device */
-				get_scsi(host_index)[id][lun] = (unsigned char) (ld(host_index)[PROBE_LDN].buf[0]);
+			immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
+			if (device_inquiry(shpnt, PROBE_LDN)) {	/* probe device */
+				get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
 				/* entry, even for NO_LUN */
-				if (ld(host_index)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
+				if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
 					count_devices++;	/* a existing device is found */
 			}
 			/* remove ldn */
-			immediate_assign(host_index, id, lun, PROBE_LDN, REMOVE_LDN);
+			immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
 		}
 	}
 #ifndef IM_DEBUG_PROBE
@@ -1227,16 +1237,16 @@ static void check_devices(int host_index, int adaptertype)
1227#ifdef IM_DEBUG_PROBE 1237#ifdef IM_DEBUG_PROBE
1228 printk("."); 1238 printk(".");
1229#endif 1239#endif
1230 if (id != subsystem_pun(host_index)) { 1240 if (id != subsystem_pun(shpnt)) {
1231 if (get_scsi(host_index)[id][lun] != TYPE_NO_LUN && get_scsi(host_index)[id][lun] != TYPE_NO_DEVICE) { 1241 if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
1232 /* Only map if accepted type. Always enter for 1242 /* Only map if accepted type. Always enter for
1233 lun == 0 so that no gaps appear in the ldn-mapping for ldn<7. */ 1243 lun == 0 so that no gaps appear in the ldn-mapping for ldn<7. */
1234 immediate_assign(host_index, id, lun, ldn, SET_LDN); 1244 immediate_assign(shpnt, id, lun, ldn, SET_LDN);
1235 get_ldn(host_index)[id][lun] = ldn; /* map ldn */ 1245 get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
1236 if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) { 1246 if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
1237#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET 1247#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
1238 printk("resetting device at ldn=%x ... ", ldn); 1248 printk("resetting device at ldn=%x ... ", ldn);
1239 immediate_reset(host_index, ldn); 1249 immediate_reset(shpnt, ldn);
1240#endif 1250#endif
1241 ldn++; 1251 ldn++;
1242 } else { 1252 } else {
@@ -1244,15 +1254,15 @@ static void check_devices(int host_index, int adaptertype)
1244 * handle it or because it has problems */ 1254 * handle it or because it has problems */
1245 if (lun > 0) { 1255 if (lun > 0) {
1246 /* remove mapping */ 1256 /* remove mapping */
1247 get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE; 1257 get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
1248 immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN); 1258 immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
1249 } else 1259 } else
1250 ldn++; 1260 ldn++;
1251 } 1261 }
1252 } else if (lun == 0) { 1262 } else if (lun == 0) {
1253 /* map lun == 0, even if no device exists */ 1263 /* map lun == 0, even if no device exists */
1254 immediate_assign(host_index, id, lun, ldn, SET_LDN); 1264 immediate_assign(shpnt, id, lun, ldn, SET_LDN);
1255 get_ldn(host_index)[id][lun] = ldn; /* map ldn */ 1265 get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
1256 ldn++; 1266 ldn++;
1257 } 1267 }
1258 } 1268 }
@@ -1262,14 +1272,14 @@ static void check_devices(int host_index, int adaptertype)
1262 /* map remaining ldns to non-existing devices */ 1272 /* map remaining ldns to non-existing devices */
1263 for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++) 1273 for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
1264 for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) { 1274 for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
1265 if (get_scsi(host_index)[id][lun] == TYPE_NO_LUN || get_scsi(host_index)[id][lun] == TYPE_NO_DEVICE) { 1275 if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
1266 probe_display(1); 1276 probe_display(1);
1267 /* Map remaining ldns only to NON-existing pun,lun 1277 /* Map remaining ldns only to NON-existing pun,lun
1268 combinations to make sure an inquiry will fail. 1278 combinations to make sure an inquiry will fail.
1269 For MULTI_LUN, this is needed to avoid autonomous adapter 1279 For MULTI_LUN, this is needed to avoid autonomous adapter
1270 SCSI-remapping. */ 1280 SCSI-remapping. */
1271 immediate_assign(host_index, id, lun, ldn, SET_LDN); 1281 immediate_assign(shpnt, id, lun, ldn, SET_LDN);
1272 get_ldn(host_index)[id][lun] = ldn; 1282 get_ldn(shpnt)[id][lun] = ldn;
1273 ldn++; 1283 ldn++;
1274 } 1284 }
1275 } 1285 }
@@ -1292,51 +1302,51 @@ static void check_devices(int host_index, int adaptertype)
1292 for (id = 0; id < max_pun; id++) { 1302 for (id = 0; id < max_pun; id++) {
1293 printk("%2d ", id); 1303 printk("%2d ", id);
1294 for (lun = 0; lun < 8; lun++) 1304 for (lun = 0; lun < 8; lun++)
1295 printk("%2s ", ti_p(get_scsi(host_index)[id][lun])); 1305 printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
1296 printk(" %2d ", id); 1306 printk(" %2d ", id);
1297 for (lun = 0; lun < 8; lun++) 1307 for (lun = 0; lun < 8; lun++)
1298 printk("%2s ", ti_l(get_ldn(host_index)[id][lun])); 1308 printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
1299 printk("\n"); 1309 printk("\n");
1300 } 1310 }
1301#endif 1311#endif
1302 1312
1303 /* assign total number of found SCSI-devices to the statistics struct */ 1313 /* assign total number of found SCSI-devices to the statistics struct */
1304 IBM_DS(host_index).total_scsi_devices = count_devices; 1314 IBM_DS(shpnt).total_scsi_devices = count_devices;
1305 1315
1306 /* decide, for output in the /proc-filesystem, whether the 1316 /* decide, for output in the /proc-filesystem, whether the
1307 configuration of SCSI-devices makes dynamical reassignment necessary */ 1317 configuration of SCSI-devices makes dynamical reassignment necessary */
1308 if (count_devices >= MAX_LOG_DEV) 1318 if (count_devices >= MAX_LOG_DEV)
1309 IBM_DS(host_index).dyn_flag = 1; /* dynamical assignment is necessary */ 1319 IBM_DS(shpnt).dyn_flag = 1; /* dynamical assignment is necessary */
1310 else 1320 else
1311 IBM_DS(host_index).dyn_flag = 0; /* dynamical assignment is not necessary */ 1321 IBM_DS(shpnt).dyn_flag = 0; /* dynamical assignment is not necessary */
1312 1322
1313 /* If no SCSI-devices are assigned, print a warning. */ 1323 /* If no SCSI-devices are assigned, print a warning. */
1314 if (ldn == 0) 1324 if (ldn == 0)
1315 printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n"); 1325 printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
1316 1326
1317 /* reset the counters for statistics on the current adapter */ 1327 /* reset the counters for statistics on the current adapter */
1318 IBM_DS(host_index).scbs = 0; 1328 IBM_DS(shpnt).scbs = 0;
1319 IBM_DS(host_index).long_scbs = 0; 1329 IBM_DS(shpnt).long_scbs = 0;
1320 IBM_DS(host_index).total_accesses = 0; 1330 IBM_DS(shpnt).total_accesses = 0;
1321 IBM_DS(host_index).total_interrupts = 0; 1331 IBM_DS(shpnt).total_interrupts = 0;
1322 IBM_DS(host_index).dynamical_assignments = 0; 1332 IBM_DS(shpnt).dynamical_assignments = 0;
1323 memset(IBM_DS(host_index).ldn_access, 0x0, sizeof(IBM_DS(host_index).ldn_access)); 1333 memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
1324 memset(IBM_DS(host_index).ldn_read_access, 0x0, sizeof(IBM_DS(host_index).ldn_read_access)); 1334 memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
1325 memset(IBM_DS(host_index).ldn_write_access, 0x0, sizeof(IBM_DS(host_index).ldn_write_access)); 1335 memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
1326 memset(IBM_DS(host_index).ldn_inquiry_access, 0x0, sizeof(IBM_DS(host_index).ldn_inquiry_access)); 1336 memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
1327 memset(IBM_DS(host_index).ldn_modeselect_access, 0x0, sizeof(IBM_DS(host_index).ldn_modeselect_access)); 1337 memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
1328 memset(IBM_DS(host_index).ldn_assignments, 0x0, sizeof(IBM_DS(host_index).ldn_assignments)); 1338 memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
1329 probe_display(0); 1339 probe_display(0);
1330 return; 1340 return;
1331} 1341}
1332 1342
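The pattern running through these hunks is a mechanical one: per-adapter state that used to live in global arrays indexed by host_index is now reached through shpnt->hostdata. A minimal sketch of what the converted accessor macros presumably look like — the struct layout and field names here are illustrative assumptions, not the driver's exact definitions:

	/* Hypothetical sketch of the shpnt-based accessors; the struct
	 * layout and field names are assumptions for illustration only. */
	struct ibmmca_hostdata_sketch {
		unsigned char ldn_map[16][8];	/* pun/lun -> ldn table */
		int next_ldn;			/* next ldn for dynamic remapping */
	};

	#define HOSTDATA(shpnt)	((struct ibmmca_hostdata_sketch *)(shpnt)->hostdata)
	#define get_ldn(shpnt)	(HOSTDATA(shpnt)->ldn_map)
	#define next_ldn(shpnt)	(HOSTDATA(shpnt)->next_ldn)

With accessors of this shape, every former get_ldn(host_index) reference rewrites one-for-one to get_ldn(shpnt), which is exactly the substitution the hunks above and below perform.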
1333static int device_exists(int host_index, int ldn, int *block_length, int *device_type) 1343static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
1334{ 1344{
1335 unsigned char *buf; 1345 unsigned char *buf;
1336 /* if no valid device found, return immediately with 0 */ 1346 /* if no valid device found, return immediately with 0 */
1337 if (!(device_inquiry(host_index, ldn))) 1347 if (!(device_inquiry(shpnt, ldn)))
1338 return 0; 1348 return 0;
1339 buf = (unsigned char *) (&(ld(host_index)[ldn].buf)); 1349 buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
1340 if (*buf == TYPE_ROM) { 1350 if (*buf == TYPE_ROM) {
1341 *device_type = TYPE_ROM; 1351 *device_type = TYPE_ROM;
1342 *block_length = 2048; /* (standard blocksize for yellow-/red-book) */ 1352 *block_length = 2048; /* (standard blocksize for yellow-/red-book) */
@@ -1349,7 +1359,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
1349 } 1359 }
1350 if (*buf == TYPE_DISK) { 1360 if (*buf == TYPE_DISK) {
1351 *device_type = TYPE_DISK; 1361 *device_type = TYPE_DISK;
1352 if (read_capacity(host_index, ldn)) { 1362 if (read_capacity(shpnt, ldn)) {
1353 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24); 1363 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
1354 return 1; 1364 return 1;
1355 } else 1365 } else
@@ -1357,7 +1367,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
1357 } 1367 }
1358 if (*buf == TYPE_MOD) { 1368 if (*buf == TYPE_MOD) {
1359 *device_type = TYPE_MOD; 1369 *device_type = TYPE_MOD;
1360 if (read_capacity(host_index, ldn)) { 1370 if (read_capacity(shpnt, ldn)) {
1361 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24); 1371 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
1362 return 1; 1372 return 1;
1363 } else 1373 } else
@@ -1430,6 +1440,9 @@ static void internal_ibmmca_scsi_setup(char *str, int *ints)
1430 return; 1440 return;
1431} 1441}
1432 1442
1443#if 0
1444 FIXME NEED TO MOVE TO SYSFS
1445
1433static int ibmmca_getinfo(char *buf, int slot, void *dev_id) 1446static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
1434{ 1447{
1435 struct Scsi_Host *shpnt; 1448 struct Scsi_Host *shpnt;
@@ -1480,58 +1493,34 @@ static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
1480 1493
1481 return len; 1494 return len;
1482} 1495}
1496#endif
1483 1497
1484int ibmmca_detect(struct scsi_host_template * scsi_template) 1498static struct scsi_host_template ibmmca_driver_template = {
1499 .proc_name = "ibmmca",
1500 .proc_info = ibmmca_proc_info,
1501 .name = "IBM SCSI-Subsystem",
1502 .queuecommand = ibmmca_queuecommand,
1503 .eh_abort_handler = ibmmca_abort,
1504 .eh_host_reset_handler = ibmmca_host_reset,
1505 .bios_param = ibmmca_biosparam,
1506 .can_queue = 16,
1507 .this_id = 7,
1508 .sg_tablesize = 16,
1509 .cmd_per_lun = 1,
1510 .use_clustering = ENABLE_CLUSTERING,
1511};
1512
1513static int ibmmca_probe(struct device *dev)
1485{ 1514{
1486 struct Scsi_Host *shpnt; 1515 struct Scsi_Host *shpnt;
1487 int port, id, i, j, k, slot; 1516 int port, id, i, j, k, irq, enabled, ret = -EINVAL;
1488 int devices_on_irq_11 = 0; 1517 struct mca_device *mca_dev = to_mca_device(dev);
1489 int devices_on_irq_14 = 0; 1518 const char *description = ibmmca_description[mca_dev->index];
1490 int IRQ14_registered = 0;
1491 int IRQ11_registered = 0;
1492
1493 found = 0; /* make absolutely sure, that found is set to 0 */
1494 1519
1495 /* First of all, print the version number of the driver. This is 1520 /* First of all, print the version number of the driver. This is
1496 * important for better user bug reports in case of problems 1521 * important for better user bug reports in case of problems
1497 * with the MCA_bus probing. */ 1522 * with the MCA_bus probing. */
1498 printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION); 1523 printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
1499 /* if this is not MCA machine, return "nothing found" */
1500 if (!MCA_bus) {
1501 printk(KERN_INFO "IBM MCA SCSI: No Microchannel-bus present --> Aborting.\n" " This machine does not have any IBM MCA-bus\n" " or the MCA-Kernel-support is not enabled!\n");
1502 return 0;
1503 }
1504
1505#ifdef MODULE
1506 /* If the driver is run as module, read from conf.modules or cmd-line */
1507 if (boot_options)
1508 option_setup(boot_options);
1509#endif
1510
1511 /* get interrupt request level */
1512 if (request_irq(IM_IRQ, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
1513 printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ);
1514 return 0;
1515 } else
1516 IRQ14_registered++;
1517
1518 /* if ibmmcascsi setup option was passed to kernel, return "found" */
1519 for (i = 0; i < IM_MAX_HOSTS; i++)
1520 if (io_port[i] > 0 && scsi_id[i] >= 0 && scsi_id[i] < 8) {
1521 printk("IBM MCA SCSI: forced detected SCSI Adapter, io=0x%x, scsi id=%d.\n", io_port[i], scsi_id[i]);
1522 if ((shpnt = ibmmca_register(scsi_template, io_port[i], scsi_id[i], FORCED_DETECTION, "forced detected SCSI Adapter"))) {
1523 for (k = 2; k < 7; k++)
1524 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = 0;
1525 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = FORCED_DETECTION;
1526 mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter");
1527 mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1528 mca_mark_as_used(MCA_INTEGSCSI);
1529 devices_on_irq_14++;
1530 }
1531 }
1532 if (found)
1533 return found;
1534
1535 /* The POS2-register of all PS/2 model SCSI-subsystems has the following 1524 /* The POS2-register of all PS/2 model SCSI-subsystems has the following
1536 * interpretation of bits: 1525 * interpretation of bits:
1537 * Bit 7 - 4 : Chip Revision ID (Release) 1526 * Bit 7 - 4 : Chip Revision ID (Release)
@@ -1558,7 +1547,14 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
1558 1547
1559 /* first look for the IBM SCSI integrated subsystem on the motherboard */ 1548 /* first look for the IBM SCSI integrated subsystem on the motherboard */
1560 for (j = 0; j < 8; j++) /* read the pos-information */ 1549 for (j = 0; j < 8; j++) /* read the pos-information */
1561 pos[j] = mca_read_stored_pos(MCA_INTEGSCSI, j); 1550 pos[j] = mca_device_read_pos(mca_dev, j);
1551 id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
1552 enabled = (pos[2] & 0x01);
1553 if (!enabled) {
1554 printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
1555 printk(KERN_WARNING " SCSI-operations may not work.\n");
1556 }
1557
1562 /* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but 1558 /* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
1563 * if we ignore the settings of all surrounding pos registers, it is not 1559 * if we ignore the settings of all surrounding pos registers, it is not
1564 * completely sufficient to only check pos2 and pos3. */ 1560 * completely sufficient to only check pos2 and pos3. */
@@ -1566,232 +1562,137 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
1566 * make sure, we see a real integrated onboard SCSI-interface and no 1562 * make sure, we see a real integrated onboard SCSI-interface and no
1567 * internal system information, which gets mapped to some pos registers 1563 * internal system information, which gets mapped to some pos registers
1568 * on models 95xx. */ 1564 * on models 95xx. */
1569 if ((!pos[0] && !pos[1] && pos[2] > 0 && pos[3] > 0 && !pos[4] && !pos[5] && !pos[6] && !pos[7]) || (pos[0] == 0xff && pos[1] == 0xff && pos[2] < 0xff && pos[3] < 0xff && pos[4] == 0xff && pos[5] == 0xff && pos[6] == 0xff && pos[7] == 0xff)) { 1565 if (mca_dev->slot == MCA_INTEGSCSI &&
1570 if ((pos[2] & 1) == 1) /* is the subsystem chip enabled ? */ 1566 ((!pos[0] && !pos[1] && pos[2] > 0 &&
1571 port = IM_IO_PORT; 1567 pos[3] > 0 && !pos[4] && !pos[5] &&
1572 else { /* if disabled, no IRQs will be generated, as the chip won't 1568 !pos[6] && !pos[7]) ||
1573 * listen to the incoming commands and will do really nothing, 1569 (pos[0] == 0xff && pos[1] == 0xff &&
1574 * except for listening to the pos-register settings. If this 1570 pos[2] < 0xff && pos[3] < 0xff &&
1575 * happens, I need to hugely think about it, as one has to 1571 pos[4] == 0xff && pos[5] == 0xff &&
1576 * write something to the MCA-Bus pos register in order to 1572 pos[6] == 0xff && pos[7] == 0xff))) {
1577 * enable the chip. Normally, IBM-SCSI won't pass the POST, 1573 irq = IM_IRQ;
1578 * when the chip is disabled (see IBM tech. ref.). */ 1574 port = IM_IO_PORT;
1579 port = IM_IO_PORT; /* anyway, set the portnumber and warn */ 1575 } else {
1580 printk("IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n" " SCSI-operations may not work.\n"); 1576 irq = IM_IRQ;
1577 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1578 if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
1579 printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
1580 printk(KERN_ERR " Impossible to determine adapter PUN!\n");
1581 printk(KERN_ERR " Guessing adapter PUN = 7.\n");
1582 id = 7;
1583 } else {
1584 id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
1585 if (mca_dev->index == IBM_SCSI2_FW) {
1586 id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
1587 * for F/W adapters */
1588 }
1581 } 1589 }
1582 id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */ 1590 if ((mca_dev->index == IBM_SCSI2_FW) &&
1583 /* give detailed information on the subsystem. This helps me 1591 (pos[4] & 0x01) && (pos[6] == 0)) {
1584 * additionally during debugging and analyzing bug-reports. */ 1592 /* IRQ11 is used by SCSI-2 F/W Adapter/A */
1585 printk(KERN_INFO "IBM MCA SCSI: IBM Integrated SCSI Controller found, io=0x%x, scsi id=%d,\n", port, id); 1593 printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
1586 printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled."); 1594 irq = IM_IRQ_FW;
1587
1588 /* register the found integrated SCSI-subsystem */
1589 if ((shpnt = ibmmca_register(scsi_template, port, id, INTEGRATED_SCSI, "IBM Integrated SCSI Controller")))
1590 {
1591 for (k = 2; k < 7; k++)
1592 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1593 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
1594 mca_set_adapter_name(MCA_INTEGSCSI, "IBM Integrated SCSI Controller");
1595 mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1596 mca_mark_as_used(MCA_INTEGSCSI);
1597 devices_on_irq_14++;
1598 } 1595 }
1599 } 1596 }
1600 1597
1601 /* now look for other adapters in MCA slots, */
1602 /* determine the number of known IBM-SCSI-subsystem types */
1603 /* see the pos[2] dependence to get the adapter port-offset. */
1604 for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
1605 /* scan each slot for a fitting adapter id */
1606 slot = 0; /* start at slot 0 */
1607 while ((slot = mca_find_adapter(subsys_list[i].mca_id, slot))
1608 != MCA_NOTFOUND) { /* scan through all slots */
1609 for (j = 0; j < 8; j++) /* read the pos-information */
1610 pos[j] = mca_read_stored_pos(slot, j);
1611 if ((pos[2] & 1) == 1)
1612 /* is the subsystem chip enabled ? */
1613 /* (explanations see above) */
1614 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1615 else {
1616 /* anyway, set the portnumber and warn */
1617 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1618 printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
1619 printk(KERN_WARNING " SCSI-operations may not work.\n");
1620 }
1621 if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
1622 printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
1623 printk(KERN_ERR " Impossible to determine adapter PUN!\n");
1624 printk(KERN_ERR " Guessing adapter PUN = 7.\n");
1625 id = 7;
1626 } else {
1627 id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
1628 if (i == IBM_SCSI2_FW) {
1629 id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
1630 * for F/W adapters */
1631 }
1632 }
1633 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
1634 /* IRQ11 is used by SCSI-2 F/W Adapter/A */
1635 printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
1636 /* get interrupt request level */
1637 if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
1638 printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
1639 } else
1640 IRQ11_registered++;
1641 }
1642 printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
1643 if ((pos[2] & 0xf0) == 0xf0)
1644 printk(KERN_DEBUG " ROM Addr.=off,");
1645 else
1646 printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
1647 printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
1648
1649 /* register the hostadapter */
1650 if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
1651 for (k = 2; k < 8; k++)
1652 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1653 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
1654 mca_set_adapter_name(slot, subsys_list[i].description);
1655 mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1656 mca_mark_as_used(slot);
1657 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
1658 devices_on_irq_11++;
1659 else
1660 devices_on_irq_14++;
1661 }
1662 slot++; /* advance to next slot */
1663 } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
1664 }
1665 1598
1666 /* now check for SCSI-adapters, mapped to the integrated SCSI
1667 * area. E.g. a W/Cache in MCA-slot 9(!). Do the check correct here,
1668 * as this is a known effect on some models 95xx. */
1669 for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
1670 /* scan each slot for a fitting adapter id */
1671 slot = mca_find_adapter(subsys_list[i].mca_id, MCA_INTEGSCSI);
1672 if (slot != MCA_NOTFOUND) { /* scan through all slots */
1673 for (j = 0; j < 8; j++) /* read the pos-information */
1674 pos[j] = mca_read_stored_pos(slot, j);
1675 if ((pos[2] & 1) == 1) { /* is the subsystem chip enabled ? */
1676 /* (explanations see above) */
1677 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1678 } else { /* anyway, set the portnumber and warn */
1679 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1680 printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
1681 printk(KERN_WARNING " SCSI-operations may not work.\n");
1682 }
1683 if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
1684 printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
1685 printk(KERN_ERR " Impossible to determine adapter PUN!\n");
1686 printk(KERN_ERR " Guessing adapter PUN = 7.\n");
1687 id = 7;
1688 } else {
1689 id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
1690 if (i == IBM_SCSI2_FW)
1691 id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
1692 * for F/W adapters */
1693 }
1694 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
1695 /* IRQ11 is used by SCSI-2 F/W Adapter/A */
1696 printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
1697 /* get interrupt request level */
1698 if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts))
1699 printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
1700 else
1701 IRQ11_registered++;
1702 }
1703 printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
1704 if ((pos[2] & 0xf0) == 0xf0)
1705 printk(KERN_DEBUG " ROM Addr.=off,");
1706 else
1707 printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
1708 printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
1709
1710 /* register the hostadapter */
1711 if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
1712 for (k = 2; k < 7; k++)
1713 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1714 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
1715 mca_set_adapter_name(slot, subsys_list[i].description);
1716 mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1717 mca_mark_as_used(slot);
1718 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
1719 devices_on_irq_11++;
1720 else
1721 devices_on_irq_14++;
1722 }
1723 slot++; /* advance to next slot */
1724 } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
1725 }
1726 if (IRQ11_registered && !devices_on_irq_11)
1727 free_irq(IM_IRQ_FW, hosts); /* no devices on IRQ 11 */
1728 if (IRQ14_registered && !devices_on_irq_14)
1729 free_irq(IM_IRQ, hosts); /* no devices on IRQ 14 */
1730 if (!devices_on_irq_11 && !devices_on_irq_14)
1731 printk(KERN_WARNING "IBM MCA SCSI: No IBM SCSI-subsystem adapter attached.\n");
1732 return found; /* return the number of found SCSI hosts. Should be 1 or 0. */
1733}
1734 1599
1735static struct Scsi_Host *ibmmca_register(struct scsi_host_template * scsi_template, int port, int id, int adaptertype, char *hostname) 1600 /* give detailed information on the subsystem. This helps me
1736{ 1601 * additionally during debugging and analyzing bug-reports. */
1737 struct Scsi_Host *shpnt; 1602 printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
1738 int i, j; 1603 description, port, id);
1739 unsigned int ctrl; 1604 if (mca_dev->slot == MCA_INTEGSCSI)
1605 printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
1606 else {
1607 if ((pos[2] & 0xf0) == 0xf0)
1608 printk(KERN_DEBUG " ROM Addr.=off,");
1609 else
1610 printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
1611
1612 printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
1613 }
1740 1614
1741 /* check I/O region */ 1615 /* check I/O region */
1742 if (!request_region(port, IM_N_IO_PORT, hostname)) { 1616 if (!request_region(port, IM_N_IO_PORT, description)) {
1743 printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT); 1617 printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
1744 return NULL; 1618 goto out_fail;
1745 } 1619 }
1746 1620
1747 /* register host */ 1621 /* register host */
1748 shpnt = scsi_register(scsi_template, sizeof(struct ibmmca_hostdata)); 1622 shpnt = scsi_host_alloc(&ibmmca_driver_template,
1623 sizeof(struct ibmmca_hostdata));
1749 if (!shpnt) { 1624 if (!shpnt) {
1750 printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n"); 1625 printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
1751 release_region(port, IM_N_IO_PORT); 1626 goto out_release;
1752 return NULL; 1627 }
1628
1629 dev_set_drvdata(dev, shpnt);
1630 if(request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
1631 printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
1632 goto out_free_host;
1753 } 1633 }
1754 1634
1755 /* set up per-adapter bookkeeping */ 1635 /* set up per-adapter bookkeeping */
1756 hosts[found] = shpnt; /* add new found hostadapter to the list */ 1636 special(shpnt) = mca_dev->index; /* important assignment or else crash! */
1757 special(found) = adaptertype; /* important assignment or else crash! */ 1637 subsystem_connector_size(shpnt) = 0; /* preset slot-size */
1758 subsystem_connector_size(found) = 0; /* preset slot-size */ 1638 shpnt->irq = irq; /* assign necessary stuff for the adapter */
1759 shpnt->irq = IM_IRQ; /* assign necessary stuff for the adapter */
1760 shpnt->io_port = port; 1639 shpnt->io_port = port;
1761 shpnt->n_io_port = IM_N_IO_PORT; 1640 shpnt->n_io_port = IM_N_IO_PORT;
1762 shpnt->this_id = id; 1641 shpnt->this_id = id;
1763 shpnt->max_id = 8; /* 8 PUNs are default */ 1642 shpnt->max_id = 8; /* 8 PUNs are default */
1764 /* now, the SCSI-subsystem is connected to Linux */ 1643 /* now, the SCSI-subsystem is connected to Linux */
1765 1644
1766 ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
1767#ifdef IM_DEBUG_PROBE 1645#ifdef IM_DEBUG_PROBE
1646 ctrl = (unsigned int) (inb(IM_CTR_REG(shpnt))); /* get control-register status */
1768 printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found))); 1647 printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
1769 printk("IBM MCA SCSI: This adapters' POS-registers: "); 1648 printk("IBM MCA SCSI: This adapters' POS-registers: ");
1770 for (i = 0; i < 8; i++) 1649 for (i = 0; i < 8; i++)
1771 printk("%x ", pos[i]); 1650 printk("%x ", pos[i]);
1772 printk("\n"); 1651 printk("\n");
1773#endif 1652#endif
1774 reset_status(found) = IM_RESET_NOT_IN_PROGRESS; 1653 reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
1775 1654
1776 for (i = 0; i < 16; i++) /* reset the tables */ 1655 for (i = 0; i < 16; i++) /* reset the tables */
1777 for (j = 0; j < 8; j++) 1656 for (j = 0; j < 8; j++)
1778 get_ldn(found)[i][j] = MAX_LOG_DEV; 1657 get_ldn(shpnt)[i][j] = MAX_LOG_DEV;
1779 1658
1780 /* check which logical devices exist */ 1659 /* check which logical devices exist */
1781 /* after this line, local interrupting is possible: */ 1660 /* after this line, local interrupting is possible: */
1782 local_checking_phase_flag(found) = 1; 1661 local_checking_phase_flag(shpnt) = 1;
1783 check_devices(found, adaptertype); /* call by value, using the global variable hosts */ 1662 check_devices(shpnt, mca_dev->index); /* probe all devices on this adapter */
1784 local_checking_phase_flag(found) = 0; 1663 local_checking_phase_flag(shpnt) = 0;
1785 found++; /* now increase index to be prepared for next found subsystem */ 1664
1786 /* an ibm mca subsystem has been detected */ 1665 /* an ibm mca subsystem has been detected */
1787 return shpnt; 1666
1667 for (k = 2; k < 7; k++)
1668 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1669 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
1670 mca_device_set_name(mca_dev, description);
1671 /* FIXME: NEED TO REPLUMB TO SYSFS
1672 mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1673 */
1674 mca_device_set_claim(mca_dev, 1);
1675 if (scsi_add_host(shpnt, dev)) {
1676 dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
1677 goto out_free_host;
1678 }
1679 scsi_scan_host(shpnt);
1680
1681 return 0;
1682 out_free_host:
1683 scsi_host_put(shpnt);
1684 out_release:
1685 release_region(port, IM_N_IO_PORT);
1686 out_fail:
1687 return ret;
1788} 1688}
1789 1689
1790static int ibmmca_release(struct Scsi_Host *shpnt) 1690static int __devexit ibmmca_remove(struct device *dev)
1791{ 1691{
1692 struct Scsi_Host *shpnt = dev_get_drvdata(dev);
1693 scsi_remove_host(shpnt);
1792 release_region(shpnt->io_port, shpnt->n_io_port); 1694 release_region(shpnt->io_port, shpnt->n_io_port);
1793 if (!(--found)) 1695 free_irq(shpnt->irq, dev);
1794 free_irq(shpnt->irq, hosts);
1795 return 0; 1696 return 0;
1796} 1697}
1797 1698
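With a probe/remove pair in place of the old detect/release model, the driver presumably registers with the MCA bus through the generic driver model instead of scanning every slot in ibmmca_detect(). A sketch of what that registration could look like — the id_table symbol and the exact mca_driver field set are assumptions, since they fall outside the hunks shown here:

	/* Hypothetical registration sketch; ibmmca_id_table is assumed
	 * to list the POS adapter ids the probe routine recognizes. */
	static struct mca_driver ibmmca_driver = {
		.id_table = ibmmca_id_table,
		.driver = {
			.name	= "ibmmca",
			.bus	= &mca_bus_type,
			.probe	= ibmmca_probe,
			.remove	= __devexit_p(ibmmca_remove),
		},
	};

	static int __init ibmmca_init(void)
	{
		return mca_register_driver(&ibmmca_driver);
	}

The bus core then calls ibmmca_probe() once per matching device, which is why the function above takes a struct device rather than walking the slots itself.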
@@ -1805,33 +1706,24 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1805 int current_ldn; 1706 int current_ldn;
1806 int id, lun; 1707 int id, lun;
1807 int target; 1708 int target;
1808 int host_index;
1809 int max_pun; 1709 int max_pun;
1810 int i; 1710 int i;
1811 struct scatterlist *sl; 1711 struct scatterlist *sg;
1812 1712
1813 shpnt = cmd->device->host; 1713 shpnt = cmd->device->host;
1814 /* search for the right hostadapter */
1815 for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
1816 1714
1817 if (!hosts[host_index]) { /* invalid hostadapter descriptor address */ 1715 max_pun = subsystem_maxid(shpnt);
1818 cmd->result = DID_NO_CONNECT << 16;
1819 if (done)
1820 done(cmd);
1821 return 0;
1822 }
1823 max_pun = subsystem_maxid(host_index);
1824 if (ibm_ansi_order) { 1716 if (ibm_ansi_order) {
1825 target = max_pun - 1 - cmd->device->id; 1717 target = max_pun - 1 - cmd->device->id;
1826 if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index))) 1718 if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
1827 target--; 1719 target--;
1828 else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index))) 1720 else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
1829 target++; 1721 target++;
1830 } else 1722 } else
1831 target = cmd->device->id; 1723 target = cmd->device->id;
1832 1724
1833 /* if (target,lun) is NO LUN or not existing at all, return error */ 1725 /* if (target,lun) is NO LUN or not existing at all, return error */
1834 if ((get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_DEVICE)) { 1726 if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
1835 cmd->result = DID_NO_CONNECT << 16; 1727 cmd->result = DID_NO_CONNECT << 16;
1836 if (done) 1728 if (done)
1837 done(cmd); 1729 done(cmd);
@@ -1839,16 +1731,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1839 } 1731 }
1840 1732
1841 /*if (target,lun) unassigned, do further checks... */ 1733 /*if (target,lun) unassigned, do further checks... */
1842 ldn = get_ldn(host_index)[target][cmd->device->lun]; 1734 ldn = get_ldn(shpnt)[target][cmd->device->lun];
1843 if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */ 1735 if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */
1844 if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */ 1736 if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */
1845 current_ldn = next_ldn(host_index); /* stop-value for one circle */ 1737 current_ldn = next_ldn(shpnt); /* stop-value for one circle */
1846 while (ld(host_index)[next_ldn(host_index)].cmd) { /* search for an occupied, but not in */ 1738 while (ld(shpnt)[next_ldn(shpnt)].cmd) { /* search for an occupied, but not in */
1847 /* command-processing ldn. */ 1739 /* command-processing ldn. */
1848 next_ldn(host_index)++; 1740 next_ldn(shpnt)++;
1849 if (next_ldn(host_index) >= MAX_LOG_DEV) 1741 if (next_ldn(shpnt) >= MAX_LOG_DEV)
1850 next_ldn(host_index) = 7; 1742 next_ldn(shpnt) = 7;
1851 if (current_ldn == next_ldn(host_index)) { /* One circle done ? */ 1743 if (current_ldn == next_ldn(shpnt)) { /* One circle done ? */
1852 /* no non-processing ldn found */ 1744 /* no non-processing ldn found */
1853 scmd_printk(KERN_WARNING, cmd, 1745 scmd_printk(KERN_WARNING, cmd,
1854 "IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n" 1746 "IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
@@ -1864,56 +1756,56 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1864 /* unmap non-processing ldn */ 1756 /* unmap non-processing ldn */
1865 for (id = 0; id < max_pun; id++) 1757 for (id = 0; id < max_pun; id++)
1866 for (lun = 0; lun < 8; lun++) { 1758 for (lun = 0; lun < 8; lun++) {
1867 if (get_ldn(host_index)[id][lun] == next_ldn(host_index)) { 1759 if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
1868 get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE; 1760 get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
1869 get_scsi(host_index)[id][lun] = TYPE_NO_DEVICE; 1761 get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
1870 /* unmap entry */ 1762 /* unmap entry */
1871 } 1763 }
1872 } 1764 }
1873 /* set reduced interrupt_handler-mode for checking */ 1765 /* set reduced interrupt_handler-mode for checking */
1874 local_checking_phase_flag(host_index) = 1; 1766 local_checking_phase_flag(shpnt) = 1;
1875 /* map found ldn to pun,lun */ 1767 /* map found ldn to pun,lun */
1876 get_ldn(host_index)[target][cmd->device->lun] = next_ldn(host_index); 1768 get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
1877 /* change ldn to the right value, that is now next_ldn */ 1769 /* change ldn to the right value, that is now next_ldn */
1878 ldn = next_ldn(host_index); 1770 ldn = next_ldn(shpnt);
1879 /* unassign all ldns (pun,lun,ldn does not matter for remove) */ 1771 /* unassign all ldns (pun,lun,ldn does not matter for remove) */
1880 immediate_assign(host_index, 0, 0, 0, REMOVE_LDN); 1772 immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
1881 /* set only LDN for remapped device */ 1773 /* set only LDN for remapped device */
1882 immediate_assign(host_index, target, cmd->device->lun, ldn, SET_LDN); 1774 immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
1883 /* get device information for ld[ldn] */ 1775 /* get device information for ld[ldn] */
1884 if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) { 1776 if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
1885 ld(host_index)[ldn].cmd = NULL; /* set NULL to prevent a panic, because 1777 ld(shpnt)[ldn].cmd = NULL; /* set NULL to prevent a panic, because
1886 devices that were not assigned 1778 devices that were not assigned
1887 should have nothing in progress. */ 1779 should have nothing in progress. */
1888 get_scsi(host_index)[target][cmd->device->lun] = ld(host_index)[ldn].device_type; 1780 get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
1889 /* increase assignment counters for statistics in /proc */ 1781 /* increase assignment counters for statistics in /proc */
1890 IBM_DS(host_index).dynamical_assignments++; 1782 IBM_DS(shpnt).dynamical_assignments++;
1891 IBM_DS(host_index).ldn_assignments[ldn]++; 1783 IBM_DS(shpnt).ldn_assignments[ldn]++;
1892 } else 1784 } else
1893 /* panic here, because a device found at boot time has 1785 /* panic here, because a device found at boot time has
1894 vanished */ 1786 vanished */
1895 panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun); 1787 panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
1896 /* unassign again all ldns (pun,lun,ldn does not matter for remove) */ 1788 /* unassign again all ldns (pun,lun,ldn does not matter for remove) */
1897 immediate_assign(host_index, 0, 0, 0, REMOVE_LDN); 1789 immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
1898 /* remap all ldns, as written in the pun/lun table */ 1790 /* remap all ldns, as written in the pun/lun table */
1899 lun = 0; 1791 lun = 0;
1900#ifdef CONFIG_SCSI_MULTI_LUN 1792#ifdef CONFIG_SCSI_MULTI_LUN
1901 for (lun = 0; lun < 8; lun++) 1793 for (lun = 0; lun < 8; lun++)
1902#endif 1794#endif
1903 for (id = 0; id < max_pun; id++) { 1795 for (id = 0; id < max_pun; id++) {
1904 if (get_ldn(host_index)[id][lun] <= MAX_LOG_DEV) 1796 if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
1905 immediate_assign(host_index, id, lun, get_ldn(host_index)[id][lun], SET_LDN); 1797 immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
1906 } 1798 }
1907 /* set back to normal interrupt_handling */ 1799 /* set back to normal interrupt_handling */
1908 local_checking_phase_flag(host_index) = 0; 1800 local_checking_phase_flag(shpnt) = 0;
1909#ifdef IM_DEBUG_PROBE 1801#ifdef IM_DEBUG_PROBE
1910 /* Information on syslog terminal */ 1802 /* Information on syslog terminal */
1911 printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun); 1803 printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
1912#endif 1804#endif
1913 /* increase next_ldn for next dynamical assignment */ 1805 /* increase next_ldn for next dynamical assignment */
1914 next_ldn(host_index)++; 1806 next_ldn(shpnt)++;
1915 if (next_ldn(host_index) >= MAX_LOG_DEV) 1807 if (next_ldn(shpnt) >= MAX_LOG_DEV)
1916 next_ldn(host_index) = 7; 1808 next_ldn(shpnt) = 7;
1917 } else { /* wall against Linux accesses to the subsystem adapter */ 1809 } else { /* wall against Linux accesses to the subsystem adapter */
1918 cmd->result = DID_BAD_TARGET << 16; 1810 cmd->result = DID_BAD_TARGET << 16;
1919 if (done) 1811 if (done)
@@ -1923,34 +1815,32 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1923 } 1815 }
1924 1816
1925 /*verify there is no command already in progress for this log dev */ 1817 /*verify there is no command already in progress for this log dev */
1926 if (ld(host_index)[ldn].cmd) 1818 if (ld(shpnt)[ldn].cmd)
1927 panic("IBM MCA SCSI: cmd already in progress for this ldn.\n"); 1819 panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");
1928 1820
1929 /*save done in cmd, and save cmd for the interrupt handler */ 1821 /*save done in cmd, and save cmd for the interrupt handler */
1930 cmd->scsi_done = done; 1822 cmd->scsi_done = done;
1931 ld(host_index)[ldn].cmd = cmd; 1823 ld(shpnt)[ldn].cmd = cmd;
1932 1824
1933 /*fill scb information independent of the scsi command */ 1825 /*fill scb information independent of the scsi command */
1934 scb = &(ld(host_index)[ldn].scb); 1826 scb = &(ld(shpnt)[ldn].scb);
1935 ld(host_index)[ldn].tsb.dev_status = 0; 1827 ld(shpnt)[ldn].tsb.dev_status = 0;
1936 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE; 1828 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
1937 scb->tsb_adr = isa_virt_to_bus(&(ld(host_index)[ldn].tsb)); 1829 scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
1938 scsi_cmd = cmd->cmnd[0]; 1830 scsi_cmd = cmd->cmnd[0];
1939 1831
1940 if (cmd->use_sg) { 1832 if (scsi_sg_count(cmd)) {
1941 i = cmd->use_sg; 1833 BUG_ON(scsi_sg_count(cmd) > 16);
1942 sl = (struct scatterlist *) (cmd->request_buffer); 1834
1943 if (i > 16) 1835 scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
1944 panic("IBM MCA SCSI: scatter-gather list too long.\n"); 1836 ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
1945 while (--i >= 0) { 1837 ld(shpnt)[ldn].sge[i].byte_length = sg->length;
1946 ld(host_index)[ldn].sge[i].address = (void *) (isa_page_to_bus(sl[i].page) + sl[i].offset);
1947 ld(host_index)[ldn].sge[i].byte_length = sl[i].length;
1948 } 1838 }
1949 scb->enable |= IM_POINTER_TO_LIST; 1839 scb->enable |= IM_POINTER_TO_LIST;
1950 scb->sys_buf_adr = isa_virt_to_bus(&(ld(host_index)[ldn].sge[0])); 1840 scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
1951 scb->sys_buf_length = cmd->use_sg * sizeof(struct im_sge); 1841 scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
1952 } else { 1842 } else {
1953 scb->sys_buf_adr = isa_virt_to_bus(cmd->request_buffer); 1843 scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
1954 /* recent Linux midlevel SCSI places 1024 byte for inquiry 1844 /* recent Linux midlevel SCSI places 1024 byte for inquiry
1955 * command. Far too much for old PS/2 hardware. */ 1845 * command. Far too much for old PS/2 hardware. */
1956 switch (scsi_cmd) { 1846 switch (scsi_cmd) {
@@ -1961,16 +1851,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1961 case REQUEST_SENSE: 1851 case REQUEST_SENSE:
1962 case MODE_SENSE: 1852 case MODE_SENSE:
1963 case MODE_SELECT: 1853 case MODE_SELECT:
1964 if (cmd->request_bufflen > 255) 1854 if (scsi_bufflen(cmd) > 255)
1965 scb->sys_buf_length = 255; 1855 scb->sys_buf_length = 255;
1966 else 1856 else
1967 scb->sys_buf_length = cmd->request_bufflen; 1857 scb->sys_buf_length = scsi_bufflen(cmd);
1968 break; 1858 break;
1969 case TEST_UNIT_READY: 1859 case TEST_UNIT_READY:
1970 scb->sys_buf_length = 0; 1860 scb->sys_buf_length = 0;
1971 break; 1861 break;
1972 default: 1862 default:
1973 scb->sys_buf_length = cmd->request_bufflen; 1863 scb->sys_buf_length = scsi_bufflen(cmd);
1974 break; 1864 break;
1975 } 1865 }
1976 } 1866 }
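The scatter-gather rewrite above is one instance of the tree-wide move to the data buffer accessors: drivers stop reading cmd->use_sg, cmd->request_buffer and cmd->request_bufflen directly and go through scsi_sg_count(), scsi_sglist(), scsi_for_each_sg() and scsi_bufflen(). A compressed sketch of the idiom, where setup_one_segment() stands in for whatever the driver does per segment (a hypothetical helper, not part of the API):

	/* Sketch of the accessor idiom; only setup_one_segment() is
	 * hypothetical, the rest is the <scsi/scsi_cmnd.h> API. */
	static void map_command_buffers(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int i;

		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
			setup_one_segment(sg->page, sg->offset, sg->length);
		/* scsi_bufflen() reports the transfer length either way */
	}

The accessors also make the non-sg branch uniform: scsi_bufflen(cmd) replaces cmd->request_bufflen in the length clamps of the next hunk, with no behavioural change.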
@@ -1982,16 +1872,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1982 1872
1983 /* for specific device-type debugging: */ 1873 /* for specific device-type debugging: */
1984#ifdef IM_DEBUG_CMD_SPEC_DEV 1874#ifdef IM_DEBUG_CMD_SPEC_DEV
1985 if (ld(host_index)[ldn].device_type == IM_DEBUG_CMD_DEVICE) 1875 if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
1986 printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(host_index)[ldn].device_type, scsi_cmd, ldn); 1876 printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
1987#endif 1877#endif
1988 1878
1989 /* for possible panics store current command */ 1879 /* for possible panics store current command */
1990 last_scsi_command(host_index)[ldn] = scsi_cmd; 1880 last_scsi_command(shpnt)[ldn] = scsi_cmd;
1991 last_scsi_type(host_index)[ldn] = IM_SCB; 1881 last_scsi_type(shpnt)[ldn] = IM_SCB;
1992 /* update statistical info */ 1882 /* update statistical info */
1993 IBM_DS(host_index).total_accesses++; 1883 IBM_DS(shpnt).total_accesses++;
1994 IBM_DS(host_index).ldn_access[ldn]++; 1884 IBM_DS(shpnt).ldn_access[ldn]++;
1995 1885
1996 switch (scsi_cmd) { 1886 switch (scsi_cmd) {
1997 case READ_6: 1887 case READ_6:
@@ -2003,17 +1893,17 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2003 /* Distinguish between disk and other devices. Only disks (that are the 1893 /* Distinguish between disk and other devices. Only disks (that are the
2004 most frequently accessed devices) should be supported by the 1894 most frequently accessed devices) should be supported by the
2005 IBM-SCSI-Subsystem commands. */ 1895 IBM-SCSI-Subsystem commands. */
2006 switch (ld(host_index)[ldn].device_type) { 1896 switch (ld(shpnt)[ldn].device_type) {
2007 case TYPE_DISK: /* for harddisks enter here ... */ 1897 case TYPE_DISK: /* for harddisks enter here ... */
2008 case TYPE_MOD: /* ... try it also for MO-drives (send flames as */ 1898 case TYPE_MOD: /* ... try it also for MO-drives (send flames as */
2009 /* you like, if this won't work.) */ 1899 /* you like, if this won't work.) */
2010 if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) { 1900 if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
2011 /* read command preparations */ 1901 /* read command preparations */
2012 scb->enable |= IM_READ_CONTROL; 1902 scb->enable |= IM_READ_CONTROL;
2013 IBM_DS(host_index).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */ 1903 IBM_DS(shpnt).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
2014 scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT; 1904 scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
2015 } else { /* write command preparations */ 1905 } else { /* write command preparations */
2016 IBM_DS(host_index).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */ 1906 IBM_DS(shpnt).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
2017 scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT; 1907 scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
2018 } 1908 }
2019 if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) { 1909 if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
@@ -2023,9 +1913,9 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2023 scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24); 1913 scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
2024 scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8); 1914 scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
2025 } 1915 }
2026 last_scsi_logical_block(host_index)[ldn] = scb->u1.log_blk_adr; 1916 last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
2027 last_scsi_blockcount(host_index)[ldn] = scb->u2.blk.count; 1917 last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
2028 scb->u2.blk.length = ld(host_index)[ldn].block_length; 1918 scb->u2.blk.length = ld(shpnt)[ldn].block_length;
2029 break; 1919 break;
2030 /* for other devices, enter here. Other types are not known by 1920 /* for other devices, enter here. Other types are not known by
2031 Linux! TYPE_NO_LUN is forbidden as valid device. */ 1921 Linux! TYPE_NO_LUN is forbidden as valid device. */
@@ -2046,14 +1936,14 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2046 scb->enable |= IM_BYPASS_BUFFER; 1936 scb->enable |= IM_BYPASS_BUFFER;
2047 scb->u1.scsi_cmd_length = cmd->cmd_len; 1937 scb->u1.scsi_cmd_length = cmd->cmd_len;
2048 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len); 1938 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
2049 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1939 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2050 /* Read/write on this non-disk devices is also displayworthy, 1940 /* Read/write on this non-disk devices is also displayworthy,
2051 so flash-up the LED/display. */ 1941 so flash-up the LED/display. */
2052 break; 1942 break;
2053 } 1943 }
2054 break; 1944 break;
2055 case INQUIRY: 1945 case INQUIRY:
2056 IBM_DS(host_index).ldn_inquiry_access[ldn]++; 1946 IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
2057 scb->command = IM_DEVICE_INQUIRY_CMD; 1947 scb->command = IM_DEVICE_INQUIRY_CMD;
2058 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; 1948 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
2059 scb->u1.log_blk_adr = 0; 1949 scb->u1.log_blk_adr = 0;
@@ -2064,7 +1954,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2064 scb->u1.log_blk_adr = 0; 1954 scb->u1.log_blk_adr = 0;
2065 scb->u1.scsi_cmd_length = 6; 1955 scb->u1.scsi_cmd_length = 6;
2066 memcpy(scb->u2.scsi_command, cmd->cmnd, 6); 1956 memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
2067 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1957 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2068 break; 1958 break;
2069 case READ_CAPACITY: 1959 case READ_CAPACITY:
2070 /* the length of system memory buffer must be exactly 8 bytes */ 1960 /* the length of system memory buffer must be exactly 8 bytes */
@@ -2081,12 +1971,12 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2081 /* Commands that need write-only-mode (system -> device): */ 1971 /* Commands that need write-only-mode (system -> device): */
2082 case MODE_SELECT: 1972 case MODE_SELECT:
2083 case MODE_SELECT_10: 1973 case MODE_SELECT_10:
2084 IBM_DS(host_index).ldn_modeselect_access[ldn]++; 1974 IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
2085 scb->command = IM_OTHER_SCSI_CMD_CMD; 1975 scb->command = IM_OTHER_SCSI_CMD_CMD;
2086 scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */ 1976 scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */
2087 scb->u1.scsi_cmd_length = cmd->cmd_len; 1977 scb->u1.scsi_cmd_length = cmd->cmd_len;
2088 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len); 1978 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
2089 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1979 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2090 break; 1980 break;
2091 /* For other commands, read-only is useful. Most other commands are 1981 /* For other commands, read-only is useful. Most other commands are
2092 running without an input-data-block. */ 1982 running without an input-data-block. */
@@ -2095,19 +1985,19 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2095 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; 1985 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
2096 scb->u1.scsi_cmd_length = cmd->cmd_len; 1986 scb->u1.scsi_cmd_length = cmd->cmd_len;
2097 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len); 1987 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
2098 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1988 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2099 break; 1989 break;
2100 } 1990 }
2101 /*issue scb command, and return */ 1991 /*issue scb command, and return */
2102 if (++disk_rw_in_progress == 1) 1992 if (++disk_rw_in_progress == 1)
2103 PS2_DISK_LED_ON(shpnt->host_no, target); 1993 PS2_DISK_LED_ON(shpnt->host_no, target);
2104 1994
2105 if (last_scsi_type(host_index)[ldn] == IM_LONG_SCB) { 1995 if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
2106 issue_cmd(host_index, isa_virt_to_bus(scb), IM_LONG_SCB | ldn); 1996 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
2107 IBM_DS(host_index).long_scbs++; 1997 IBM_DS(shpnt).long_scbs++;
2108 } else { 1998 } else {
2109 issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn); 1999 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
2110 IBM_DS(host_index).scbs++; 2000 IBM_DS(shpnt).scbs++;
2111 } 2001 }
2112 return 0; 2002 return 0;
2113} 2003}
@@ -2122,7 +2012,6 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2122 unsigned int ldn; 2012 unsigned int ldn;
2123 void (*saved_done) (Scsi_Cmnd *); 2013 void (*saved_done) (Scsi_Cmnd *);
2124 int target; 2014 int target;
2125 int host_index;
2126 int max_pun; 2015 int max_pun;
2127 unsigned long imm_command; 2016 unsigned long imm_command;
2128 2017
@@ -2131,35 +2020,23 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2131#endif 2020#endif
2132 2021
2133 shpnt = cmd->device->host; 2022 shpnt = cmd->device->host;
2134 /* search for the right hostadapter */
2135 for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
2136 2023
2137 if (!hosts[host_index]) { /* invalid hostadapter descriptor address */ 2024 max_pun = subsystem_maxid(shpnt);
2138 cmd->result = DID_NO_CONNECT << 16;
2139 if (cmd->scsi_done)
2140 (cmd->scsi_done) (cmd);
2141 shpnt = cmd->device->host;
2142#ifdef IM_DEBUG_PROBE
2143 printk(KERN_DEBUG "IBM MCA SCSI: Abort adapter selection failed!\n");
2144#endif
2145 return SUCCESS;
2146 }
2147 max_pun = subsystem_maxid(host_index);
2148 if (ibm_ansi_order) { 2025 if (ibm_ansi_order) {
2149 target = max_pun - 1 - cmd->device->id; 2026 target = max_pun - 1 - cmd->device->id;
2150 if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index))) 2027 if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
2151 target--; 2028 target--;
2152 else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index))) 2029 else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
2153 target++; 2030 target++;
2154 } else 2031 } else
2155 target = cmd->device->id; 2032 target = cmd->device->id;
2156 2033
2157 /* get logical device number, and disable system interrupts */ 2034 /* get logical device number, and disable system interrupts */
2158 printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun); 2035 printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
2159 ldn = get_ldn(host_index)[target][cmd->device->lun]; 2036 ldn = get_ldn(shpnt)[target][cmd->device->lun];
2160 2037
2161 /*if cmd for this ldn has already finished, no need to abort */ 2038 /*if cmd for this ldn has already finished, no need to abort */
2162 if (!ld(host_index)[ldn].cmd) { 2039 if (!ld(shpnt)[ldn].cmd) {
2163 return SUCCESS; 2040 return SUCCESS;
2164 } 2041 }
2165 2042
@@ -2170,20 +2047,20 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2170 saved_done = cmd->scsi_done; 2047 saved_done = cmd->scsi_done;
2171 cmd->scsi_done = internal_done; 2048 cmd->scsi_done = internal_done;
2172 cmd->SCp.Status = 0; 2049 cmd->SCp.Status = 0;
2173 last_scsi_command(host_index)[ldn] = IM_ABORT_IMM_CMD; 2050 last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
2174 last_scsi_type(host_index)[ldn] = IM_IMM_CMD; 2051 last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
2175 imm_command = inl(IM_CMD_REG(host_index)); 2052 imm_command = inl(IM_CMD_REG(shpnt));
2176 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */ 2053 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
2177 imm_command |= (unsigned long) (IM_ABORT_IMM_CMD); 2054 imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
2178 /* must wait for attention reg not busy */ 2055 /* must wait for attention reg not busy */
2179 /* FIXME - timeout, politeness */ 2056 /* FIXME - timeout, politeness */
2180 while (1) { 2057 while (1) {
2181 if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) 2058 if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
2182 break; 2059 break;
2183 } 2060 }
2184 /* write registers and enable system interrupts */ 2061 /* write registers and enable system interrupts */
2185 outl(imm_command, IM_CMD_REG(host_index)); 2062 outl(imm_command, IM_CMD_REG(shpnt));
2186 outb(IM_IMM_CMD | ldn, IM_ATTN_REG(host_index)); 2063 outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
2187#ifdef IM_DEBUG_PROBE 2064#ifdef IM_DEBUG_PROBE
2188 printk("IBM MCA SCSI: Abort queued to adapter...\n"); 2065 printk("IBM MCA SCSI: Abort queued to adapter...\n");
2189#endif 2066#endif
@@ -2202,7 +2079,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2202 cmd->result |= DID_ABORT << 16; 2079 cmd->result |= DID_ABORT << 16;
2203 if (cmd->scsi_done) 2080 if (cmd->scsi_done)
2204 (cmd->scsi_done) (cmd); 2081 (cmd->scsi_done) (cmd);
2205 ld(host_index)[ldn].cmd = NULL; 2082 ld(shpnt)[ldn].cmd = NULL;
2206#ifdef IM_DEBUG_PROBE 2083#ifdef IM_DEBUG_PROBE
2207 printk("IBM MCA SCSI: Abort finished with success.\n"); 2084 printk("IBM MCA SCSI: Abort finished with success.\n");
2208#endif 2085#endif
@@ -2211,7 +2088,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2211 cmd->result |= DID_NO_CONNECT << 16; 2088 cmd->result |= DID_NO_CONNECT << 16;
2212 if (cmd->scsi_done) 2089 if (cmd->scsi_done)
2213 (cmd->scsi_done) (cmd); 2090 (cmd->scsi_done) (cmd);
2214 ld(host_index)[ldn].cmd = NULL; 2091 ld(shpnt)[ldn].cmd = NULL;
2215#ifdef IM_DEBUG_PROBE 2092#ifdef IM_DEBUG_PROBE
2216 printk("IBM MCA SCSI: Abort failed.\n"); 2093 printk("IBM MCA SCSI: Abort failed.\n");
2217#endif 2094#endif
@@ -2236,71 +2113,65 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
2236 struct Scsi_Host *shpnt; 2113 struct Scsi_Host *shpnt;
2237 Scsi_Cmnd *cmd_aid; 2114 Scsi_Cmnd *cmd_aid;
2238 int ticks, i; 2115 int ticks, i;
2239 int host_index;
2240 unsigned long imm_command; 2116 unsigned long imm_command;
2241 2117
2242 BUG_ON(cmd == NULL); 2118 BUG_ON(cmd == NULL);
2243 2119
2244 ticks = IM_RESET_DELAY * HZ; 2120 ticks = IM_RESET_DELAY * HZ;
2245 shpnt = cmd->device->host; 2121 shpnt = cmd->device->host;
2246 /* search for the right hostadapter */
2247 for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
2248
2249 if (!hosts[host_index]) /* invalid hostadapter descriptor address */
2250 return FAILED;
2251 2122
2252 if (local_checking_phase_flag(host_index)) { 2123 if (local_checking_phase_flag(shpnt)) {
2253 printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n"); 2124 printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
2254 return FAILED; 2125 return FAILED;
2255 } 2126 }
2256 2127
2257 /* issue reset immediate command to subsystem, and wait for interrupt */ 2128 /* issue reset immediate command to subsystem, and wait for interrupt */
2258 printk("IBM MCA SCSI: resetting all devices.\n"); 2129 printk("IBM MCA SCSI: resetting all devices.\n");
2259 reset_status(host_index) = IM_RESET_IN_PROGRESS; 2130 reset_status(shpnt) = IM_RESET_IN_PROGRESS;
2260 last_scsi_command(host_index)[0xf] = IM_RESET_IMM_CMD; 2131 last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
2261 last_scsi_type(host_index)[0xf] = IM_IMM_CMD; 2132 last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
2262 imm_command = inl(IM_CMD_REG(host_index)); 2133 imm_command = inl(IM_CMD_REG(shpnt));
2263 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */ 2134 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
2264 imm_command |= (unsigned long) (IM_RESET_IMM_CMD); 2135 imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
2265 /* must wait for attention reg not busy */ 2136 /* must wait for attention reg not busy */
2266 while (1) { 2137 while (1) {
2267 if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) 2138 if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
2268 break; 2139 break;
2269 spin_unlock_irq(shpnt->host_lock); 2140 spin_unlock_irq(shpnt->host_lock);
2270 yield(); 2141 yield();
2271 spin_lock_irq(shpnt->host_lock); 2142 spin_lock_irq(shpnt->host_lock);
2272 } 2143 }
2273 /*write registers and enable system interrupts */ 2144 /*write registers and enable system interrupts */
2274 outl(imm_command, IM_CMD_REG(host_index)); 2145 outl(imm_command, IM_CMD_REG(shpnt));
2275 outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(host_index)); 2146 outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
2276 /* wait for interrupt finished or intr_stat register to be set, as the 2147 /* wait for interrupt finished or intr_stat register to be set, as the
2277 * interrupt will not be executed, while we are in here! */ 2148 * interrupt will not be executed, while we are in here! */
2278 2149
2279 /* FIXME: This is really really icky we so want a sleeping version of this ! */ 2150 /* FIXME: This is really really icky we so want a sleeping version of this ! */
2280 while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(host_index)) & 0x8f) != 0x8f)) { 2151 while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
2281 udelay((1 + 999 / HZ) * 1000); 2152 udelay((1 + 999 / HZ) * 1000);
2282 barrier(); 2153 barrier();
2283 } 2154 }
2284 /* if reset did not complete, just return an error */ 2155 /* if reset did not complete, just return an error */
2285 if (!ticks) { 2156 if (!ticks) {
2286 printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY); 2157 printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
2287 reset_status(host_index) = IM_RESET_FINISHED_FAIL; 2158 reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
2288 return FAILED; 2159 return FAILED;
2289 } 2160 }
2290 2161
2291 if ((inb(IM_INTR_REG(host_index)) & 0x8f) == 0x8f) { 2162 if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
2292 /* analysis done by this routine and not by the intr-routine */ 2163 /* analysis done by this routine and not by the intr-routine */
2293 if (inb(IM_INTR_REG(host_index)) == 0xaf) 2164 if (inb(IM_INTR_REG(shpnt)) == 0xaf)
2294 reset_status(host_index) = IM_RESET_FINISHED_OK_NO_INT; 2165 reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
2295 else if (inb(IM_INTR_REG(host_index)) == 0xcf) 2166 else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
2296 reset_status(host_index) = IM_RESET_FINISHED_FAIL; 2167 reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
2297 else /* failed, 4get it */ 2168 else /* failed, 4get it */
2298 reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS_NO_INT; 2169 reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
2299 outb(IM_EOI | 0xf, IM_ATTN_REG(host_index)); 2170 outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
2300 } 2171 }
2301 2172
2302 /* if reset failed, just return an error */ 2173 /* if reset failed, just return an error */
2303 if (reset_status(host_index) == IM_RESET_FINISHED_FAIL) { 2174 if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
2304 printk(KERN_ERR "IBM MCA SCSI: reset failed.\n"); 2175 printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
2305 return FAILED; 2176 return FAILED;
2306 } 2177 }
@@ -2308,9 +2179,9 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
2308 /* so reset finished ok - call outstanding done's, and return success */ 2179 /* so reset finished ok - call outstanding done's, and return success */
2309 printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n"); 2180 printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
2310 for (i = 0; i < MAX_LOG_DEV; i++) { 2181 for (i = 0; i < MAX_LOG_DEV; i++) {
2311 cmd_aid = ld(host_index)[i].cmd; 2182 cmd_aid = ld(shpnt)[i].cmd;
2312 if (cmd_aid && cmd_aid->scsi_done) { 2183 if (cmd_aid && cmd_aid->scsi_done) {
2313 ld(host_index)[i].cmd = NULL; 2184 ld(shpnt)[i].cmd = NULL;
2314 cmd_aid->result = DID_RESET << 16; 2185 cmd_aid->result = DID_RESET << 16;
2315 } 2186 }
2316 } 2187 }
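The host_index to shpnt conversion running through these hunks replaces lookups into global per-adapter arrays with data reached through struct Scsi_Host. A minimal sketch of the pattern, with a hypothetical layout (the driver's real accessor macros wrap the same idea):

    struct ibmmca_hostdata {
            int rst_status;                 /* hypothetical fields */
            int last_cmd[MAX_LOG_DEV + 1];
    };

    /* hostdata lives in the extra space scsi_host_alloc() reserves */
    #define HD(shpnt)            ((struct ibmmca_hostdata *)(shpnt)->hostdata)
    #define reset_status(shpnt)  (HD(shpnt)->rst_status)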
@@ -2351,46 +2222,46 @@ static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2351} 2222}
2352 2223
2353/* calculate percentage of total accesses on a ldn */ 2224/* calculate percentage of total accesses on a ldn */
2354static int ldn_access_load(int host_index, int ldn) 2225static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
2355{ 2226{
2356 if (IBM_DS(host_index).total_accesses == 0) 2227 if (IBM_DS(shpnt).total_accesses == 0)
2357 return (0); 2228 return (0);
2358 if (IBM_DS(host_index).ldn_access[ldn] == 0) 2229 if (IBM_DS(shpnt).ldn_access[ldn] == 0)
2359 return (0); 2230 return (0);
2360 return (IBM_DS(host_index).ldn_access[ldn] * 100) / IBM_DS(host_index).total_accesses; 2231 return (IBM_DS(shpnt).ldn_access[ldn] * 100) / IBM_DS(shpnt).total_accesses;
2361} 2232}
2362 2233
2363/* calculate total amount of r/w-accesses */ 2234/* calculate total amount of r/w-accesses */
2364static int ldn_access_total_read_write(int host_index) 2235static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
2365{ 2236{
2366 int a; 2237 int a;
2367 int i; 2238 int i;
2368 2239
2369 a = 0; 2240 a = 0;
2370 for (i = 0; i <= MAX_LOG_DEV; i++) 2241 for (i = 0; i <= MAX_LOG_DEV; i++)
2371 a += IBM_DS(host_index).ldn_read_access[i] + IBM_DS(host_index).ldn_write_access[i]; 2242 a += IBM_DS(shpnt).ldn_read_access[i] + IBM_DS(shpnt).ldn_write_access[i];
2372 return (a); 2243 return (a);
2373} 2244}
2374 2245
2375static int ldn_access_total_inquiry(int host_index) 2246static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
2376{ 2247{
2377 int a; 2248 int a;
2378 int i; 2249 int i;
2379 2250
2380 a = 0; 2251 a = 0;
2381 for (i = 0; i <= MAX_LOG_DEV; i++) 2252 for (i = 0; i <= MAX_LOG_DEV; i++)
2382 a += IBM_DS(host_index).ldn_inquiry_access[i]; 2253 a += IBM_DS(shpnt).ldn_inquiry_access[i];
2383 return (a); 2254 return (a);
2384} 2255}
2385 2256
2386static int ldn_access_total_modeselect(int host_index) 2257static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
2387{ 2258{
2388 int a; 2259 int a;
2389 int i; 2260 int i;
2390 2261
2391 a = 0; 2262 a = 0;
2392 for (i = 0; i <= MAX_LOG_DEV; i++) 2263 for (i = 0; i <= MAX_LOG_DEV; i++)
2393 a += IBM_DS(host_index).ldn_modeselect_access[i]; 2264 a += IBM_DS(shpnt).ldn_modeselect_access[i];
2394 return (a); 2265 return (a);
2395} 2266}
2396 2267
@@ -2398,19 +2269,14 @@ static int ldn_access_total_modeselect(int host_index)
2398static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout) 2269static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
2399{ 2270{
2400 int len = 0; 2271 int len = 0;
2401 int i, id, lun, host_index; 2272 int i, id, lun;
2402 unsigned long flags; 2273 unsigned long flags;
2403 int max_pun; 2274 int max_pun;
2404 2275
2405 for (i = 0; hosts[i] && hosts[i] != shpnt; i++);
2406 2276
2407 spin_lock_irqsave(hosts[i]->host_lock, flags); /* Check it */ 2277 spin_lock_irqsave(shpnt->host_lock, flags); /* Check it */
2408 host_index = i; 2278
2409 if (!shpnt) { 2279 max_pun = subsystem_maxid(shpnt);
2410 len += sprintf(buffer + len, "\nIBM MCA SCSI: Can't find adapter");
2411 return len;
2412 }
2413 max_pun = subsystem_maxid(host_index);
2414 2280
2415 len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION); 2281 len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
2416 len += sprintf(buffer + len, " SCSI Access-Statistics:\n"); 2282 len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
@@ -2421,40 +2287,40 @@ static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start,
2421 len += sprintf(buffer + len, " Multiple LUN probing.....: No\n"); 2287 len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
2422#endif 2288#endif
2423 len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no); 2289 len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
2424 len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(host_index))); 2290 len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
2425 len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ); 2291 len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
2426 len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(host_index).total_interrupts); 2292 len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
2427 len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(host_index).total_accesses); 2293 len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
2428 len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(host_index).scbs); 2294 len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
2429 len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(host_index).long_scbs); 2295 len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
2430 len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(host_index)); 2296 len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
2431 len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(host_index)); 2297 len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
2432 len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(host_index)); 2298 len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
2433 len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(host_index).total_accesses - ldn_access_total_read_write(host_index) 2299 len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
2434 - ldn_access_total_modeselect(host_index) 2300 - ldn_access_total_modeselect(shpnt)
2435 - ldn_access_total_inquiry(host_index)); 2301 - ldn_access_total_inquiry(shpnt));
2436 len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(host_index).total_errors); 2302 len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
2437 len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n"); 2303 len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
2438 len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n"); 2304 len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
2439 len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n"); 2305 len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
2440 for (i = 0; i <= MAX_LOG_DEV; i++) 2306 for (i = 0; i <= MAX_LOG_DEV; i++)
2441 len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(host_index, i), IBM_DS(host_index).ldn_read_access[i], IBM_DS(host_index).ldn_write_access[i], IBM_DS(host_index).ldn_assignments[i]); 2307 len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
2442 len += sprintf(buffer + len, " -----------------------------------------------------------\n\n"); 2308 len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
2443 len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n"); 2309 len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
2444 len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(host_index).total_scsi_devices); 2310 len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
2445 len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(host_index).dyn_flag ? "Yes" : "No "); 2311 len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
2446 len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(host_index)); 2312 len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
2447 len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(host_index).dynamical_assignments); 2313 len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
2448 len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n"); 2314 len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
2449 len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n"); 2315 len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
2450 len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n"); 2316 len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
2451 for (id = 0; id < max_pun; id++) { 2317 for (id = 0; id < max_pun; id++) {
2452 len += sprintf(buffer + len, " %2d ", id); 2318 len += sprintf(buffer + len, " %2d ", id);
2453 for (lun = 0; lun < 8; lun++) 2319 for (lun = 0; lun < 8; lun++)
2454 len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(host_index)[id][lun])); 2320 len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
2455 len += sprintf(buffer + len, " %2d ", id); 2321 len += sprintf(buffer + len, " %2d ", id);
2456 for (lun = 0; lun < 8; lun++) 2322 for (lun = 0; lun < 8; lun++)
2457 len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(host_index)[id][lun])); 2323 len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
2458 len += sprintf(buffer + len, "\n"); 2324 len += sprintf(buffer + len, "\n");
2459 } 2325 }
2460 2326
@@ -2488,20 +2354,31 @@ static int option_setup(char *str)
2488 2354
2489__setup("ibmmcascsi=", option_setup); 2355__setup("ibmmcascsi=", option_setup);
2490 2356
2491static struct scsi_host_template driver_template = { 2357static struct mca_driver ibmmca_driver = {
2492 .proc_name = "ibmmca", 2358 .id_table = ibmmca_id_table,
2493 .proc_info = ibmmca_proc_info, 2359 .driver = {
2494 .name = "IBM SCSI-Subsystem", 2360 .name = "ibmmca",
2495 .detect = ibmmca_detect, 2361 .bus = &mca_bus_type,
2496 .release = ibmmca_release, 2362 .probe = ibmmca_probe,
2497 .queuecommand = ibmmca_queuecommand, 2363 .remove = __devexit_p(ibmmca_remove),
2498 .eh_abort_handler = ibmmca_abort, 2364 },
2499 .eh_host_reset_handler = ibmmca_host_reset,
2500 .bios_param = ibmmca_biosparam,
2501 .can_queue = 16,
2502 .this_id = 7,
2503 .sg_tablesize = 16,
2504 .cmd_per_lun = 1,
2505 .use_clustering = ENABLE_CLUSTERING,
2506}; 2365};
2507#include "scsi_module.c" 2366
2367static int __init ibmmca_init(void)
2368{
2369#ifdef MODULE
2370 /* If the driver is run as module, read from conf.modules or cmd-line */
2371 if (boot_options)
2372 option_setup(boot_options);
2373#endif
2374
2375 return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
2376}
2377
2378static void __exit ibmmca_exit(void)
2379{
2380 mca_unregister_driver(&ibmmca_driver);
2381}
2382
2383module_init(ibmmca_init);
2384module_exit(ibmmca_exit);
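With #include "scsi_module.c" gone, the driver registers with the MCA bus and builds its Scsi_Host from a probe routine instead of the old detect()/release() pair. The probe body is outside this hunk; a plausible skeleton, with the template name hypothetical and error handling trimmed:

    static int __devinit ibmmca_probe(struct device *dev)
    {
            struct Scsi_Host *shpnt;

            shpnt = scsi_host_alloc(&ibmmca_template,       /* hypothetical name */
                                    sizeof(struct ibmmca_hostdata));
            if (!shpnt)
                    return -ENOMEM;
            /* ... I/O port, IRQ and subsystem setup for to_mca_device(dev) ... */
            if (scsi_add_host(shpnt, dev)) {
                    scsi_host_put(shpnt);
                    return -ENODEV;
            }
            scsi_scan_host(shpnt);
            return 0;
    }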
diff --git a/drivers/scsi/ibmmca.h b/drivers/scsi/ibmmca.h
deleted file mode 100644
index 017ee2fa6d63..000000000000
--- a/drivers/scsi/ibmmca.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Low Level Driver for the IBM Microchannel SCSI Subsystem
3 * (Headerfile, see Documentation/scsi/ibmmca.txt for description of the
4 * IBM MCA SCSI-driver.
5 * For use under the GNU General Public License within the Linux-kernel project.
6 * This include file works only correctly with kernel 2.4.0 or higher!!! */
7
8#ifndef _IBMMCA_H
9#define _IBMMCA_H
10
11/* Common forward declarations for all Linux-versions: */
12
13/* Interfaces to the midlevel Linux SCSI driver */
14static int ibmmca_detect (struct scsi_host_template *);
15static int ibmmca_release (struct Scsi_Host *);
16static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
17static int ibmmca_abort (Scsi_Cmnd *);
18static int ibmmca_host_reset (Scsi_Cmnd *);
19static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
20
21#endif /* _IBMMCA_H */
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b10eefe735c5..5870866abc99 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -173,9 +173,8 @@ static void release_event_pool(struct event_pool *pool,
173 } 173 }
174 } 174 }
175 if (in_use) 175 if (in_use)
176 printk(KERN_WARNING 176 dev_warn(hostdata->dev, "releasing event pool with %d "
177 "ibmvscsi: releasing event pool with %d " 177 "events still in use?\n", in_use);
178 "events still in use?\n", in_use);
179 kfree(pool->events); 178 kfree(pool->events);
180 dma_free_coherent(hostdata->dev, 179 dma_free_coherent(hostdata->dev,
181 pool->size * sizeof(*pool->iu_storage), 180 pool->size * sizeof(*pool->iu_storage),
@@ -210,15 +209,13 @@ static void free_event_struct(struct event_pool *pool,
210 struct srp_event_struct *evt) 209 struct srp_event_struct *evt)
211{ 210{
212 if (!valid_event_struct(pool, evt)) { 211 if (!valid_event_struct(pool, evt)) {
213 printk(KERN_ERR 212 dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
214 "ibmvscsi: Freeing invalid event_struct %p " 213 "(not in pool %p)\n", evt, pool->events);
215 "(not in pool %p)\n", evt, pool->events);
216 return; 214 return;
217 } 215 }
218 if (atomic_inc_return(&evt->free) != 1) { 216 if (atomic_inc_return(&evt->free) != 1) {
219 printk(KERN_ERR 217 dev_err(evt->hostdata->dev, "Freeing event_struct %p "
220 "ibmvscsi: Freeing event_struct %p " 218 "which is not in use!\n", evt);
221 "which is not in use!\n", evt);
222 return; 219 return;
223 } 220 }
224} 221}
@@ -353,20 +350,19 @@ static void unmap_cmd_data(struct srp_cmd *cmd,
353 } 350 }
354} 351}
355 352
356static int map_sg_list(int num_entries, 353static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
357 struct scatterlist *sg,
358 struct srp_direct_buf *md) 354 struct srp_direct_buf *md)
359{ 355{
360 int i; 356 int i;
357 struct scatterlist *sg;
361 u64 total_length = 0; 358 u64 total_length = 0;
362 359
363 for (i = 0; i < num_entries; ++i) { 360 scsi_for_each_sg(cmd, sg, nseg, i) {
364 struct srp_direct_buf *descr = md + i; 361 struct srp_direct_buf *descr = md + i;
365 struct scatterlist *sg_entry = &sg[i]; 362 descr->va = sg_dma_address(sg);
366 descr->va = sg_dma_address(sg_entry); 363 descr->len = sg_dma_len(sg);
367 descr->len = sg_dma_len(sg_entry);
368 descr->key = 0; 364 descr->key = 0;
369 total_length += sg_dma_len(sg_entry); 365 total_length += sg_dma_len(sg);
370 } 366 }
371 return total_length; 367 return total_length;
372} 368}
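scsi_for_each_sg() is one of the data buffer accessors this series converts drivers to; it walks the command's scatterlist without touching cmd->request_buffer or cmd->use_sg directly. A self-contained usage sketch:

    /* needs <scsi/scsi_cmnd.h>; sums the DMA lengths of a mapped command */
    static u64 total_dma_len(struct scsi_cmnd *cmd, int nseg)
    {
            struct scatterlist *sg;
            u64 total = 0;
            int i;

            scsi_for_each_sg(cmd, sg, nseg, i)
                    total += sg_dma_len(sg);
            return total;
    }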
@@ -387,40 +383,37 @@ static int map_sg_data(struct scsi_cmnd *cmd,
387 383
388 int sg_mapped; 384 int sg_mapped;
389 u64 total_length = 0; 385 u64 total_length = 0;
390 struct scatterlist *sg = cmd->request_buffer;
391 struct srp_direct_buf *data = 386 struct srp_direct_buf *data =
392 (struct srp_direct_buf *) srp_cmd->add_data; 387 (struct srp_direct_buf *) srp_cmd->add_data;
393 struct srp_indirect_buf *indirect = 388 struct srp_indirect_buf *indirect =
394 (struct srp_indirect_buf *) data; 389 (struct srp_indirect_buf *) data;
395 390
396 sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL); 391 sg_mapped = scsi_dma_map(cmd);
397 392 if (!sg_mapped)
398 if (sg_mapped == 0) 393 return 1;
394 else if (sg_mapped < 0)
399 return 0; 395 return 0;
396 else if (sg_mapped > SG_ALL) {
397 printk(KERN_ERR
398 "ibmvscsi: More than %d mapped sg entries, got %d\n",
399 SG_ALL, sg_mapped);
400 return 0;
401 }
400 402
401 set_srp_direction(cmd, srp_cmd, sg_mapped); 403 set_srp_direction(cmd, srp_cmd, sg_mapped);
402 404
403 /* special case; we can use a single direct descriptor */ 405 /* special case; we can use a single direct descriptor */
404 if (sg_mapped == 1) { 406 if (sg_mapped == 1) {
405 data->va = sg_dma_address(&sg[0]); 407 map_sg_list(cmd, sg_mapped, data);
406 data->len = sg_dma_len(&sg[0]);
407 data->key = 0;
408 return 1; 408 return 1;
409 } 409 }
410 410
411 if (sg_mapped > SG_ALL) {
412 printk(KERN_ERR
413 "ibmvscsi: More than %d mapped sg entries, got %d\n",
414 SG_ALL, sg_mapped);
415 return 0;
416 }
417
418 indirect->table_desc.va = 0; 411 indirect->table_desc.va = 0;
419 indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); 412 indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
420 indirect->table_desc.key = 0; 413 indirect->table_desc.key = 0;
421 414
422 if (sg_mapped <= MAX_INDIRECT_BUFS) { 415 if (sg_mapped <= MAX_INDIRECT_BUFS) {
423 total_length = map_sg_list(sg_mapped, sg, 416 total_length = map_sg_list(cmd, sg_mapped,
424 &indirect->desc_list[0]); 417 &indirect->desc_list[0]);
425 indirect->len = total_length; 418 indirect->len = total_length;
426 return 1; 419 return 1;
@@ -429,61 +422,27 @@ static int map_sg_data(struct scsi_cmnd *cmd,
429 /* get indirect table */ 422 /* get indirect table */
430 if (!evt_struct->ext_list) { 423 if (!evt_struct->ext_list) {
431 evt_struct->ext_list = (struct srp_direct_buf *) 424 evt_struct->ext_list = (struct srp_direct_buf *)
432 dma_alloc_coherent(dev, 425 dma_alloc_coherent(dev,
433 SG_ALL * sizeof(struct srp_direct_buf), 426 SG_ALL * sizeof(struct srp_direct_buf),
434 &evt_struct->ext_list_token, 0); 427 &evt_struct->ext_list_token, 0);
435 if (!evt_struct->ext_list) { 428 if (!evt_struct->ext_list) {
436 printk(KERN_ERR 429 sdev_printk(KERN_ERR, cmd->device,
437 "ibmvscsi: Can't allocate memory for indirect table\n"); 430 "Can't allocate memory for indirect table\n");
438 return 0; 431 return 0;
439
440 } 432 }
441 } 433 }
442 434
443 total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list); 435 total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);
444 436
445 indirect->len = total_length; 437 indirect->len = total_length;
446 indirect->table_desc.va = evt_struct->ext_list_token; 438 indirect->table_desc.va = evt_struct->ext_list_token;
447 indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); 439 indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
448 memcpy(indirect->desc_list, evt_struct->ext_list, 440 memcpy(indirect->desc_list, evt_struct->ext_list,
449 MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); 441 MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
450
451 return 1; 442 return 1;
452} 443}
453 444
454/** 445/**
455 * map_single_data: - Maps memory and initializes memory decriptor fields
456 * @cmd: struct scsi_cmnd with the memory to be mapped
457 * @srp_cmd: srp_cmd that contains the memory descriptor
458 * @dev: device for which to map dma memory
459 *
460 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
461 * Returns 1 on success.
462*/
463static int map_single_data(struct scsi_cmnd *cmd,
464 struct srp_cmd *srp_cmd, struct device *dev)
465{
466 struct srp_direct_buf *data =
467 (struct srp_direct_buf *) srp_cmd->add_data;
468
469 data->va =
470 dma_map_single(dev, cmd->request_buffer,
471 cmd->request_bufflen,
472 DMA_BIDIRECTIONAL);
473 if (dma_mapping_error(data->va)) {
474 printk(KERN_ERR
475 "ibmvscsi: Unable to map request_buffer for command!\n");
476 return 0;
477 }
478 data->len = cmd->request_bufflen;
479 data->key = 0;
480
481 set_srp_direction(cmd, srp_cmd, 1);
482
483 return 1;
484}
485
486/**
487 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds 446 * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
488 * @cmd: struct scsi_cmnd with the memory to be mapped 447 * @cmd: struct scsi_cmnd with the memory to be mapped
489 * @srp_cmd: srp_cmd that contains the memory descriptor 448 * @srp_cmd: srp_cmd that contains the memory descriptor
@@ -503,23 +462,83 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
503 case DMA_NONE: 462 case DMA_NONE:
504 return 1; 463 return 1;
505 case DMA_BIDIRECTIONAL: 464 case DMA_BIDIRECTIONAL:
506 printk(KERN_ERR 465 sdev_printk(KERN_ERR, cmd->device,
507 "ibmvscsi: Can't map DMA_BIDIRECTIONAL to read/write\n"); 466 "Can't map DMA_BIDIRECTIONAL to read/write\n");
508 return 0; 467 return 0;
509 default: 468 default:
510 printk(KERN_ERR 469 sdev_printk(KERN_ERR, cmd->device,
511 "ibmvscsi: Unknown data direction 0x%02x; can't map!\n", 470 "Unknown data direction 0x%02x; can't map!\n",
512 cmd->sc_data_direction); 471 cmd->sc_data_direction);
513 return 0; 472 return 0;
514 } 473 }
515 474
516 if (!cmd->request_buffer) 475 return map_sg_data(cmd, evt_struct, srp_cmd, dev);
517 return 1;
518 if (cmd->use_sg)
519 return map_sg_data(cmd, evt_struct, srp_cmd, dev);
520 return map_single_data(cmd, srp_cmd, dev);
521} 476}
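The rewritten map_data_for_srp_cmd() relies on the scsi_dma_map() contract visible above: 0 means the command carries no data, a negative value means the mapping failed, and a positive value is the number of scatter-gather entries. A sketch of a typical caller:

    static int example_map(struct scsi_cmnd *cmd)
    {
            int nseg = scsi_dma_map(cmd);

            if (nseg < 0)
                    return -EIO;            /* mapping failed */
            if (nseg == 0)
                    return 0;               /* nothing to transfer */
            /* ... fill nseg descriptors via scsi_for_each_sg() ... */
            return nseg;                    /* scsi_dma_unmap(cmd) at completion */
    }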
522 477
478/**
479 * purge_requests: Our virtual adapter just shut down. purge any sent requests
480 * @hostdata: the adapter
481 */
482static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
483{
484 struct srp_event_struct *tmp_evt, *pos;
485 unsigned long flags;
486
487 spin_lock_irqsave(hostdata->host->host_lock, flags);
488 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
489 list_del(&tmp_evt->list);
490 del_timer(&tmp_evt->timer);
491 if (tmp_evt->cmnd) {
492 tmp_evt->cmnd->result = (error_code << 16);
493 unmap_cmd_data(&tmp_evt->iu.srp.cmd,
494 tmp_evt,
495 tmp_evt->hostdata->dev);
496 if (tmp_evt->cmnd_done)
497 tmp_evt->cmnd_done(tmp_evt->cmnd);
498 } else if (tmp_evt->done)
499 tmp_evt->done(tmp_evt);
500 free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
501 }
502 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
503}
504
505/**
506 * ibmvscsi_reset_host - Reset the connection to the server
507 * @hostdata: struct ibmvscsi_host_data to reset
508*/
509static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
510{
511 scsi_block_requests(hostdata->host);
512 atomic_set(&hostdata->request_limit, 0);
513
514 purge_requests(hostdata, DID_ERROR);
515 if ((ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata)) ||
516 (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0)) ||
517 (vio_enable_interrupts(to_vio_dev(hostdata->dev)))) {
518 atomic_set(&hostdata->request_limit, -1);
519 dev_err(hostdata->dev, "error after reset\n");
520 }
521
522 scsi_unblock_requests(hostdata->host);
523}
524
525/**
526 * ibmvscsi_timeout - Internal command timeout handler
527 * @evt_struct: struct srp_event_struct that timed out
528 *
529 * Called when an internally generated command times out
530*/
531static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
532{
533 struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
534
535 dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
536 evt_struct->iu.srp.cmd.opcode);
537
538 ibmvscsi_reset_host(hostdata);
539}
540
541
523/* ------------------------------------------------------------ 542/* ------------------------------------------------------------
524 * Routines for sending and receiving SRPs 543 * Routines for sending and receiving SRPs
525 */ 544 */
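The new internal-command timeout uses the classic 2.6 timer API: arm a struct timer_list when the request goes out, delete it when the response arrives, and treat expiry as a wedged connection. The arming side, sketched with hypothetical names:

    /* needs <linux/timer.h> */
    static void my_timeout(unsigned long data)
    {
            struct srp_event_struct *evt = (struct srp_event_struct *)data;

            /* no response in time: assume the connection is wedged */
            ibmvscsi_reset_host(evt->hostdata);
    }

    static void arm_evt_timer(struct srp_event_struct *evt, unsigned long secs)
    {
            init_timer(&evt->timer);
            evt->timer.data     = (unsigned long)evt;
            evt->timer.expires  = jiffies + secs * HZ;  /* seconds in, jiffies out */
            evt->timer.function = my_timeout;
            add_timer(&evt->timer);
    }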
@@ -527,12 +546,14 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
527 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq() 546 * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls send_crq()
528 * @evt_struct: evt_struct to be sent 547 * @evt_struct: evt_struct to be sent
529 * @hostdata: ibmvscsi_host_data of host 548 * @hostdata: ibmvscsi_host_data of host
549 * @timeout: timeout in seconds - 0 means do not time command
530 * 550 *
531 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success) 551 * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
532 * Note that this routine assumes that host_lock is held for synchronization 552 * Note that this routine assumes that host_lock is held for synchronization
533*/ 553*/
534static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, 554static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
535 struct ibmvscsi_host_data *hostdata) 555 struct ibmvscsi_host_data *hostdata,
556 unsigned long timeout)
536{ 557{
537 u64 *crq_as_u64 = (u64 *) &evt_struct->crq; 558 u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
538 int request_status; 559 int request_status;
@@ -588,12 +609,20 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
588 */ 609 */
589 list_add_tail(&evt_struct->list, &hostdata->sent); 610 list_add_tail(&evt_struct->list, &hostdata->sent);
590 611
612 init_timer(&evt_struct->timer);
613 if (timeout) {
614 evt_struct->timer.data = (unsigned long) evt_struct;
615 evt_struct->timer.expires = jiffies + (timeout * HZ);
616 evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout;
617 add_timer(&evt_struct->timer);
618 }
619
591 if ((rc = 620 if ((rc =
592 ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { 621 ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
593 list_del(&evt_struct->list); 622 list_del(&evt_struct->list);
623 del_timer(&evt_struct->timer);
594 624
595 printk(KERN_ERR "ibmvscsi: send error %d\n", 625 dev_err(hostdata->dev, "send error %d\n", rc);
596 rc);
597 atomic_inc(&hostdata->request_limit); 626 atomic_inc(&hostdata->request_limit);
598 goto send_error; 627 goto send_error;
599 } 628 }
@@ -634,9 +663,8 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
634 663
635 if (unlikely(rsp->opcode != SRP_RSP)) { 664 if (unlikely(rsp->opcode != SRP_RSP)) {
636 if (printk_ratelimit()) 665 if (printk_ratelimit())
637 printk(KERN_WARNING 666 dev_warn(evt_struct->hostdata->dev,
638 "ibmvscsi: bad SRP RSP type %d\n", 667 "bad SRP RSP type %d\n", rsp->opcode);
639 rsp->opcode);
640 } 668 }
641 669
642 if (cmnd) { 670 if (cmnd) {
@@ -650,9 +678,9 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
650 evt_struct->hostdata->dev); 678 evt_struct->hostdata->dev);
651 679
652 if (rsp->flags & SRP_RSP_FLAG_DOOVER) 680 if (rsp->flags & SRP_RSP_FLAG_DOOVER)
653 cmnd->resid = rsp->data_out_res_cnt; 681 scsi_set_resid(cmnd, rsp->data_out_res_cnt);
654 else if (rsp->flags & SRP_RSP_FLAG_DIOVER) 682 else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
655 cmnd->resid = rsp->data_in_res_cnt; 683 scsi_set_resid(cmnd, rsp->data_in_res_cnt);
656 } 684 }
657 685
 658 		if (evt_struct->cmnd_done) 686 		if (evt_struct->cmnd_done)
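cmnd->resid is likewise hidden behind accessors now: scsi_set_resid()/scsi_get_resid() record how many bytes of the request were not transferred. Sketch:

    static void note_underrun(struct scsi_cmnd *cmnd, int residual)
    {
            scsi_set_resid(cmnd, residual);         /* replaces cmnd->resid = ... */
            if (scsi_get_resid(cmnd))
                    scmd_printk(KERN_DEBUG, cmnd, "short transfer, %d bytes left\n",
                                scsi_get_resid(cmnd));
    }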
@@ -697,7 +725,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
697 srp_cmd->lun = ((u64) lun) << 48; 725 srp_cmd->lun = ((u64) lun) << 48;
698 726
699 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { 727 if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
700 printk(KERN_ERR "ibmvscsi: couldn't convert cmd to srp_cmd\n"); 728 sdev_printk(KERN_ERR, cmnd->device, "couldn't convert cmd to srp_cmd\n");
701 free_event_struct(&hostdata->pool, evt_struct); 729 free_event_struct(&hostdata->pool, evt_struct);
702 return SCSI_MLQUEUE_HOST_BUSY; 730 return SCSI_MLQUEUE_HOST_BUSY;
703 } 731 }
@@ -722,7 +750,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
722 offsetof(struct srp_indirect_buf, desc_list); 750 offsetof(struct srp_indirect_buf, desc_list);
723 } 751 }
724 752
725 return ibmvscsi_send_srp_event(evt_struct, hostdata); 753 return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
726} 754}
727 755
728/* ------------------------------------------------------------ 756/* ------------------------------------------------------------
@@ -744,16 +772,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
744 DMA_BIDIRECTIONAL); 772 DMA_BIDIRECTIONAL);
745 773
746 if (evt_struct->xfer_iu->mad.adapter_info.common.status) { 774 if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
747 printk("ibmvscsi: error %d getting adapter info\n", 775 dev_err(hostdata->dev, "error %d getting adapter info\n",
748 evt_struct->xfer_iu->mad.adapter_info.common.status); 776 evt_struct->xfer_iu->mad.adapter_info.common.status);
749 } else { 777 } else {
750 printk("ibmvscsi: host srp version: %s, " 778 dev_info(hostdata->dev, "host srp version: %s, "
751 "host partition %s (%d), OS %d, max io %u\n", 779 "host partition %s (%d), OS %d, max io %u\n",
752 hostdata->madapter_info.srp_version, 780 hostdata->madapter_info.srp_version,
753 hostdata->madapter_info.partition_name, 781 hostdata->madapter_info.partition_name,
754 hostdata->madapter_info.partition_number, 782 hostdata->madapter_info.partition_number,
755 hostdata->madapter_info.os_type, 783 hostdata->madapter_info.os_type,
756 hostdata->madapter_info.port_max_txu[0]); 784 hostdata->madapter_info.port_max_txu[0]);
757 785
758 if (hostdata->madapter_info.port_max_txu[0]) 786 if (hostdata->madapter_info.port_max_txu[0])
759 hostdata->host->max_sectors = 787 hostdata->host->max_sectors =
@@ -761,11 +789,10 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
761 789
762 if (hostdata->madapter_info.os_type == 3 && 790 if (hostdata->madapter_info.os_type == 3 &&
763 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { 791 strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
764 printk("ibmvscsi: host (Ver. %s) doesn't support large" 792 dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
765 "transfers\n", 793 hostdata->madapter_info.srp_version);
766 hostdata->madapter_info.srp_version); 794 dev_err(hostdata->dev, "limiting scatterlists to %d\n",
767 printk("ibmvscsi: limiting scatterlists to %d\n", 795 MAX_INDIRECT_BUFS);
768 MAX_INDIRECT_BUFS);
769 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; 796 hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
770 } 797 }
771 } 798 }
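The printk-to-dev_*() conversion in this file trades hand-rolled "ibmvscsi:" prefixes for messages tagged automatically with the driver and device name. The before/after shape, wrapped in a throwaway helper for illustration:

    static void report_status(struct ibmvscsi_host_data *hostdata, int status)
    {
            /* old: prefix and severity maintained by hand */
            printk(KERN_ERR "ibmvscsi: error %d getting adapter info\n", status);

            /* new: the "<driver> <dev>: " prefix comes from the struct device */
            dev_err(hostdata->dev, "error %d getting adapter info\n", status);
    }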
@@ -784,19 +811,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
784{ 811{
785 struct viosrp_adapter_info *req; 812 struct viosrp_adapter_info *req;
786 struct srp_event_struct *evt_struct; 813 struct srp_event_struct *evt_struct;
814 unsigned long flags;
787 dma_addr_t addr; 815 dma_addr_t addr;
788 816
789 evt_struct = get_event_struct(&hostdata->pool); 817 evt_struct = get_event_struct(&hostdata->pool);
790 if (!evt_struct) { 818 if (!evt_struct) {
791 printk(KERN_ERR "ibmvscsi: couldn't allocate an event " 819 dev_err(hostdata->dev,
792 "for ADAPTER_INFO_REQ!\n"); 820 "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
793 return; 821 return;
794 } 822 }
795 823
796 init_event_struct(evt_struct, 824 init_event_struct(evt_struct,
797 adapter_info_rsp, 825 adapter_info_rsp,
798 VIOSRP_MAD_FORMAT, 826 VIOSRP_MAD_FORMAT,
799 init_timeout * HZ); 827 init_timeout);
800 828
801 req = &evt_struct->iu.mad.adapter_info; 829 req = &evt_struct->iu.mad.adapter_info;
802 memset(req, 0x00, sizeof(*req)); 830 memset(req, 0x00, sizeof(*req));
@@ -809,20 +837,20 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
809 DMA_BIDIRECTIONAL); 837 DMA_BIDIRECTIONAL);
810 838
811 if (dma_mapping_error(req->buffer)) { 839 if (dma_mapping_error(req->buffer)) {
812 printk(KERN_ERR 840 dev_err(hostdata->dev, "Unable to map request_buffer for adapter_info!\n");
813 "ibmvscsi: Unable to map request_buffer "
814 "for adapter_info!\n");
815 free_event_struct(&hostdata->pool, evt_struct); 841 free_event_struct(&hostdata->pool, evt_struct);
816 return; 842 return;
817 } 843 }
818 844
819 if (ibmvscsi_send_srp_event(evt_struct, hostdata)) { 845 spin_lock_irqsave(hostdata->host->host_lock, flags);
820 printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); 846 if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
847 dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
821 dma_unmap_single(hostdata->dev, 848 dma_unmap_single(hostdata->dev,
822 addr, 849 addr,
823 sizeof(hostdata->madapter_info), 850 sizeof(hostdata->madapter_info),
824 DMA_BIDIRECTIONAL); 851 DMA_BIDIRECTIONAL);
825 } 852 }
853 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
826}; 854};
827 855
828/** 856/**
@@ -839,24 +867,23 @@ static void login_rsp(struct srp_event_struct *evt_struct)
839 case SRP_LOGIN_RSP: /* it worked! */ 867 case SRP_LOGIN_RSP: /* it worked! */
840 break; 868 break;
841 case SRP_LOGIN_REJ: /* refused! */ 869 case SRP_LOGIN_REJ: /* refused! */
842 printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n", 870 dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
843 evt_struct->xfer_iu->srp.login_rej.reason); 871 evt_struct->xfer_iu->srp.login_rej.reason);
844 /* Login failed. */ 872 /* Login failed. */
845 atomic_set(&hostdata->request_limit, -1); 873 atomic_set(&hostdata->request_limit, -1);
846 return; 874 return;
847 default: 875 default:
848 printk(KERN_ERR 876 dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
849 "ibmvscsi: Invalid login response typecode 0x%02x!\n", 877 evt_struct->xfer_iu->srp.login_rsp.opcode);
850 evt_struct->xfer_iu->srp.login_rsp.opcode);
851 /* Login failed. */ 878 /* Login failed. */
852 atomic_set(&hostdata->request_limit, -1); 879 atomic_set(&hostdata->request_limit, -1);
853 return; 880 return;
854 } 881 }
855 882
856 printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n"); 883 dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
857 884
858 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0) 885 if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
859 printk(KERN_ERR "ibmvscsi: Invalid request_limit.\n"); 886 dev_err(hostdata->dev, "Invalid request_limit.\n");
860 887
861 /* Now we know what the real request-limit is. 888 /* Now we know what the real request-limit is.
862 * This value is set rather than added to request_limit because 889 * This value is set rather than added to request_limit because
@@ -885,15 +912,14 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
885 struct srp_login_req *login; 912 struct srp_login_req *login;
886 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); 913 struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
887 if (!evt_struct) { 914 if (!evt_struct) {
888 printk(KERN_ERR 915 dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
889 "ibmvscsi: couldn't allocate an event for login req!\n");
890 return FAILED; 916 return FAILED;
891 } 917 }
892 918
893 init_event_struct(evt_struct, 919 init_event_struct(evt_struct,
894 login_rsp, 920 login_rsp,
895 VIOSRP_SRP_FORMAT, 921 VIOSRP_SRP_FORMAT,
896 init_timeout * HZ); 922 init_timeout);
897 923
898 login = &evt_struct->iu.srp.login_req; 924 login = &evt_struct->iu.srp.login_req;
899 memset(login, 0x00, sizeof(struct srp_login_req)); 925 memset(login, 0x00, sizeof(struct srp_login_req));
@@ -907,9 +933,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
907 */ 933 */
908 atomic_set(&hostdata->request_limit, 1); 934 atomic_set(&hostdata->request_limit, 1);
909 935
910 rc = ibmvscsi_send_srp_event(evt_struct, hostdata); 936 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
911 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 937 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
912 printk("ibmvscsic: sent SRP login\n"); 938 dev_info(hostdata->dev, "sent SRP login\n");
913 return rc; 939 return rc;
914}; 940};
915 941
@@ -958,20 +984,20 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
958 984
959 if (!found_evt) { 985 if (!found_evt) {
960 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 986 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
961 return FAILED; 987 return SUCCESS;
962 } 988 }
963 989
964 evt = get_event_struct(&hostdata->pool); 990 evt = get_event_struct(&hostdata->pool);
965 if (evt == NULL) { 991 if (evt == NULL) {
966 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 992 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
967 printk(KERN_ERR "ibmvscsi: failed to allocate abort event\n"); 993 sdev_printk(KERN_ERR, cmd->device, "failed to allocate abort event\n");
968 return FAILED; 994 return FAILED;
969 } 995 }
970 996
971 init_event_struct(evt, 997 init_event_struct(evt,
972 sync_completion, 998 sync_completion,
973 VIOSRP_SRP_FORMAT, 999 VIOSRP_SRP_FORMAT,
974 init_timeout * HZ); 1000 init_timeout);
975 1001
976 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1002 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
977 1003
@@ -982,15 +1008,16 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
982 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; 1008 tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
983 tsk_mgmt->task_tag = (u64) found_evt; 1009 tsk_mgmt->task_tag = (u64) found_evt;
984 1010
985 printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n", 1011 sdev_printk(KERN_INFO, cmd->device, "aborting command. lun 0x%lx, tag 0x%lx\n",
986 tsk_mgmt->lun, tsk_mgmt->task_tag); 1012 tsk_mgmt->lun, tsk_mgmt->task_tag);
987 1013
988 evt->sync_srp = &srp_rsp; 1014 evt->sync_srp = &srp_rsp;
989 init_completion(&evt->comp); 1015 init_completion(&evt->comp);
990 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata); 1016 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
991 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1017 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
992 if (rsp_rc != 0) { 1018 if (rsp_rc != 0) {
993 printk(KERN_ERR "ibmvscsi: failed to send abort() event\n"); 1019 sdev_printk(KERN_ERR, cmd->device,
1020 "failed to send abort() event. rc=%d\n", rsp_rc);
994 return FAILED; 1021 return FAILED;
995 } 1022 }
996 1023
@@ -999,9 +1026,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
999 /* make sure we got a good response */ 1026 /* make sure we got a good response */
1000 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { 1027 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1001 if (printk_ratelimit()) 1028 if (printk_ratelimit())
1002 printk(KERN_WARNING 1029 sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
1003 "ibmvscsi: abort bad SRP RSP type %d\n", 1030 srp_rsp.srp.rsp.opcode);
1004 srp_rsp.srp.rsp.opcode);
1005 return FAILED; 1031 return FAILED;
1006 } 1032 }
1007 1033
@@ -1012,10 +1038,9 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1012 1038
1013 if (rsp_rc) { 1039 if (rsp_rc) {
1014 if (printk_ratelimit()) 1040 if (printk_ratelimit())
1015 printk(KERN_WARNING 1041 sdev_printk(KERN_WARNING, cmd->device,
1016 "ibmvscsi: abort code %d for task tag 0x%lx\n", 1042 "abort code %d for task tag 0x%lx\n",
1017 rsp_rc, 1043 rsp_rc, tsk_mgmt->task_tag);
1018 tsk_mgmt->task_tag);
1019 return FAILED; 1044 return FAILED;
1020 } 1045 }
1021 1046
@@ -1034,15 +1059,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
1034 1059
1035 if (found_evt == NULL) { 1060 if (found_evt == NULL) {
1036 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1061 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1037 printk(KERN_INFO 1062 sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
1038 "ibmvscsi: aborted task tag 0x%lx completed\n", 1063 tsk_mgmt->task_tag);
1039 tsk_mgmt->task_tag);
1040 return SUCCESS; 1064 return SUCCESS;
1041 } 1065 }
1042 1066
1043 printk(KERN_INFO 1067 sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
1044 "ibmvscsi: successfully aborted task tag 0x%lx\n", 1068 tsk_mgmt->task_tag);
1045 tsk_mgmt->task_tag);
1046 1069
1047 cmd->result = (DID_ABORT << 16); 1070 cmd->result = (DID_ABORT << 16);
1048 list_del(&found_evt->list); 1071 list_del(&found_evt->list);
@@ -1076,14 +1099,14 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1076 evt = get_event_struct(&hostdata->pool); 1099 evt = get_event_struct(&hostdata->pool);
1077 if (evt == NULL) { 1100 if (evt == NULL) {
1078 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1101 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1079 printk(KERN_ERR "ibmvscsi: failed to allocate reset event\n"); 1102 sdev_printk(KERN_ERR, cmd->device, "failed to allocate reset event\n");
1080 return FAILED; 1103 return FAILED;
1081 } 1104 }
1082 1105
1083 init_event_struct(evt, 1106 init_event_struct(evt,
1084 sync_completion, 1107 sync_completion,
1085 VIOSRP_SRP_FORMAT, 1108 VIOSRP_SRP_FORMAT,
1086 init_timeout * HZ); 1109 init_timeout);
1087 1110
1088 tsk_mgmt = &evt->iu.srp.tsk_mgmt; 1111 tsk_mgmt = &evt->iu.srp.tsk_mgmt;
1089 1112
@@ -1093,15 +1116,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1093 tsk_mgmt->lun = ((u64) lun) << 48; 1116 tsk_mgmt->lun = ((u64) lun) << 48;
1094 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; 1117 tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
1095 1118
1096 printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n", 1119 sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
1097 tsk_mgmt->lun); 1120 tsk_mgmt->lun);
1098 1121
1099 evt->sync_srp = &srp_rsp; 1122 evt->sync_srp = &srp_rsp;
1100 init_completion(&evt->comp); 1123 init_completion(&evt->comp);
1101 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata); 1124 rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
1102 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1125 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1103 if (rsp_rc != 0) { 1126 if (rsp_rc != 0) {
1104 printk(KERN_ERR "ibmvscsi: failed to send reset event\n"); 1127 sdev_printk(KERN_ERR, cmd->device,
1128 "failed to send reset event. rc=%d\n", rsp_rc);
1105 return FAILED; 1129 return FAILED;
1106 } 1130 }
1107 1131
@@ -1110,9 +1134,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1110 /* make sure we got a good response */ 1134 /* make sure we got a good response */
1111 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { 1135 if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
1112 if (printk_ratelimit()) 1136 if (printk_ratelimit())
1113 printk(KERN_WARNING 1137 sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
1114 "ibmvscsi: reset bad SRP RSP type %d\n", 1138 srp_rsp.srp.rsp.opcode);
1115 srp_rsp.srp.rsp.opcode);
1116 return FAILED; 1139 return FAILED;
1117 } 1140 }
1118 1141
@@ -1123,9 +1146,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1123 1146
1124 if (rsp_rc) { 1147 if (rsp_rc) {
1125 if (printk_ratelimit()) 1148 if (printk_ratelimit())
1126 printk(KERN_WARNING 1149 sdev_printk(KERN_WARNING, cmd->device,
1127 "ibmvscsi: reset code %d for task tag 0x%lx\n", 1150 "reset code %d for task tag 0x%lx\n",
1128 rsp_rc, tsk_mgmt->task_tag); 1151 rsp_rc, tsk_mgmt->task_tag);
1129 return FAILED; 1152 return FAILED;
1130 } 1153 }
1131 1154
@@ -1154,32 +1177,30 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
1154} 1177}
1155 1178
1156/** 1179/**
1157 * purge_requests: Our virtual adapter just shut down. purge any sent requests 1180 * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
1158 * @hostdata: the adapter 1181 * @cmd: struct scsi_cmnd having problems
1159 */ 1182*/
1160static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) 1183static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
1161{ 1184{
1162 struct srp_event_struct *tmp_evt, *pos; 1185 unsigned long wait_switch = 0;
1163 unsigned long flags; 1186 struct ibmvscsi_host_data *hostdata =
1187 (struct ibmvscsi_host_data *)cmd->device->host->hostdata;
1164 1188
1165 spin_lock_irqsave(hostdata->host->host_lock, flags); 1189 dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
1166 list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) { 1190
1167 list_del(&tmp_evt->list); 1191 ibmvscsi_reset_host(hostdata);
1168 if (tmp_evt->cmnd) { 1192
1169 tmp_evt->cmnd->result = (error_code << 16); 1193 for (wait_switch = jiffies + (init_timeout * HZ);
1170 unmap_cmd_data(&tmp_evt->iu.srp.cmd, 1194 time_before(jiffies, wait_switch) &&
1171 tmp_evt, 1195 atomic_read(&hostdata->request_limit) < 2;) {
1172 tmp_evt->hostdata->dev); 1196
1173 if (tmp_evt->cmnd_done) 1197 msleep(10);
1174 tmp_evt->cmnd_done(tmp_evt->cmnd);
1175 } else {
1176 if (tmp_evt->done) {
1177 tmp_evt->done(tmp_evt);
1178 }
1179 }
1180 free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
1181 } 1198 }
1182 spin_unlock_irqrestore(hostdata->host->host_lock, flags); 1199
1200 if (atomic_read(&hostdata->request_limit) <= 0)
1201 return FAILED;
1202
1203 return SUCCESS;
1183} 1204}
1184 1205
1185/** 1206/**
@@ -1191,6 +1212,7 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
1191void ibmvscsi_handle_crq(struct viosrp_crq *crq, 1212void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1192 struct ibmvscsi_host_data *hostdata) 1213 struct ibmvscsi_host_data *hostdata)
1193{ 1214{
1215 long rc;
1194 unsigned long flags; 1216 unsigned long flags;
1195 struct srp_event_struct *evt_struct = 1217 struct srp_event_struct *evt_struct =
1196 	    (struct srp_event_struct *)crq->IU_data_ptr; 1218 	    (struct srp_event_struct *)crq->IU_data_ptr;
@@ -1198,27 +1220,25 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1198 case 0xC0: /* initialization */ 1220 case 0xC0: /* initialization */
1199 switch (crq->format) { 1221 switch (crq->format) {
1200 case 0x01: /* Initialization message */ 1222 case 0x01: /* Initialization message */
1201 printk(KERN_INFO "ibmvscsi: partner initialized\n"); 1223 dev_info(hostdata->dev, "partner initialized\n");
1202 /* Send back a response */ 1224 /* Send back a response */
1203 if (ibmvscsi_send_crq(hostdata, 1225 if ((rc = ibmvscsi_send_crq(hostdata,
1204 0xC002000000000000LL, 0) == 0) { 1226 0xC002000000000000LL, 0)) == 0) {
1205 /* Now login */ 1227 /* Now login */
1206 send_srp_login(hostdata); 1228 send_srp_login(hostdata);
1207 } else { 1229 } else {
1208 printk(KERN_ERR 1230 dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
1209 "ibmvscsi: Unable to send init rsp\n");
1210 } 1231 }
1211 1232
1212 break; 1233 break;
1213 case 0x02: /* Initialization response */ 1234 case 0x02: /* Initialization response */
1214 printk(KERN_INFO 1235 dev_info(hostdata->dev, "partner initialization complete\n");
1215 "ibmvscsi: partner initialization complete\n");
1216 1236
1217 /* Now login */ 1237 /* Now login */
1218 send_srp_login(hostdata); 1238 send_srp_login(hostdata);
1219 break; 1239 break;
1220 default: 1240 default:
1221 printk(KERN_ERR "ibmvscsi: unknown crq message type\n"); 1241 dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
1222 } 1242 }
1223 return; 1243 return;
1224 case 0xFF: /* Hypervisor telling us the connection is closed */ 1244 case 0xFF: /* Hypervisor telling us the connection is closed */
@@ -1226,8 +1246,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1226 atomic_set(&hostdata->request_limit, 0); 1246 atomic_set(&hostdata->request_limit, 0);
1227 if (crq->format == 0x06) { 1247 if (crq->format == 0x06) {
1228 /* We need to re-setup the interpartition connection */ 1248 /* We need to re-setup the interpartition connection */
1229 printk(KERN_INFO 1249 dev_info(hostdata->dev, "Re-enabling adapter!\n");
1230 "ibmvscsi: Re-enabling adapter!\n");
1231 purge_requests(hostdata, DID_REQUEUE); 1250 purge_requests(hostdata, DID_REQUEUE);
1232 if ((ibmvscsi_reenable_crq_queue(&hostdata->queue, 1251 if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
1233 hostdata)) || 1252 hostdata)) ||
@@ -1235,14 +1254,11 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1235 0xC001000000000000LL, 0))) { 1254 0xC001000000000000LL, 0))) {
1236 atomic_set(&hostdata->request_limit, 1255 atomic_set(&hostdata->request_limit,
1237 -1); 1256 -1);
1238 printk(KERN_ERR 1257 dev_err(hostdata->dev, "error after enable\n");
1239 "ibmvscsi: error after"
1240 " enable\n");
1241 } 1258 }
1242 } else { 1259 } else {
1243 printk(KERN_INFO 1260 dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
1244 "ibmvscsi: Virtual adapter failed rc %d!\n", 1261 crq->format);
1245 crq->format);
1246 1262
1247 purge_requests(hostdata, DID_ERROR); 1263 purge_requests(hostdata, DID_ERROR);
1248 if ((ibmvscsi_reset_crq_queue(&hostdata->queue, 1264 if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
@@ -1251,8 +1267,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1251 0xC001000000000000LL, 0))) { 1267 0xC001000000000000LL, 0))) {
1252 atomic_set(&hostdata->request_limit, 1268 atomic_set(&hostdata->request_limit,
1253 -1); 1269 -1);
1254 printk(KERN_ERR 1270 dev_err(hostdata->dev, "error after reset\n");
1255 "ibmvscsi: error after reset\n");
1256 } 1271 }
1257 } 1272 }
1258 scsi_unblock_requests(hostdata->host); 1273 scsi_unblock_requests(hostdata->host);
@@ -1260,9 +1275,8 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1260 case 0x80: /* real payload */ 1275 case 0x80: /* real payload */
1261 break; 1276 break;
1262 default: 1277 default:
1263 printk(KERN_ERR 1278 dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
1264 "ibmvscsi: got an invalid message type 0x%02x\n", 1279 crq->valid);
1265 crq->valid);
1266 return; 1280 return;
1267 } 1281 }
1268 1282
@@ -1271,16 +1285,14 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1271 * actually sent 1285 * actually sent
1272 */ 1286 */
1273 if (!valid_event_struct(&hostdata->pool, evt_struct)) { 1287 if (!valid_event_struct(&hostdata->pool, evt_struct)) {
1274 printk(KERN_ERR 1288 dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
1275 "ibmvscsi: returned correlation_token 0x%p is invalid!\n",
1276 (void *)crq->IU_data_ptr); 1289 (void *)crq->IU_data_ptr);
1277 return; 1290 return;
1278 } 1291 }
1279 1292
1280 if (atomic_read(&evt_struct->free)) { 1293 if (atomic_read(&evt_struct->free)) {
1281 printk(KERN_ERR 1294 dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
1282 "ibmvscsi: received duplicate correlation_token 0x%p!\n", 1295 (void *)crq->IU_data_ptr);
1283 (void *)crq->IU_data_ptr);
1284 return; 1296 return;
1285 } 1297 }
1286 1298
@@ -1288,11 +1300,12 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
1288 atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, 1300 atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta,
1289 &hostdata->request_limit); 1301 &hostdata->request_limit);
1290 1302
1303 del_timer(&evt_struct->timer);
1304
1291 if (evt_struct->done) 1305 if (evt_struct->done)
1292 evt_struct->done(evt_struct); 1306 evt_struct->done(evt_struct);
1293 else 1307 else
1294 printk(KERN_ERR 1308 dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
1295 "ibmvscsi: returned done() is NULL; not running it!\n");
1296 1309
1297 /* 1310 /*
1298 * Lock the host_lock before messing with these structures, since we 1311 * Lock the host_lock before messing with these structures, since we
@@ -1313,20 +1326,20 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1313{ 1326{
1314 struct viosrp_host_config *host_config; 1327 struct viosrp_host_config *host_config;
1315 struct srp_event_struct *evt_struct; 1328 struct srp_event_struct *evt_struct;
1329 unsigned long flags;
1316 dma_addr_t addr; 1330 dma_addr_t addr;
1317 int rc; 1331 int rc;
1318 1332
1319 evt_struct = get_event_struct(&hostdata->pool); 1333 evt_struct = get_event_struct(&hostdata->pool);
1320 if (!evt_struct) { 1334 if (!evt_struct) {
1321 printk(KERN_ERR 1335 dev_err(hostdata->dev, "couldn't allocate event for HOST_CONFIG!\n");
1322 "ibmvscsi: could't allocate event for HOST_CONFIG!\n");
1323 return -1; 1336 return -1;
1324 } 1337 }
1325 1338
1326 init_event_struct(evt_struct, 1339 init_event_struct(evt_struct,
1327 sync_completion, 1340 sync_completion,
1328 VIOSRP_MAD_FORMAT, 1341 VIOSRP_MAD_FORMAT,
1329 init_timeout * HZ); 1342 init_timeout);
1330 1343
1331 host_config = &evt_struct->iu.mad.host_config; 1344 host_config = &evt_struct->iu.mad.host_config;
1332 1345
@@ -1339,14 +1352,15 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
1339 DMA_BIDIRECTIONAL); 1352 DMA_BIDIRECTIONAL);
1340 1353
1341 if (dma_mapping_error(host_config->buffer)) { 1354 if (dma_mapping_error(host_config->buffer)) {
1342 printk(KERN_ERR 1355 dev_err(hostdata->dev, "dma_mapping error getting host config\n");
1343 "ibmvscsi: dma_mapping error " "getting host config\n");
1344 free_event_struct(&hostdata->pool, evt_struct); 1356 free_event_struct(&hostdata->pool, evt_struct);
1345 return -1; 1357 return -1;
1346 } 1358 }
1347 1359
1348 init_completion(&evt_struct->comp); 1360 init_completion(&evt_struct->comp);
1349 rc = ibmvscsi_send_srp_event(evt_struct, hostdata); 1361 spin_lock_irqsave(hostdata->host->host_lock, flags);
1362 rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
1363 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
1350 if (rc == 0) 1364 if (rc == 0)
1351 wait_for_completion(&evt_struct->comp); 1365 wait_for_completion(&evt_struct->comp);
1352 dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL); 1366 dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
@@ -1375,6 +1389,23 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1375 return 0; 1389 return 0;
1376} 1390}
1377 1391
1392/**
1393 * ibmvscsi_change_queue_depth - Change the device's queue depth
1394 * @sdev: scsi device struct
1395 * @qdepth: depth to set
1396 *
1397 * Return value:
1398 * actual depth set
1399 **/
1400static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1401{
1402 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1403 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1404
1405 scsi_adjust_queue_depth(sdev, 0, qdepth);
1406 return sdev->queue_depth;
1407}
1408
1378/* ------------------------------------------------------------ 1409/* ------------------------------------------------------------
1379 * sysfs attributes 1410 * sysfs attributes
1380 */ 1411 */
@@ -1520,7 +1551,9 @@ static struct scsi_host_template driver_template = {
1520 .queuecommand = ibmvscsi_queuecommand, 1551 .queuecommand = ibmvscsi_queuecommand,
1521 .eh_abort_handler = ibmvscsi_eh_abort_handler, 1552 .eh_abort_handler = ibmvscsi_eh_abort_handler,
1522 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, 1553 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1554 .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
1523 .slave_configure = ibmvscsi_slave_configure, 1555 .slave_configure = ibmvscsi_slave_configure,
1556 .change_queue_depth = ibmvscsi_change_queue_depth,
1524 .cmd_per_lun = 16, 1557 .cmd_per_lun = 16,
1525 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, 1558 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1526 .this_id = -1, 1559 .this_id = -1,
@@ -1545,7 +1578,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1545 driver_template.can_queue = max_requests; 1578 driver_template.can_queue = max_requests;
1546 host = scsi_host_alloc(&driver_template, sizeof(*hostdata)); 1579 host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
1547 if (!host) { 1580 if (!host) {
1548 printk(KERN_ERR "ibmvscsi: couldn't allocate host data\n"); 1581 dev_err(&vdev->dev, "couldn't allocate host data\n");
1549 goto scsi_host_alloc_failed; 1582 goto scsi_host_alloc_failed;
1550 } 1583 }
1551 1584
@@ -1559,11 +1592,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1559 1592
1560 rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests); 1593 rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
1561 if (rc != 0 && rc != H_RESOURCE) { 1594 if (rc != 0 && rc != H_RESOURCE) {
1562 printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n"); 1595 dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
1563 goto init_crq_failed; 1596 goto init_crq_failed;
1564 } 1597 }
1565 if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) { 1598 if (initialize_event_pool(&hostdata->pool, max_requests, hostdata) != 0) {
1566 printk(KERN_ERR "ibmvscsi: couldn't initialize event pool\n"); 1599 dev_err(&vdev->dev, "couldn't initialize event pool\n");
1567 goto init_pool_failed; 1600 goto init_pool_failed;
1568 } 1601 }
1569 1602
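Taken together, the ibmvscsi.c hunks above follow one conversion pattern: a bare printk() carrying a hand-maintained "ibmvscsi:" prefix becomes a dev_err()/dev_warn()/dev_info() call, which derives the prefix from the struct device and therefore also identifies which adapter instance logged the message. The before/after shape, using a pair of lines from this diff:

	/* before: prefix written by hand, no device identity in the output */
	printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");

	/* after: driver and device name are prepended automatically */
	dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);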
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 77cc1d40f5bb..b19c2e26c2a5 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,7 @@ struct Scsi_Host;
45#define MAX_INDIRECT_BUFS 10 45#define MAX_INDIRECT_BUFS 10
46 46
47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100 47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
48#define IBMVSCSI_MAX_CMDS_PER_LUN 64
48 49
49/* ------------------------------------------------------------ 50/* ------------------------------------------------------------
50 * Data Structures 51 * Data Structures
@@ -69,6 +70,7 @@ struct srp_event_struct {
69 union viosrp_iu iu; 70 union viosrp_iu iu;
70 void (*cmnd_done) (struct scsi_cmnd *); 71 void (*cmnd_done) (struct scsi_cmnd *);
71 struct completion comp; 72 struct completion comp;
73 struct timer_list timer;
72 union viosrp_iu *sync_srp; 74 union viosrp_iu *sync_srp;
73 struct srp_direct_buf *ext_list; 75 struct srp_direct_buf *ext_list;
74 dma_addr_t ext_list_token; 76 dma_addr_t ext_list_token;
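The timer field added to srp_event_struct above pairs with the del_timer() call added to ibmvscsi_handle_crq() earlier in this diff: each outstanding event now carries its own timeout timer, cancelled when the response arrives. A hedged sketch of the arming side, which lies outside the hunks shown here; the handler name ibmvscsi_timeout and the timeout variable are assumptions:

	init_timer(&evt_struct->timer);
	if (timeout) {
		evt_struct->timer.data = (unsigned long) evt_struct;
		evt_struct->timer.expires = jiffies + (timeout * HZ);
		evt_struct->timer.function = (void (*)(unsigned long)) ibmvscsi_timeout; /* assumed handler */
		add_timer(&evt_struct->timer);
	}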
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index d8700aaa6114..9c14e789df5f 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -177,7 +177,7 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
177 memset(&hostdata->madapter_info, 0x00, 177 memset(&hostdata->madapter_info, 0x00,
178 sizeof(hostdata->madapter_info)); 178 sizeof(hostdata->madapter_info));
179 179
180 printk(KERN_INFO "rpa_vscsi: SPR_VERSION: %s\n", SRP_VERSION); 180 dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
181 strcpy(hostdata->madapter_info.srp_version, SRP_VERSION); 181 strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);
182 182
183 strncpy(hostdata->madapter_info.partition_name, partition_name, 183 strncpy(hostdata->madapter_info.partition_name, partition_name,
@@ -232,25 +232,24 @@ int ibmvscsi_init_crq_queue(struct crq_queue *queue,
232 232
233 if (rc == 2) { 233 if (rc == 2) {
234 /* Adapter is good, but other end is not ready */ 234 /* Adapter is good, but other end is not ready */
235 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 235 dev_warn(hostdata->dev, "Partner adapter not ready\n");
236 retrc = 0; 236 retrc = 0;
237 } else if (rc != 0) { 237 } else if (rc != 0) {
238 printk(KERN_WARNING "ibmvscsi: Error %d opening adapter\n", rc); 238 dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
239 goto reg_crq_failed; 239 goto reg_crq_failed;
240 } 240 }
241 241
242 if (request_irq(vdev->irq, 242 if (request_irq(vdev->irq,
243 ibmvscsi_handle_event, 243 ibmvscsi_handle_event,
244 0, "ibmvscsi", (void *)hostdata) != 0) { 244 0, "ibmvscsi", (void *)hostdata) != 0) {
245 printk(KERN_ERR "ibmvscsi: couldn't register irq 0x%x\n", 245 dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
246 vdev->irq); 246 vdev->irq);
247 goto req_irq_failed; 247 goto req_irq_failed;
248 } 248 }
249 249
250 rc = vio_enable_interrupts(vdev); 250 rc = vio_enable_interrupts(vdev);
251 if (rc != 0) { 251 if (rc != 0) {
252 printk(KERN_ERR "ibmvscsi: Error %d enabling interrupts!!!\n", 252 dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
253 rc);
254 goto req_irq_failed; 253 goto req_irq_failed;
255 } 254 }
256 255
@@ -294,7 +293,7 @@ int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
294 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc))); 293 } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
295 294
296 if (rc) 295 if (rc)
297 printk(KERN_ERR "ibmvscsi: Error %d enabling adapter\n", rc); 296 dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
298 return rc; 297 return rc;
299} 298}
300 299
@@ -327,10 +326,9 @@ int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
327 queue->msg_token, PAGE_SIZE); 326 queue->msg_token, PAGE_SIZE);
328 if (rc == 2) { 327 if (rc == 2) {
329 /* Adapter is good, but other end is not ready */ 328 /* Adapter is good, but other end is not ready */
330 printk(KERN_WARNING "ibmvscsi: Partner adapter not ready\n"); 329 dev_warn(hostdata->dev, "Partner adapter not ready\n");
331 } else if (rc != 0) { 330 } else if (rc != 0) {
332 printk(KERN_WARNING 331 dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
333 "ibmvscsi: couldn't register crq--rc 0x%x\n", rc);
334 } 332 }
335 return rc; 333 return rc;
336} 334}
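For context on the retry loop visible in ibmvscsi_reenable_crq_queue() above: pHyp hcalls signal transient congestion through H_BUSY, H_IN_PROGRESS and the H_IS_LONG_BUSY() family, and the caller is expected to simply reissue the call. A minimal sketch of the idiom; the hcall and the vdev->unit_address argument follow the rpa_vscsi convention:

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || H_IS_LONG_BUSY(rc));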
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7e7635ca78f1..d9dfb69ae031 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Copyright (c) 1994-1998 Initio Corporation 4 * Copyright (c) 1994-1998 Initio Corporation
5 * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl> 5 * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
6 * All rights reserved. 6 * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
7 * Copyright (c) 2007 Red Hat <alan@redhat.com>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -19,38 +20,6 @@
19 * along with this program; see the file COPYING. If not, write to 20 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 * 22 *
22 * --------------------------------------------------------------------------
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions, and the following disclaimer,
29 * without modification, immediately at the beginning of the file.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. The name of the author may not be used to endorse or promote products
34 * derived from this software without specific prior written permission.
35 *
36 * Where this Software is combined with software released under the terms of
37 * the GNU General Public License ("GPL") and the terms of the GPL would require the
38 * combined work to also be released under the terms of the GPL, the terms
39 * and conditions of this License will apply in addition to those of the
40 * GPL with the exception of any terms or conditions of this License that
41 * conflict with, or are expressly prohibited by, the GPL.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
47 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 * 23 *
55 ************************************************************************* 24 *************************************************************************
56 * 25 *
@@ -70,14 +39,14 @@
70 * - Fix memory allocation problem 39 * - Fix memory allocation problem
71 * 03/04/98 hc - v1.01l 40 * 03/04/98 hc - v1.01l
72 * - Fix tape rewind which will hang the system problem 41 * - Fix tape rewind which will hang the system problem
73 * - Set can_queue to tul_num_scb 42 * - Set can_queue to initio_num_scb
74 * 06/25/98 hc - v1.01m 43 * 06/25/98 hc - v1.01m
75 * - Get it work for kernel version >= 2.1.75 44 * - Get it work for kernel version >= 2.1.75
76 * - Dynamic assign SCSI bus reset holding time in init_tulip() 45 * - Dynamic assign SCSI bus reset holding time in initio_init()
77 * 07/02/98 hc - v1.01n 46 * 07/02/98 hc - v1.01n
78 * - Support 0002134A 47 * - Support 0002134A
79 * 08/07/98 hc - v1.01o 48 * 08/07/98 hc - v1.01o
80 * - Change the tul_abort_srb routine to use scsi_done. <01> 49 * - Change the initio_abort_srb routine to use scsi_done. <01>
81 * 09/07/98 hl - v1.02 50 * 09/07/98 hl - v1.02
82 * - Change the INI9100U define and proc_dir_entry to 51 * - Change the INI9100U define and proc_dir_entry to
83 * reflect the newer Kernel 2.1.118, but the v1.o1o 52 * reflect the newer Kernel 2.1.118, but the v1.o1o
@@ -150,23 +119,13 @@
150static unsigned int i91u_debug = DEBUG_DEFAULT; 119static unsigned int i91u_debug = DEBUG_DEFAULT;
151#endif 120#endif
152 121
153#define TUL_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) )) 122static int initio_tag_enable = 1;
154
155typedef struct PCI_ID_Struc {
156 unsigned short vendor_id;
157 unsigned short device_id;
158} PCI_ID;
159
160static int tul_num_ch = 4; /* Maximum 4 adapters */
161static int tul_num_scb;
162static int tul_tag_enable = 1;
163static SCB *tul_scb;
164 123
165#ifdef DEBUG_i91u 124#ifdef DEBUG_i91u
166static int setup_debug = 0; 125static int setup_debug = 0;
167#endif 126#endif
168 127
169static void i91uSCBPost(BYTE * pHcb, BYTE * pScb); 128static void i91uSCBPost(u8 * pHcb, u8 * pScb);
170 129
171/* PCI Devices supported by this driver */ 130/* PCI Devices supported by this driver */
172static struct pci_device_id i91u_pci_devices[] = { 131static struct pci_device_id i91u_pci_devices[] = {
@@ -184,74 +143,66 @@ MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
184#define DEBUG_STATE 0 143#define DEBUG_STATE 0
185#define INT_DISC 0 144#define INT_DISC 0
186 145
187/*--- external functions --*/ 146/*--- forward references ---*/
188static void tul_se2_wait(void); 147static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
189 148static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
190/*--- forward refrence ---*/ 149
191static SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun); 150static int tulip_main(struct initio_host * host);
192static SCB *tul_find_done_scb(HCS * pCurHcb); 151
193 152static int initio_next_state(struct initio_host * host);
194static int tulip_main(HCS * pCurHcb); 153static int initio_state_1(struct initio_host * host);
195 154static int initio_state_2(struct initio_host * host);
196static int tul_next_state(HCS * pCurHcb); 155static int initio_state_3(struct initio_host * host);
197static int tul_state_1(HCS * pCurHcb); 156static int initio_state_4(struct initio_host * host);
198static int tul_state_2(HCS * pCurHcb); 157static int initio_state_5(struct initio_host * host);
199static int tul_state_3(HCS * pCurHcb); 158static int initio_state_6(struct initio_host * host);
200static int tul_state_4(HCS * pCurHcb); 159static int initio_state_7(struct initio_host * host);
201static int tul_state_5(HCS * pCurHcb); 160static int initio_xfer_data_in(struct initio_host * host);
202static int tul_state_6(HCS * pCurHcb); 161static int initio_xfer_data_out(struct initio_host * host);
203static int tul_state_7(HCS * pCurHcb); 162static int initio_xpad_in(struct initio_host * host);
204static int tul_xfer_data_in(HCS * pCurHcb); 163static int initio_xpad_out(struct initio_host * host);
205static int tul_xfer_data_out(HCS * pCurHcb); 164static int initio_status_msg(struct initio_host * host);
206static int tul_xpad_in(HCS * pCurHcb); 165
207static int tul_xpad_out(HCS * pCurHcb); 166static int initio_msgin(struct initio_host * host);
208static int tul_status_msg(HCS * pCurHcb); 167static int initio_msgin_sync(struct initio_host * host);
209 168static int initio_msgin_accept(struct initio_host * host);
210static int tul_msgin(HCS * pCurHcb); 169static int initio_msgout_reject(struct initio_host * host);
211static int tul_msgin_sync(HCS * pCurHcb); 170static int initio_msgin_extend(struct initio_host * host);
212static int tul_msgin_accept(HCS * pCurHcb); 171
213static int tul_msgout_reject(HCS * pCurHcb); 172static int initio_msgout_ide(struct initio_host * host);
214static int tul_msgin_extend(HCS * pCurHcb); 173static int initio_msgout_abort_targ(struct initio_host * host);
215 174static int initio_msgout_abort_tag(struct initio_host * host);
216static int tul_msgout_ide(HCS * pCurHcb); 175
217static int tul_msgout_abort_targ(HCS * pCurHcb); 176static int initio_bus_device_reset(struct initio_host * host);
218static int tul_msgout_abort_tag(HCS * pCurHcb); 177static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
219 178static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
220static int tul_bus_device_reset(HCS * pCurHcb); 179static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
221static void tul_select_atn(HCS * pCurHcb, SCB * pCurScb); 180static int int_initio_busfree(struct initio_host * host);
222static void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb); 181static int int_initio_scsi_rst(struct initio_host * host);
223static void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb); 182static int int_initio_bad_seq(struct initio_host * host);
224static int int_tul_busfree(HCS * pCurHcb); 183static int int_initio_resel(struct initio_host * host);
225static int int_tul_scsi_rst(HCS * pCurHcb); 184static int initio_sync_done(struct initio_host * host);
226static int int_tul_bad_seq(HCS * pCurHcb); 185static int wdtr_done(struct initio_host * host);
227static int int_tul_resel(HCS * pCurHcb); 186static int wait_tulip(struct initio_host * host);
228static int tul_sync_done(HCS * pCurHcb); 187static int initio_wait_done_disc(struct initio_host * host);
229static int wdtr_done(HCS * pCurHcb); 188static int initio_wait_disc(struct initio_host * host);
230static int wait_tulip(HCS * pCurHcb); 189static void tulip_scsi(struct initio_host * host);
231static int tul_wait_done_disc(HCS * pCurHcb); 190static int initio_post_scsi_rst(struct initio_host * host);
232static int tul_wait_disc(HCS * pCurHcb); 191
233static void tulip_scsi(HCS * pCurHcb); 192static void initio_se2_ew_en(unsigned long base);
234static int tul_post_scsi_rst(HCS * pCurHcb); 193static void initio_se2_ew_ds(unsigned long base);
235 194static int initio_se2_rd_all(unsigned long base);
236static void tul_se2_ew_en(WORD CurBase); 195static void initio_se2_update_all(unsigned long base); /* setup default pattern */
237static void tul_se2_ew_ds(WORD CurBase); 196static void initio_read_eeprom(unsigned long base);
238static int tul_se2_rd_all(WORD CurBase); 197
239static void tul_se2_update_all(WORD CurBase); /* setup default pattern */ 198/* ---- INTERNAL VARIABLES ---- */
240static void tul_read_eeprom(WORD CurBase); 199
241
242 /* ---- INTERNAL VARIABLES ---- */
243static HCS tul_hcs[MAX_SUPPORTED_ADAPTERS];
244static INI_ADPT_STRUCT i91u_adpt[MAX_SUPPORTED_ADAPTERS];
245
246/*NVRAM nvram, *nvramp = &nvram; */
247static NVRAM i91unvram; 200static NVRAM i91unvram;
248static NVRAM *i91unvramp; 201static NVRAM *i91unvramp;
249 202
250 203static u8 i91udftNvRam[64] =
251
252static UCHAR i91udftNvRam[64] =
253{ 204{
254/*----------- header -----------*/ 205 /*----------- header -----------*/
255 0x25, 0xc9, /* Signature */ 206 0x25, 0xc9, /* Signature */
256 0x40, /* Size */ 207 0x40, /* Size */
257 0x01, /* Revision */ 208 0x01, /* Revision */
@@ -289,7 +240,7 @@ static UCHAR i91udftNvRam[64] =
289 0, 0}; /* - CheckSum - */ 240 0, 0}; /* - CheckSum - */
290 241
291 242
292static UCHAR tul_rate_tbl[8] = /* fast 20 */ 243static u8 initio_rate_tbl[8] = /* fast 20 */
293{ 244{
294 /* nanoseconds, divided by 4 */ 245 /* nanoseconds, divided by 4 */
295 12, /* 50ns, 20M */ 246 12, /* 50ns, 20M */
@@ -302,53 +253,17 @@ static UCHAR tul_rate_tbl[8] = /* fast 20 */
302 62 /* 250ns, 4M */ 253 62 /* 250ns, 4M */
303}; 254};
304 255
305static void tul_do_pause(unsigned amount) 256static void initio_do_pause(unsigned amount)
306{ /* Pause for amount jiffies */ 257{
258 /* Pause for amount jiffies */
307 unsigned long the_time = jiffies + amount; 259 unsigned long the_time = jiffies + amount;
308 260
309 while (time_before_eq(jiffies, the_time)); 261 while (time_before_eq(jiffies, the_time))
262 cpu_relax();
310} 263}
311 264
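initio_do_pause() still spins for the whole interval; the cpu_relax() added above only makes the busy loop polite. For the multi-second delays this driver requests (see the FIXME added in initio_reset_scsi() below), a sleeping variant would be preferable in process context. A sketch, not part of this patch:

	static void initio_sleep_pause(unsigned int jifs)
	{
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(jifs);		/* yields the CPU instead of spinning */
	}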
312/*-- forward reference --*/ 265/*-- forward reference --*/
313 266
314/*******************************************************************
315 Use memeory refresh time ~ 15us * 2
316********************************************************************/
317void tul_se2_wait(void)
318{
319#if 1
320 udelay(30);
321#else
322 UCHAR readByte;
323
324 readByte = TUL_RD(0, 0x61);
325 if ((readByte & 0x10) == 0x10) {
326 for (;;) {
327 readByte = TUL_RD(0, 0x61);
328 if ((readByte & 0x10) == 0x10)
329 break;
330 }
331 for (;;) {
332 readByte = TUL_RD(0, 0x61);
333 if ((readByte & 0x10) != 0x10)
334 break;
335 }
336 } else {
337 for (;;) {
338 readByte = TUL_RD(0, 0x61);
339 if ((readByte & 0x10) == 0x10)
340 break;
341 }
342 for (;;) {
343 readByte = TUL_RD(0, 0x61);
344 if ((readByte & 0x10) != 0x10)
345 break;
346 }
347 }
348#endif
349}
350
351
352/****************************************************************** 267/******************************************************************
353 Input: instruction for Serial E2PROM 268 Input: instruction for Serial E2PROM
354 269
@@ -379,1174 +294,1019 @@ void tul_se2_wait(void)
379 294
380 295
381******************************************************************/ 296******************************************************************/
382static void tul_se2_instr(WORD CurBase, UCHAR instr) 297
298/**
299 * initio_se2_instr - bitbang an instruction
300 * @base: Base of InitIO controller
301 * @instr: Instruction for serial E2PROM
302 *
303 * Bitbang an instruction out to the serial E2Prom
304 */
305
306static void initio_se2_instr(unsigned long base, u8 instr)
383{ 307{
384 int i; 308 int i;
385 UCHAR b; 309 u8 b;
386 310
387 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* cs+start bit */ 311 outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
388 tul_se2_wait(); 312 udelay(30);
389 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK | SE2DO); /* +CLK */ 313 outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
390 tul_se2_wait(); 314 udelay(30);
391 315
392 for (i = 0; i < 8; i++) { 316 for (i = 0; i < 8; i++) {
393 if (instr & 0x80) 317 if (instr & 0x80)
394 b = SE2CS | SE2DO; /* -CLK+dataBit */ 318 b = SE2CS | SE2DO; /* -CLK+dataBit */
395 else 319 else
396 b = SE2CS; /* -CLK */ 320 b = SE2CS; /* -CLK */
397 TUL_WR(CurBase + TUL_NVRAM, b); 321 outb(b, base + TUL_NVRAM);
398 tul_se2_wait(); 322 udelay(30);
399 TUL_WR(CurBase + TUL_NVRAM, b | SE2CLK); /* +CLK */ 323 outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
400 tul_se2_wait(); 324 udelay(30);
401 instr <<= 1; 325 instr <<= 1;
402 } 326 }
403 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 327 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
404 tul_se2_wait(); 328 udelay(30);
405 return;
406} 329}
407 330
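The opcodes that initio_se2_instr() bit-bangs are scattered across the callers below; collected in one place for reference (the macro names are illustrative, the values are the ones used in this file):

	#define SE2_EWEN	0x30			/* enable erase/write */
	#define SE2_EWDS	0x00			/* disable erase/write */
	#define SE2_READ(addr)	((u8)((addr) | 0x80))	/* read word at addr */
	#define SE2_WRITE(addr)	((u8)((addr) | 0x40))	/* write word at addr */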
408 331
409/****************************************************************** 332/**
410 Function name : tul_se2_ew_en 333 * initio_se2_ew_en - Enable erase/write
411 Description : Enable erase/write state of serial EEPROM 334 * @base: Base address of InitIO controller
412******************************************************************/ 335 *
413void tul_se2_ew_en(WORD CurBase) 336 * Enable erase/write state of serial EEPROM
337 */
338void initio_se2_ew_en(unsigned long base)
414{ 339{
415 tul_se2_instr(CurBase, 0x30); /* EWEN */ 340 initio_se2_instr(base, 0x30); /* EWEN */
416 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 341 outb(0, base + TUL_NVRAM); /* -CS */
417 tul_se2_wait(); 342 udelay(30);
418 return;
419} 343}
420 344
421 345
422/************************************************************************ 346/**
423 Disable erase/write state of serial EEPROM 347 * initio_se2_ew_ds - Disable erase/write
424*************************************************************************/ 348 * @base: Base address of InitIO controller
425void tul_se2_ew_ds(WORD CurBase) 349 *
350 * Disable erase/write state of serial EEPROM
351 */
352void initio_se2_ew_ds(unsigned long base)
426{ 353{
427 tul_se2_instr(CurBase, 0); /* EWDS */ 354 initio_se2_instr(base, 0); /* EWDS */
428 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 355 outb(0, base + TUL_NVRAM); /* -CS */
429 tul_se2_wait(); 356 udelay(30);
430 return;
431} 357}
432 358
433 359
434/****************************************************************** 360/**
435 Input :address of Serial E2PROM 361 * initio_se2_rd - read E2PROM word
436 Output :value stored in Serial E2PROM 362 * @base: Base of InitIO controller
437*******************************************************************/ 363 * @addr: Address of word in E2PROM
438static USHORT tul_se2_rd(WORD CurBase, ULONG adr) 364 *
365 * Read a word from the NV E2PROM device
366 */
367static u16 initio_se2_rd(unsigned long base, u8 addr)
439{ 368{
440 UCHAR instr, readByte; 369 u8 instr, rb;
441 USHORT readWord; 370 u16 val = 0;
442 int i; 371 int i;
443 372
444 instr = (UCHAR) (adr | 0x80); 373 instr = (u8) (addr | 0x80);
445 tul_se2_instr(CurBase, instr); /* READ INSTR */ 374 initio_se2_instr(base, instr); /* READ INSTR */
446 readWord = 0;
447 375
448 for (i = 15; i >= 0; i--) { 376 for (i = 15; i >= 0; i--) {
449 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */ 377 outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
450 tul_se2_wait(); 378 udelay(30);
451 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 379 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
452 380
453 /* sample data after the following edge of clock */ 381 /* sample data after the following edge of clock */
454 readByte = TUL_RD(CurBase, TUL_NVRAM); 382 rb = inb(base + TUL_NVRAM);
455 readByte &= SE2DI; 383 rb &= SE2DI;
456 readWord += (readByte << i); 384 val += (rb << i);
457 tul_se2_wait(); /* 6/20/95 */ 385 udelay(30); /* 6/20/95 */
458 } 386 }
459 387
460 TUL_WR(CurBase + TUL_NVRAM, 0); /* no chip select */ 388 outb(0, base + TUL_NVRAM); /* no chip select */
461 tul_se2_wait(); 389 udelay(30);
462 return readWord; 390 return val;
463} 391}
464 392
465 393/**
466/****************************************************************** 394 * initio_se2_wr - write E2PROM word
467 Input: new value in Serial E2PROM, address of Serial E2PROM 395 * @base: Base of InitIO controller
468*******************************************************************/ 396 * @addr: Address of word in E2PROM
469static void tul_se2_wr(WORD CurBase, UCHAR adr, USHORT writeWord) 397 * @val: Value to write
398 *
399 * Write a word to the NV E2PROM device. Used when recovering from
400 * a problem with the NV.
401 */
402static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
470{ 403{
471 UCHAR readByte; 404 u8 rb;
472 UCHAR instr; 405 u8 instr;
473 int i; 406 int i;
474 407
475 instr = (UCHAR) (adr | 0x40); 408 instr = (u8) (addr | 0x40);
476 tul_se2_instr(CurBase, instr); /* WRITE INSTR */ 409 initio_se2_instr(base, instr); /* WRITE INSTR */
477 for (i = 15; i >= 0; i--) { 410 for (i = 15; i >= 0; i--) {
478 if (writeWord & 0x8000) 411 if (val & 0x8000)
479 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* -CLK+dataBit 1 */ 412 outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
480 else 413 else
481 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK+dataBit 0 */ 414 outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
482 tul_se2_wait(); 415 udelay(30);
483 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */ 416 outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
484 tul_se2_wait(); 417 udelay(30);
485 writeWord <<= 1; 418 val <<= 1;
486 } 419 }
487 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 420 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
488 tul_se2_wait(); 421 udelay(30);
489 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 422 outb(0, base + TUL_NVRAM); /* -CS */
490 tul_se2_wait(); 423 udelay(30);
491 424
492 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* +CS */ 425 outb(SE2CS, base + TUL_NVRAM); /* +CS */
493 tul_se2_wait(); 426 udelay(30);
494 427
495 for (;;) { 428 for (;;) {
496 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */ 429 outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
497 tul_se2_wait(); 430 udelay(30);
498 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 431 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
499 tul_se2_wait(); 432 udelay(30);
500 if ((readByte = TUL_RD(CurBase, TUL_NVRAM)) & SE2DI) 433 if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
501 break; /* write complete */ 434 break; /* write complete */
502 } 435 }
503 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 436 outb(0, base + TUL_NVRAM); /* -CS */
504 return;
505} 437}
506 438
439/**
440 * initio_se2_rd_all - read host adapter NV configuration
441 * @base: Base address of InitIO controller
442 *
443 * Reads the E2PROM data into main memory. Ensures that the checksum
444 * and header marker are valid. Returns 1 on success, -1 on error.
445 */
507 446
508/*********************************************************************** 447static int initio_se2_rd_all(unsigned long base)
509 Read SCSI H/A configuration parameters from serial EEPROM
510************************************************************************/
511int tul_se2_rd_all(WORD CurBase)
512{ 448{
513 int i; 449 int i;
514 ULONG chksum = 0; 450 u16 chksum = 0;
515 USHORT *np; 451 u16 *np;
516 452
517 i91unvramp = &i91unvram; 453 i91unvramp = &i91unvram;
518 np = (USHORT *) i91unvramp; 454 np = (u16 *) i91unvramp;
519 for (i = 0; i < 32; i++) { 455 for (i = 0; i < 32; i++)
520 *np++ = tul_se2_rd(CurBase, i); 456 *np++ = initio_se2_rd(base, i);
521 }
522 457
523/*--------------------Is signature "ini" ok ? ----------------*/ 458 /* Is signature "ini" ok ? */
524 if (i91unvramp->NVM_Signature != INI_SIGNATURE) 459 if (i91unvramp->NVM_Signature != INI_SIGNATURE)
525 return -1; 460 return -1;
526/*---------------------- Is ckecksum ok ? ----------------------*/ 461 /* Is checksum ok? */
527 np = (USHORT *) i91unvramp; 462 np = (u16 *) i91unvramp;
528 for (i = 0; i < 31; i++) 463 for (i = 0; i < 31; i++)
529 chksum += *np++; 464 chksum += *np++;
530 if (i91unvramp->NVM_CheckSum != (USHORT) chksum) 465 if (i91unvramp->NVM_CheckSum != chksum)
531 return -1; 466 return -1;
532 return 1; 467 return 1;
533} 468}
534 469
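The validity rule enforced by initio_se2_rd_all() can be restated on its own: the NVRAM image is 32 16-bit words, and the first 31 must sum, modulo 2^16, to the checksum stored in word 31. A standalone sketch (the function name is illustrative):

	static int nvram_csum_ok(const u16 *nv)
	{
		u16 sum = 0;
		int i;

		for (i = 0; i < 31; i++)
			sum += nv[i];	/* wraps naturally at 16 bits */
		return sum == nv[31];
	}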
535 470/**
536/*********************************************************************** 471 * initio_se2_update_all - Update E2PROM
537 Update SCSI H/A configuration parameters from serial EEPROM 472 * @base: Base of InitIO controller
538************************************************************************/ 473 *
539void tul_se2_update_all(WORD CurBase) 474 * Update the E2PROM by writing any changes into the E2PROM
475 * chip, rewriting the checksum.
476 */
477static void initio_se2_update_all(unsigned long base)
540{ /* setup default pattern */ 478{ /* setup default pattern */
541 int i; 479 int i;
542 ULONG chksum = 0; 480 u16 chksum = 0;
543 USHORT *np, *np1; 481 u16 *np, *np1;
544 482
545 i91unvramp = &i91unvram; 483 i91unvramp = &i91unvram;
546 /* Calculate checksum first */ 484 /* Calculate checksum first */
547 np = (USHORT *) i91udftNvRam; 485 np = (u16 *) i91udftNvRam;
548 for (i = 0; i < 31; i++) 486 for (i = 0; i < 31; i++)
549 chksum += *np++; 487 chksum += *np++;
550 *np = (USHORT) chksum; 488 *np = chksum;
551 tul_se2_ew_en(CurBase); /* Enable write */ 489 initio_se2_ew_en(base); /* Enable write */
552 490
553 np = (USHORT *) i91udftNvRam; 491 np = (u16 *) i91udftNvRam;
554 np1 = (USHORT *) i91unvramp; 492 np1 = (u16 *) i91unvramp;
555 for (i = 0; i < 32; i++, np++, np1++) { 493 for (i = 0; i < 32; i++, np++, np1++) {
556 if (*np != *np1) { 494 if (*np != *np1)
557 tul_se2_wr(CurBase, i, *np); 495 initio_se2_wr(base, i, *np);
558 }
559 } 496 }
560 497 initio_se2_ew_ds(base); /* Disable write */
561 tul_se2_ew_ds(CurBase); /* Disable write */
562 return;
563} 498}
564 499
565/************************************************************************* 500/**
566 Function name : read_eeprom 501 * initio_read_eeprom - Retrieve configuration
567**************************************************************************/ 502 * @base: Base of InitIO Host Adapter
568void tul_read_eeprom(WORD CurBase) 503 *
569{ 504 * Retrieve the host adapter configuration data from E2Prom. If the
570 UCHAR gctrl; 505 * data is invalid then the defaults are used and are also restored
571 506 * into the E2PROM. This forms the access point for the SCSI driver
572 i91unvramp = &i91unvram; 507 * into the E2PROM layer; the other functions for the E2PROM are all
573/*------Enable EEProm programming ---*/ 508 * for internal use.
574 gctrl = TUL_RD(CurBase, TUL_GCTRL); 509 *
575 TUL_WR(CurBase + TUL_GCTRL, gctrl | TUL_GCTRL_EEPROM_BIT); 510 * Must be called single threaded, uses a shared global area.
576 if (tul_se2_rd_all(CurBase) != 1) { 511 */
577 tul_se2_update_all(CurBase); /* setup default pattern */
578 tul_se2_rd_all(CurBase); /* load again */
579 }
580/*------ Disable EEProm programming ---*/
581 gctrl = TUL_RD(CurBase, TUL_GCTRL);
582 TUL_WR(CurBase + TUL_GCTRL, gctrl & ~TUL_GCTRL_EEPROM_BIT);
583} /* read_eeprom */
584 512
585static int Addi91u_into_Adapter_table(WORD wBIOS, WORD wBASE, BYTE bInterrupt, 513static void initio_read_eeprom(unsigned long base)
586 BYTE bBus, BYTE bDevice)
587{ 514{
588 int i, j; 515 u8 gctrl;
589 516
590 for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) { 517 i91unvramp = &i91unvram;
591 if (i91u_adpt[i].ADPT_BIOS < wBIOS) 518 /* Enable EEProm programming */
592 continue; 519 gctrl = inb(base + TUL_GCTRL);
593 if (i91u_adpt[i].ADPT_BIOS == wBIOS) { 520 outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
594 if (i91u_adpt[i].ADPT_BASE == wBASE) { 521 if (initio_se2_rd_all(base) != 1) {
595 if (i91u_adpt[i].ADPT_Bus != 0xFF) 522 initio_se2_update_all(base); /* setup default pattern */
596 return 1; 523 initio_se2_rd_all(base); /* load again */
597 } else if (i91u_adpt[i].ADPT_BASE < wBASE)
598 continue;
599 }
600 for (j = MAX_SUPPORTED_ADAPTERS - 1; j > i; j--) {
601 i91u_adpt[j].ADPT_BASE = i91u_adpt[j - 1].ADPT_BASE;
602 i91u_adpt[j].ADPT_INTR = i91u_adpt[j - 1].ADPT_INTR;
603 i91u_adpt[j].ADPT_BIOS = i91u_adpt[j - 1].ADPT_BIOS;
604 i91u_adpt[j].ADPT_Bus = i91u_adpt[j - 1].ADPT_Bus;
605 i91u_adpt[j].ADPT_Device = i91u_adpt[j - 1].ADPT_Device;
606 }
607 i91u_adpt[i].ADPT_BASE = wBASE;
608 i91u_adpt[i].ADPT_INTR = bInterrupt;
609 i91u_adpt[i].ADPT_BIOS = wBIOS;
610 i91u_adpt[i].ADPT_Bus = bBus;
611 i91u_adpt[i].ADPT_Device = bDevice;
612 return 0;
613 } 524 }
614 return 1; 525 /* Disable EEProm programming */
526 gctrl = inb(base + TUL_GCTRL);
527 outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
615} 528}
616 529
617static void init_i91uAdapter_table(void) 530/**
618{ 531 * initio_stop_bm - stop bus master
619 int i; 532 * @host: InitIO we are stopping
620 533 *
621 for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) { /* Initialize adapter structure */ 534 * Stop any pending DMA operation, aborting the DMA if necessary
622 i91u_adpt[i].ADPT_BIOS = 0xffff; 535 */
623 i91u_adpt[i].ADPT_BASE = 0xffff;
624 i91u_adpt[i].ADPT_INTR = 0xff;
625 i91u_adpt[i].ADPT_Bus = 0xff;
626 i91u_adpt[i].ADPT_Device = 0xff;
627 }
628 return;
629}
630 536
631static void tul_stop_bm(HCS * pCurHcb) 537static void initio_stop_bm(struct initio_host * host)
632{ 538{
633 539
634 if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ 540 if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
635 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO); 541 outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
636 /* wait Abort DMA xfer done */ 542 /* wait Abort DMA xfer done */
637 while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0); 543 while ((inb(host->addr + TUL_Int) & XABT) == 0)
544 cpu_relax();
638 } 545 }
639 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 546 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
640} 547}
641 548
642/***************************************************************************/ 549/**
643static void get_tulipPCIConfig(HCS * pCurHcb, int ch_idx) 550 * initio_reset_scsi - Reset SCSI host controller
644{ 551 * @host: InitIO host to reset
645 pCurHcb->HCS_Base = i91u_adpt[ch_idx].ADPT_BASE; /* Supply base address */ 552 * @seconds: Recovery time
646 pCurHcb->HCS_BIOS = i91u_adpt[ch_idx].ADPT_BIOS; /* Supply BIOS address */ 553 *
647 pCurHcb->HCS_Intr = i91u_adpt[ch_idx].ADPT_INTR; /* Supply interrupt line */ 554 * Perform a full reset of the SCSI subsystem.
648 return; 555 */
649}
650 556
651/***************************************************************************/ 557static int initio_reset_scsi(struct initio_host * host, int seconds)
652static int tul_reset_scsi(HCS * pCurHcb, int seconds)
653{ 558{
654 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_BUS); 559 outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
655 560
656 while (!((pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt)) & TSS_SCSIRST_INT)); 561 while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
657 /* reset tulip chip */ 562 cpu_relax();
658 563
659 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, 0); 564 /* reset tulip chip */
565 outb(0, host->addr + TUL_SSignal);
660 566
661 /* Stall for a while, wait for target's firmware ready, make it 2 sec! */ 567 /* Stall for a while, wait for target's firmware ready, make it 2 sec! */
662 /* SONY 5200 tape drive won't work if only stall for 1 sec */ 568 /* SONY 5200 tape drive won't work if only stall for 1 sec */
663 tul_do_pause(seconds * HZ); 569 /* FIXME: this is a very long busy wait right now */
664 570 initio_do_pause(seconds * HZ);
665 TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
666 571
667 return (SCSI_RESET_SUCCESS); 572 inb(host->addr + TUL_SInt);
573 return SCSI_RESET_SUCCESS;
668} 574}
669 575
670/***************************************************************************/ 576/**
671static int init_tulip(HCS * pCurHcb, SCB * scbp, int tul_num_scb, 577 * initio_init - set up an InitIO host adapter
672 BYTE * pbBiosAdr, int seconds) 578 * @host: InitIO host adapter
579 * @num_scbs: Number of SCBs
580 * @bios_addr: BIOS address
581 *
582 * Set up the host adapter and devices according to the configuration
583 * retrieved from the E2PROM.
584 *
585 * Locking: Calls E2PROM layer code which is not re-entrant so must
586 * run single threaded for now.
587 */
588
589static void initio_init(struct initio_host * host, u8 *bios_addr)
673{ 590{
674 int i; 591 int i;
675 BYTE *pwFlags; 592 u8 *flags;
676 BYTE *pbHeads; 593 u8 *heads;
677 SCB *pTmpScb, *pPrevScb = NULL; 594
678 595 /* Get E2Prom configuration */
679 pCurHcb->HCS_NumScbs = tul_num_scb; 596 initio_read_eeprom(host->addr);
680 pCurHcb->HCS_Semaph = 1;
681 spin_lock_init(&pCurHcb->HCS_SemaphLock);
682 pCurHcb->HCS_JSStatus0 = 0;
683 pCurHcb->HCS_Scb = scbp;
684 pCurHcb->HCS_NxtPend = scbp;
685 pCurHcb->HCS_NxtAvail = scbp;
686 for (i = 0, pTmpScb = scbp; i < tul_num_scb; i++, pTmpScb++) {
687 pTmpScb->SCB_TagId = i;
688 if (i != 0)
689 pPrevScb->SCB_NxtScb = pTmpScb;
690 pPrevScb = pTmpScb;
691 }
692 pPrevScb->SCB_NxtScb = NULL;
693 pCurHcb->HCS_ScbEnd = pTmpScb;
694 pCurHcb->HCS_FirstAvail = scbp;
695 pCurHcb->HCS_LastAvail = pPrevScb;
696 spin_lock_init(&pCurHcb->HCS_AvailLock);
697 pCurHcb->HCS_FirstPend = NULL;
698 pCurHcb->HCS_LastPend = NULL;
699 pCurHcb->HCS_FirstBusy = NULL;
700 pCurHcb->HCS_LastBusy = NULL;
701 pCurHcb->HCS_FirstDone = NULL;
702 pCurHcb->HCS_LastDone = NULL;
703 pCurHcb->HCS_ActScb = NULL;
704 pCurHcb->HCS_ActTcs = NULL;
705
706 tul_read_eeprom(pCurHcb->HCS_Base);
707/*---------- get H/A configuration -------------*/
708 if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8) 597 if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
709 pCurHcb->HCS_MaxTar = 8; 598 host->max_tar = 8;
710 else 599 else
711 pCurHcb->HCS_MaxTar = 16; 600 host->max_tar = 16;
712 601
713 pCurHcb->HCS_Config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1; 602 host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
714 603
715 pCurHcb->HCS_SCSI_ID = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID; 604 host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
716 pCurHcb->HCS_IdMask = ~(1 << pCurHcb->HCS_SCSI_ID); 605 host->idmask = ~(1 << host->scsi_id);
717 606
718#ifdef CHK_PARITY 607#ifdef CHK_PARITY
719 /* Enable parity error response */ 608 /* Enable parity error response */
720 TUL_WR(pCurHcb->HCS_Base + TUL_PCMD, TUL_RD(pCurHcb->HCS_Base, TUL_PCMD) | 0x40); 609 outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
721#endif 610#endif
722 611
723 /* Mask all the interrupt */ 612 /* Mask all the interrupt */
724 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F); 613 outb(0x1F, host->addr + TUL_Mask);
725 614
726 tul_stop_bm(pCurHcb); 615 initio_stop_bm(host);
727 /* --- Initialize the tulip --- */ 616 /* --- Initialize the tulip --- */
728 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_CHIP); 617 outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
729 618
730 /* program HBA's SCSI ID */ 619 /* program HBA's SCSI ID */
731 TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId, pCurHcb->HCS_SCSI_ID << 4); 620 outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
732 621
733 /* Enable Initiator Mode ,phase latch,alternate sync period mode, 622 /* Enable Initiator Mode ,phase latch,alternate sync period mode,
734 disable SCSI reset */ 623 disable SCSI reset */
735 if (pCurHcb->HCS_Config & HCC_EN_PAR) 624 if (host->config & HCC_EN_PAR)
736 pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR); 625 host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
737 else 626 else
738 pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT); 627 host->sconf1 = (TSC_INITDEFAULT);
739 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_SConf1); 628 outb(host->sconf1, host->addr + TUL_SConfig);
740 629
741 /* Enable HW reselect */ 630 /* Enable HW reselect */
742 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); 631 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
743 632
744 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, 0); 633 outb(0, host->addr + TUL_SPeriod);
745 634
746 /* selection time out = 250 ms */ 635 /* selection time out = 250 ms */
747 TUL_WR(pCurHcb->HCS_Base + TUL_STimeOut, 153); 636 outb(153, host->addr + TUL_STimeOut);
748 637
749/*--------- Enable SCSI terminator -----*/ 638 /* Enable SCSI terminator */
750 TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, (pCurHcb->HCS_Config & (HCC_ACT_TERM1 | HCC_ACT_TERM2))); 639 outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
751 TUL_WR(pCurHcb->HCS_Base + TUL_GCTRL1, 640 host->addr + TUL_XCtrl);
752 ((pCurHcb->HCS_Config & HCC_AUTO_TERM) >> 4) | (TUL_RD(pCurHcb->HCS_Base, TUL_GCTRL1) & 0xFE)); 641 outb(((host->config & HCC_AUTO_TERM) >> 4) |
642 (inb(host->addr + TUL_GCTRL1) & 0xFE),
643 host->addr + TUL_GCTRL1);
753 644
754 for (i = 0, 645 for (i = 0,
755 pwFlags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config), 646 flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
756 pbHeads = pbBiosAdr + 0x180; 647 heads = bios_addr + 0x180;
757 i < pCurHcb->HCS_MaxTar; 648 i < host->max_tar;
758 i++, pwFlags++) { 649 i++, flags++) {
759 pCurHcb->HCS_Tcs[i].TCS_Flags = *pwFlags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE); 650 host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
760 if (pCurHcb->HCS_Tcs[i].TCS_Flags & TCF_EN_255) 651 if (host->targets[i].flags & TCF_EN_255)
761 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63; 652 host->targets[i].drv_flags = TCF_DRV_255_63;
762 else 653 else
763 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0; 654 host->targets[i].drv_flags = 0;
764 pCurHcb->HCS_Tcs[i].TCS_JS_Period = 0; 655 host->targets[i].js_period = 0;
765 pCurHcb->HCS_Tcs[i].TCS_SConfig0 = pCurHcb->HCS_SConf1; 656 host->targets[i].sconfig0 = host->sconf1;
766 pCurHcb->HCS_Tcs[i].TCS_DrvHead = *pbHeads++; 657 host->targets[i].heads = *heads++;
767 if (pCurHcb->HCS_Tcs[i].TCS_DrvHead == 255) 658 if (host->targets[i].heads == 255)
768 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63; 659 host->targets[i].drv_flags = TCF_DRV_255_63;
769 else 660 else
770 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0; 661 host->targets[i].drv_flags = 0;
771 pCurHcb->HCS_Tcs[i].TCS_DrvSector = *pbHeads++; 662 host->targets[i].sectors = *heads++;
772 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY; 663 host->targets[i].flags &= ~TCF_BUSY;
773 pCurHcb->HCS_ActTags[i] = 0; 664 host->act_tags[i] = 0;
774 pCurHcb->HCS_MaxTags[i] = 0xFF; 665 host->max_tags[i] = 0xFF;
775 } /* for */ 666 } /* for */
776 printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n", 667 printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
777 pCurHcb->HCS_Base, pCurHcb->HCS_Intr, 668 host->addr, host->irq,
778 pCurHcb->HCS_BIOS, pCurHcb->HCS_SCSI_ID); 669 host->bios_addr, host->scsi_id);
779/*------------------- reset SCSI Bus ---------------------------*/ 670 /* Reset SCSI Bus */
780 if (pCurHcb->HCS_Config & HCC_SCSI_RESET) { 671 if (host->config & HCC_SCSI_RESET) {
781 printk("i91u: Reset SCSI Bus ... \n"); 672 printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
782 tul_reset_scsi(pCurHcb, seconds); 673 initio_reset_scsi(host, 10);
783 } 674 }
784 TUL_WR(pCurHcb->HCS_Base + TUL_SCFG1, 0x17); 675 outb(0x17, host->addr + TUL_SCFG1);
785 TUL_WR(pCurHcb->HCS_Base + TUL_SIntEnable, 0xE9); 676 outb(0xE9, host->addr + TUL_SIntEnable);
786 return (0);
787} 677}
788 678
789/***************************************************************************/ 679/**
790static SCB *tul_alloc_scb(HCS * hcsp) 680 * initio_alloc_scb - Allocate an SCB
681 * @host: InitIO host we are allocating for
682 *
683 * Walk the SCB list for the controller and allocate a free SCB if
684 * one exists.
685 */
686static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
791{ 687{
792 SCB *pTmpScb; 688 struct scsi_ctrl_blk *scb;
793 ULONG flags; 689 unsigned long flags;
794 spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags); 690
795 if ((pTmpScb = hcsp->HCS_FirstAvail) != NULL) { 691 spin_lock_irqsave(&host->avail_lock, flags);
692 if ((scb = host->first_avail) != NULL) {
796#if DEBUG_QUEUE 693#if DEBUG_QUEUE
797 printk("find scb at %08lx\n", (ULONG) pTmpScb); 694 printk("find scb at %p\n", scb);
798#endif 695#endif
799 if ((hcsp->HCS_FirstAvail = pTmpScb->SCB_NxtScb) == NULL) 696 if ((host->first_avail = scb->next) == NULL)
800 hcsp->HCS_LastAvail = NULL; 697 host->last_avail = NULL;
801 pTmpScb->SCB_NxtScb = NULL; 698 scb->next = NULL;
802 pTmpScb->SCB_Status = SCB_RENT; 699 scb->status = SCB_RENT;
803 } 700 }
804 spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags); 701 spin_unlock_irqrestore(&host->avail_lock, flags);
805 return (pTmpScb); 702 return scb;
806} 703}
807 704
808/***************************************************************************/ 705/**
809static void tul_release_scb(HCS * hcsp, SCB * scbp) 706 * initio_release_scb - Release an SCB
707 * @host: InitIO host that owns the SCB
708 * @cmnd: SCB command block being returned
709 *
710 * Return an allocated SCB to the host free list
711 */
712
713static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
810{ 714{
811 ULONG flags; 715 unsigned long flags;
812 716
813#if DEBUG_QUEUE 717#if DEBUG_QUEUE
814 printk("Release SCB %lx; ", (ULONG) scbp); 718 printk("Release SCB %p; ", cmnd);
815#endif 719#endif
816 spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags); 720 spin_lock_irqsave(&(host->avail_lock), flags);
817 scbp->SCB_Srb = NULL; 721 cmnd->srb = NULL;
818 scbp->SCB_Status = 0; 722 cmnd->status = 0;
819 scbp->SCB_NxtScb = NULL; 723 cmnd->next = NULL;
820 if (hcsp->HCS_LastAvail != NULL) { 724 if (host->last_avail != NULL) {
821 hcsp->HCS_LastAvail->SCB_NxtScb = scbp; 725 host->last_avail->next = cmnd;
822 hcsp->HCS_LastAvail = scbp; 726 host->last_avail = cmnd;
823 } else { 727 } else {
824 hcsp->HCS_FirstAvail = scbp; 728 host->first_avail = cmnd;
825 hcsp->HCS_LastAvail = scbp; 729 host->last_avail = cmnd;
826 } 730 }
827 spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags); 731 spin_unlock_irqrestore(&(host->avail_lock), flags);
828} 732}
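initio_alloc_scb() and initio_release_scb() above implement a classic IRQ-safe free list: a singly linked chain with head and tail pointers guarded by avail_lock, taken with spin_lock_irqsave() because SCBs can be released from interrupt context. The same discipline recurs for the pending and busy queues that follow. Reduced to its essentials (illustrative restatement, error handling elided):

	spin_lock_irqsave(&host->avail_lock, flags);
	scb = host->first_avail;			/* pop from the head */
	if (scb != NULL) {
		if ((host->first_avail = scb->next) == NULL)
			host->last_avail = NULL;	/* list is now empty */
		scb->next = NULL;
	}
	spin_unlock_irqrestore(&host->avail_lock, flags);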
829 733
830/***************************************************************************/ 734/***************************************************************************/
831static void tul_append_pend_scb(HCS * pCurHcb, SCB * scbp) 735static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
832{ 736{
833 737
834#if DEBUG_QUEUE 738#if DEBUG_QUEUE
835 printk("Append pend SCB %lx; ", (ULONG) scbp); 739 printk("Append pend SCB %p; ", scbp);
836#endif 740#endif
837 scbp->SCB_Status = SCB_PEND; 741 scbp->status = SCB_PEND;
838 scbp->SCB_NxtScb = NULL; 742 scbp->next = NULL;
839 if (pCurHcb->HCS_LastPend != NULL) { 743 if (host->last_pending != NULL) {
840 pCurHcb->HCS_LastPend->SCB_NxtScb = scbp; 744 host->last_pending->next = scbp;
841 pCurHcb->HCS_LastPend = scbp; 745 host->last_pending = scbp;
842 } else { 746 } else {
843 pCurHcb->HCS_FirstPend = scbp; 747 host->first_pending = scbp;
844 pCurHcb->HCS_LastPend = scbp; 748 host->last_pending = scbp;
845 } 749 }
846} 750}
847 751
848/***************************************************************************/ 752/***************************************************************************/
849static void tul_push_pend_scb(HCS * pCurHcb, SCB * scbp) 753static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
850{ 754{
851 755
852#if DEBUG_QUEUE 756#if DEBUG_QUEUE
853 printk("Push pend SCB %lx; ", (ULONG) scbp); 757 printk("Push pend SCB %p; ", scbp);
854#endif 758#endif
855 scbp->SCB_Status = SCB_PEND; 759 scbp->status = SCB_PEND;
856 if ((scbp->SCB_NxtScb = pCurHcb->HCS_FirstPend) != NULL) { 760 if ((scbp->next = host->first_pending) != NULL) {
857 pCurHcb->HCS_FirstPend = scbp; 761 host->first_pending = scbp;
858 } else { 762 } else {
859 pCurHcb->HCS_FirstPend = scbp; 763 host->first_pending = scbp;
860 pCurHcb->HCS_LastPend = scbp; 764 host->last_pending = scbp;
861 } 765 }
862} 766}
863 767
864/***************************************************************************/ 768static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
865static SCB *tul_find_first_pend_scb(HCS * pCurHcb)
866{ 769{
867 SCB *pFirstPend; 770 struct scsi_ctrl_blk *first;
868 771
869 772
870 pFirstPend = pCurHcb->HCS_FirstPend; 773 first = host->first_pending;
871 while (pFirstPend != NULL) { 774 while (first != NULL) {
872 if (pFirstPend->SCB_Opcode != ExecSCSI) { 775 if (first->opcode != ExecSCSI)
873 return (pFirstPend); 776 return first;
874 } 777 if (first->tagmsg == 0) {
875 if (pFirstPend->SCB_TagMsg == 0) { 778 if ((host->act_tags[first->target] == 0) &&
876 if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] == 0) && 779 !(host->targets[first->target].flags & TCF_BUSY))
877 !(pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) { 780 return first;
878 return (pFirstPend);
879 }
880 } else { 781 } else {
881 if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] >= 782 if ((host->act_tags[first->target] >=
882 pCurHcb->HCS_MaxTags[pFirstPend->SCB_Target]) | 783 host->max_tags[first->target]) |
883 (pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) { 784 (host->targets[first->target].flags & TCF_BUSY)) {
884 pFirstPend = pFirstPend->SCB_NxtScb; 785 first = first->next;
885 continue; 786 continue;
886 } 787 }
887 return (pFirstPend); 788 return first;
888 } 789 }
889 pFirstPend = pFirstPend->SCB_NxtScb; 790 first = first->next;
890 } 791 }
891 792 return first;
892
893 return (pFirstPend);
894} 793}
895/***************************************************************************/ 794
896static void tul_unlink_pend_scb(HCS * pCurHcb, SCB * pCurScb) 795static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
897{ 796{
898 SCB *pTmpScb, *pPrevScb; 797 struct scsi_ctrl_blk *tmp, *prev;
899 798
900#if DEBUG_QUEUE 799#if DEBUG_QUEUE
901 printk("unlink pend SCB %lx; ", (ULONG) pCurScb); 800 printk("unlink pend SCB %p; ", scb);
902#endif 801#endif
903 802
904 pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend; 803 prev = tmp = host->first_pending;
905 while (pTmpScb != NULL) { 804 while (tmp != NULL) {
906 if (pCurScb == pTmpScb) { /* Unlink this SCB */ 805 if (scb == tmp) { /* Unlink this SCB */
907 if (pTmpScb == pCurHcb->HCS_FirstPend) { 806 if (tmp == host->first_pending) {
908 if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL) 807 if ((host->first_pending = tmp->next) == NULL)
909 pCurHcb->HCS_LastPend = NULL; 808 host->last_pending = NULL;
910 } else { 809 } else {
911 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 810 prev->next = tmp->next;
912 if (pTmpScb == pCurHcb->HCS_LastPend) 811 if (tmp == host->last_pending)
913 pCurHcb->HCS_LastPend = pPrevScb; 812 host->last_pending = prev;
914 } 813 }
915 pTmpScb->SCB_NxtScb = NULL; 814 tmp->next = NULL;
916 break; 815 break;
917 } 816 }
918 pPrevScb = pTmpScb; 817 prev = tmp;
919 pTmpScb = pTmpScb->SCB_NxtScb; 818 tmp = tmp->next;
920 } 819 }
921 return;
922} 820}
923/***************************************************************************/ 821
924static void tul_append_busy_scb(HCS * pCurHcb, SCB * scbp) 822static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
925{ 823{
926 824
927#if DEBUG_QUEUE 825#if DEBUG_QUEUE
928 printk("append busy SCB %lx; ", (ULONG) scbp); 826 printk("append busy SCB %p; ", scbp);
929#endif 827#endif
930 if (scbp->SCB_TagMsg) 828 if (scbp->tagmsg)
931 pCurHcb->HCS_ActTags[scbp->SCB_Target]++; 829 host->act_tags[scbp->target]++;
932 else 830 else
933 pCurHcb->HCS_Tcs[scbp->SCB_Target].TCS_Flags |= TCF_BUSY; 831 host->targets[scbp->target].flags |= TCF_BUSY;
934 scbp->SCB_Status = SCB_BUSY; 832 scbp->status = SCB_BUSY;
935 scbp->SCB_NxtScb = NULL; 833 scbp->next = NULL;
936 if (pCurHcb->HCS_LastBusy != NULL) { 834 if (host->last_busy != NULL) {
937 pCurHcb->HCS_LastBusy->SCB_NxtScb = scbp; 835 host->last_busy->next = scbp;
938 pCurHcb->HCS_LastBusy = scbp; 836 host->last_busy = scbp;
939 } else { 837 } else {
940 pCurHcb->HCS_FirstBusy = scbp; 838 host->first_busy = scbp;
941 pCurHcb->HCS_LastBusy = scbp; 839 host->last_busy = scbp;
942 } 840 }
943} 841}
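
/**
 * initio_pop_busy_scb - Take the head of the busy queue
 * @host: InitIO host owning the queue
 *
 * Remove and return the first busy SCB, undoing the tag or TCF_BUSY
 * accounting that was done when it was queued. Returns NULL if the
 * queue is empty.
 */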
static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host *host)
{
	struct scsi_ctrl_blk *tmp;

	if ((tmp = host->first_busy) != NULL) {
		if ((host->first_busy = tmp->next) == NULL)
			host->last_busy = NULL;
		tmp->next = NULL;
		if (tmp->tagmsg)
			host->act_tags[tmp->target]--;
		else
			host->targets[tmp->target].flags &= ~TCF_BUSY;
	}
#if DEBUG_QUEUE
	printk("Pop busy SCB %p; ", tmp);
#endif
	return tmp;
}
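
/**
 * initio_unlink_busy_scb - Unlink an SCB from the busy queue
 * @host: InitIO host owning the queue
 * @scb: SCB to remove
 *
 * Unlink the SCB from the busy queue and release its tag or the
 * TCF_BUSY flag of its target.
 */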
static void initio_unlink_busy_scb(struct initio_host *host, struct scsi_ctrl_blk *scb)
{
	struct scsi_ctrl_blk *tmp, *prev;

#if DEBUG_QUEUE
	printk("unlink busy SCB %p; ", scb);
#endif

	prev = tmp = host->first_busy;
	while (tmp != NULL) {
		if (scb == tmp) {	/* Unlink this SCB */
			if (tmp == host->first_busy) {
				if ((host->first_busy = tmp->next) == NULL)
					host->last_busy = NULL;
			} else {
				prev->next = tmp->next;
				if (tmp == host->last_busy)
					host->last_busy = prev;
			}
			tmp->next = NULL;
			if (tmp->tagmsg)
				host->act_tags[tmp->target]--;
			else
				host->targets[tmp->target].flags &= ~TCF_BUSY;
			break;
		}
		prev = tmp;
		tmp = tmp->next;
	}
}
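
/**
 * initio_find_busy_scb - Find a busy SCB by target/lun
 * @host: InitIO host owning the queue
 * @tarlun: target and lun encoded as (lun << 8) | target
 *
 * Walk the busy queue looking for the SCB whose target and lun
 * match. Returns the SCB, or NULL if no match is found.
 */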
struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host *host, u16 tarlun)
{
	struct scsi_ctrl_blk *tmp;
	u16 scbp_tarlun;

	tmp = host->first_busy;
	while (tmp != NULL) {
		scbp_tarlun = (tmp->lun << 8) | (tmp->target);
		if (scbp_tarlun == tarlun)	/* Found the SCB for this target/lun */
			break;
		tmp = tmp->next;
	}
#if DEBUG_QUEUE
	printk("find busy SCB %p; ", tmp);
#endif
	return tmp;
}
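
/**
 * initio_append_done_scb - Append an SCB to the done queue
 * @host: InitIO host owning the queue
 * @scbp: Completed SCB to queue
 *
 * Mark the SCB done and add it to the tail of the done queue for
 * tulip_main() to post-process.
 */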
static void initio_append_done_scb(struct initio_host *host, struct scsi_ctrl_blk *scbp)
{
#if DEBUG_QUEUE
	printk("append done SCB %p; ", scbp);
#endif

	scbp->status = SCB_DONE;
	scbp->next = NULL;
	if (host->last_done != NULL) {
		host->last_done->next = scbp;
		host->last_done = scbp;
	} else {
		host->first_done = scbp;
		host->last_done = scbp;
	}
}
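
/**
 * initio_find_done_scb - Take the head of the done queue
 * @host: InitIO host owning the queue
 *
 * Remove and return the first completed SCB, or NULL if the done
 * queue is empty.
 */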
struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host *host)
{
	struct scsi_ctrl_blk *tmp;

	if ((tmp = host->first_done) != NULL) {
		if ((host->first_done = tmp->next) == NULL)
			host->last_done = NULL;
		tmp->next = NULL;
	}
#if DEBUG_QUEUE
	printk("find done SCB %p; ", tmp);
#endif
	return tmp;
}
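
/**
 * initio_abort_srb - Abort a queued command
 * @host: InitIO host
 * @srbp: SCSI command block to abort
 *
 * If the state machine is idle, run it first so that the queues are
 * up to date, then search the pending and busy queues for the
 * command. A pending command is completed as HOST_ABORTED; a command
 * that is active on the bus, or busy and untagged, can only be
 * reported as busy.
 */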
static int initio_abort_srb(struct initio_host *host, struct scsi_cmnd *srbp)
{
	unsigned long flags;
	struct scsi_ctrl_blk *tmp, *prev;

	spin_lock_irqsave(&host->semaph_lock, flags);

	if ((host->semaph == 0) && (host->active == NULL)) {
		/* disable Jasmin SCSI Int */
		outb(0x1F, host->addr + TUL_Mask);
		spin_unlock_irqrestore(&host->semaph_lock, flags);
		/* FIXME: synchronize_irq needed ? */
		tulip_main(host);
		spin_lock_irqsave(&host->semaph_lock, flags);
		host->semaph = 1;
		outb(0x0F, host->addr + TUL_Mask);
		spin_unlock_irqrestore(&host->semaph_lock, flags);
		return SCSI_ABORT_SNOOZE;
	}
	prev = tmp = host->first_pending;	/* Check Pend queue */
	while (tmp != NULL) {
		if (tmp->srb == srbp) {
			if (tmp == host->active) {
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_BUSY;
			} else if (tmp == host->first_pending) {
				if ((host->first_pending = tmp->next) == NULL)
					host->last_pending = NULL;
			} else {
				prev->next = tmp->next;
				if (tmp == host->last_pending)
					host->last_pending = prev;
			}
			tmp->hastat = HOST_ABORTED;
			tmp->flags |= SCF_DONE;
			if (tmp->flags & SCF_POST)
				(*tmp->post) ((u8 *) host, (u8 *) tmp);
			spin_unlock_irqrestore(&host->semaph_lock, flags);
			return SCSI_ABORT_SUCCESS;
		}
		prev = tmp;
		tmp = tmp->next;
	}

	prev = tmp = host->first_busy;	/* Check Busy queue */
	while (tmp != NULL) {
		if (tmp->srb == srbp) {
			if (tmp == host->active) {
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_BUSY;
			} else if (tmp->tagmsg == 0) {
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_BUSY;
			} else {
				host->act_tags[tmp->target]--;
				if (tmp == host->first_busy) {
					if ((host->first_busy = tmp->next) == NULL)
						host->last_busy = NULL;
				} else {
					prev->next = tmp->next;
					if (tmp == host->last_busy)
						host->last_busy = prev;
				}
				tmp->next = NULL;
				tmp->hastat = HOST_ABORTED;
				tmp->flags |= SCF_DONE;
				if (tmp->flags & SCF_POST)
					(*tmp->post) ((u8 *) host, (u8 *) tmp);
				spin_unlock_irqrestore(&host->semaph_lock, flags);
				return SCSI_ABORT_SUCCESS;
			}
		}
		prev = tmp;
		tmp = tmp->next;
	}
	spin_unlock_irqrestore(&host->semaph_lock, flags);
	return SCSI_ABORT_NOT_RUNNING;
}
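
/**
 * initio_bad_seq - Handle an illegal SCSI phase sequence
 * @host: InitIO host that saw the bad phase
 *
 * Fail the active command with HOST_BAD_PHAS, stop the busmaster and
 * reset the SCSI bus to recover.
 */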
static int initio_bad_seq(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb;

	printk("initio_bad_seq c=%d\n", host->index);

	if ((scb = host->active) != NULL) {
		initio_unlink_busy_scb(host, scb);
		scb->hastat = HOST_BAD_PHAS;
		scb->tastat = 0;
		initio_append_done_scb(host, scb);
	}
	initio_stop_bm(host);
	initio_reset_scsi(host, 8);
	return initio_post_scsi_rst(host);
}

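/**
 * initio_exec_scb - Queue an SCB for execution
 * @host: InitIO host
 * @scb: SCB to execute
 *
 * Append the SCB to the pending queue and, if the state machine is
 * not already running, mask the chip interrupt and run it by hand
 * until it has to wait for the hardware again.
 */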
static void initio_exec_scb(struct initio_host *host, struct scsi_ctrl_blk *scb)
{
	unsigned long flags;

	scb->mode = 0;

	scb->sgidx = 0;
	scb->sgmax = scb->sglen;

	spin_lock_irqsave(&host->semaph_lock, flags);

	initio_append_pend_scb(host, scb);	/* Append this SCB to Pending queue */

	if (host->semaph == 1) {
		/* Disable Jasmin SCSI Int */
		outb(0x1F, host->addr + TUL_Mask);
		host->semaph = 0;
		spin_unlock_irqrestore(&host->semaph_lock, flags);

		tulip_main(host);

		spin_lock_irqsave(&host->semaph_lock, flags);
		host->semaph = 1;
		outb(0x0F, host->addr + TUL_Mask);
	}
	spin_unlock_irqrestore(&host->semaph_lock, flags);
}
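
/**
 * initio_isr - Interrupt service routine core
 * @host: InitIO host with a possible interrupt pending
 *
 * If the chip has an interrupt pending and the state machine is not
 * already active, mask the SCSI interrupt and run tulip_main().
 * Returns 1 if the interrupt was handled, otherwise 0.
 */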
static int initio_isr(struct initio_host *host)
{
	if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
		if (host->semaph == 1) {
			/* Disable Tulip SCSI Int */
			outb(0x1F, host->addr + TUL_Mask);
			host->semaph = 0;

			tulip_main(host);

			host->semaph = 1;
			outb(0x0F, host->addr + TUL_Mask);
			return 1;
		}
	}
	return 0;
}
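
/**
 * tulip_main - Run the driver state machine
 * @host: InitIO host
 *
 * Run tulip_scsi() and then post-process the completed SCBs: requeue
 * the command on a queue full condition, turn a check condition into
 * an automatic REQUEST SENSE, and call the post handler for finished
 * commands. Loops until no interrupt is pending and either a command
 * is active on the bus or nothing runnable is left pending.
 */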
static int tulip_main(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb;

	for (;;) {
		tulip_scsi(host);	/* Call tulip_scsi */

		/* Walk the list of completed SCBs */
		while ((scb = initio_find_done_scb(host)) != NULL) {	/* find done entry */
			if (scb->tastat == INI_QUEUE_FULL) {
				host->max_tags[scb->target] =
				    host->act_tags[scb->target] - 1;
				scb->tastat = 0;
				initio_append_pend_scb(host, scb);
				continue;
			}
			if (!(scb->mode & SCM_RSENS)) {		/* not in auto req. sense mode */
				if (scb->tastat == 2) {
					/* clr sync. nego flag */
					if (scb->flags & SCF_SENSE) {
						u8 len;
						len = scb->senselen;
						if (len == 0)
							len = 1;
						scb->buflen = scb->senselen;
						scb->bufptr = scb->senseptr;
						scb->flags &= ~(SCF_SG | SCF_DIR);	/* for xfer_data_in */
						/* so, we won't report wrong direction in xfer_data_in,
						   and won't report HOST_DO_DU in state_6 */
						scb->mode = SCM_RSENS;
						scb->ident &= 0xBF;	/* Disable Disconnect */
						scb->tagmsg = 0;
						scb->tastat = 0;
						scb->cdblen = 6;
						scb->cdb[0] = SCSICMD_RequestSense;
						scb->cdb[1] = 0;
						scb->cdb[2] = 0;
						scb->cdb[3] = 0;
						scb->cdb[4] = len;
						scb->cdb[5] = 0;
						initio_push_pend_scb(host, scb);
						break;
					}
				}
			} else {	/* in request sense mode */
				if (scb->tastat == 2) {	/* check condition status again after sending
							   request sense cmd 0x3 */
					scb->hastat = HOST_BAD_PHAS;
				}
				scb->tastat = 2;
			}
			scb->flags |= SCF_DONE;
			if (scb->flags & SCF_POST) {
				/* FIXME: only one post method and lose casts */
				(*scb->post) ((u8 *) host, (u8 *) scb);
			}
		}	/* while */
		/* find_active: */
		if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
			continue;
		if (host->active)	/* return to OS and wait for xfer_done_ISR/Selected_ISR */
			return 1;	/* return to OS, enable interrupt */
		/* Check pending SCB */
		if (initio_find_first_pend_scb(host) == NULL)
			return 1;	/* return to OS, enable interrupt */
	}	/* End of for loop */
	/* statement won't reach here */
}
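
/**
 * tulip_scsi - Service the SCSI interface
 * @host: InitIO host
 *
 * Handle any pending interrupt condition (bus reset, reselection,
 * selection timeout, disconnect, or bus service) and, when the bus
 * is free, start the next pending SCB with the appropriate selection
 * and negotiation sequence.
 */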
static void tulip_scsi(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb;
	struct target_control *active_tc;

	/* make sure to service interrupt asap */
	if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
		host->phase = host->jsstatus0 & TSS_PH_MASK;
		host->jsstatus1 = inb(host->addr + TUL_SStatus1);
		host->jsint = inb(host->addr + TUL_SInt);
		if (host->jsint & TSS_SCSIRST_INT) {	/* SCSI bus reset detected */
			int_initio_scsi_rst(host);
			return;
		}
		if (host->jsint & TSS_RESEL_INT) {	/* if selected/reselected interrupt */
			if (int_initio_resel(host) == 0)
				initio_next_state(host);
			return;
		}
		if (host->jsint & TSS_SEL_TIMEOUT) {
			int_initio_busfree(host);
			return;
		}
		if (host->jsint & TSS_DISC_INT) {	/* BUS disconnection */
			int_initio_busfree(host);	/* unexpected bus free or sel timeout */
			return;
		}
		if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) {	/* func complete or Bus service */
			if ((scb = host->active) != NULL)
				initio_next_state(host);
			return;
		}
	}
	if (host->active != NULL)
		return;

	if ((scb = initio_find_first_pend_scb(host)) == NULL)
		return;

	/* program HBA's SCSI ID & target SCSI ID */
	outb((host->scsi_id << 4) | (scb->target & 0x0F),
		host->addr + TUL_SScsiId);
	if (scb->opcode == ExecSCSI) {
		active_tc = &host->targets[scb->target];

		if (scb->tagmsg)
			active_tc->drv_flags |= TCF_DRV_EN_TAG;
		else
			active_tc->drv_flags &= ~TCF_DRV_EN_TAG;

		outb(active_tc->js_period, host->addr + TUL_SPeriod);
		if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {	/* do wdtr negotiation */
			initio_select_atn_stop(host, scb);
		} else {
			if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {	/* do sync negotiation */
				initio_select_atn_stop(host, scb);
			} else {
				if (scb->tagmsg)
					initio_select_atn3(host, scb);
				else
					initio_select_atn(host, scb);
			}
		}
		if (scb->flags & SCF_POLL) {
			while (wait_tulip(host) != -1) {
				if (initio_next_state(host) == -1)
					break;
			}
		}
	} else if (scb->opcode == BusDevRst) {
		initio_select_atn_stop(host, scb);
		scb->next_state = 8;
		if (scb->flags & SCF_POLL) {
			while (wait_tulip(host) != -1) {
				if (initio_next_state(host) == -1)
					break;
			}
		}
	} else if (scb->opcode == AbortCmd) {
		if (initio_abort_srb(host, scb->srb) != 0) {
			initio_unlink_pend_scb(host, scb);
			initio_release_scb(host, scb);
		} else {
			scb->opcode = BusDevRst;
			initio_select_atn_stop(host, scb);
			scb->next_state = 8;
		}
	} else {
		initio_unlink_pend_scb(host, scb);
		scb->hastat = 0x16;	/* bad command */
		initio_append_done_scb(host, scb);
	}
}

/**
 * initio_next_state - Next SCSI state
 * @host: InitIO host we are processing
 *
 * Progress the active command block along the state machine until
 * we hit a state where we must wait for activity to occur.
 *
 * Returns zero or a negative code.
 */
static int initio_next_state(struct initio_host *host)
{
	int next;

	next = host->active->next_state;
	for (;;) {
		switch (next) {
		case 1:
			next = initio_state_1(host);
			break;
		case 2:
			next = initio_state_2(host);
			break;
		case 3:
			next = initio_state_3(host);
			break;
		case 4:
			next = initio_state_4(host);
			break;
		case 5:
			next = initio_state_5(host);
			break;
		case 6:
			next = initio_state_6(host);
			break;
		case 7:
			next = initio_state_7(host);
			break;
		case 8:
			return initio_bus_device_reset(host);
		default:
			return initio_bad_seq(host);
		}
		if (next <= 0)
			return next;
	}
}
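
/*
 * Overview of the state numbers dispatched above, as implemented by
 * the handlers that follow:
 *
 *	1	after selection with attention and stop
 *	2	after selection with attention (or attention3)
 *	3	before the CDB transfer is done
 *	4	data phase setup, start the DMA transfer
 *	5	after the DMA transfer is done or interrupted
 *	6	after the data phase
 *	7	flush the FIFO on an unexpected phase
 *	8	bus device reset in progress
 *
 * A handler returning the next state keeps the loop going; a return
 * of zero or less hands control back to the caller until the
 * hardware raises the next interrupt.
 */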

/**
 * initio_state_1 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * Perform SCSI state processing for Select/Attention/Stop
 */
static int initio_state_1(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
	printk("-s1-");
#endif

	/* Move the SCB from pending to busy */
	initio_unlink_pend_scb(host, scb);
	initio_append_busy_scb(host, scb);

	outb(active_tc->sconfig0, host->addr + TUL_SConfig);
	/* ATN on */
	if (host->phase == MSG_OUT) {
		outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
		outb(scb->ident, host->addr + TUL_SFifo);

		if (scb->tagmsg) {
			outb(scb->tagmsg, host->addr + TUL_SFifo);
			outb(scb->tagid, host->addr + TUL_SFifo);
		}
		if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
			active_tc->flags |= TCF_WDTR_DONE;
			outb(MSG_EXTEND, host->addr + TUL_SFifo);
			outb(2, host->addr + TUL_SFifo);	/* Extended msg length */
			outb(3, host->addr + TUL_SFifo);	/* Wide data transfer request */
			outb(1, host->addr + TUL_SFifo);	/* Start from 16 bits */
		} else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
			active_tc->flags |= TCF_SYNC_DONE;
			outb(MSG_EXTEND, host->addr + TUL_SFifo);
			outb(3, host->addr + TUL_SFifo);	/* extended msg length */
			outb(1, host->addr + TUL_SFifo);	/* sync request */
			outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
			outb(MAX_OFFSET, host->addr + TUL_SFifo);	/* REQ/ACK offset */
		}
		outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
		if (wait_tulip(host) == -1)
			return -1;
	}
	outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
	outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
	/* Into before CDB xfer */
	return 3;
}

/**
 * initio_state_2 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * State after selection with attention
 * State after selection with attention3
 */
static int initio_state_2(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
#if DEBUG_STATE
	printk("-s2-");
#endif

	initio_unlink_pend_scb(host, scb);
	initio_append_busy_scb(host, scb);

	outb(active_tc->sconfig0, host->addr + TUL_SConfig);

	if (host->jsstatus1 & TSS_CMD_PH_CMP)
		return 4;

	outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
	outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
	/* Into before CDB xfer */
	return 3;
}

/**
 * initio_state_3 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * State before the CDB transfer is done
 */
static int initio_state_3(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb = host->active;
	struct target_control *active_tc = host->active_tc;
	int i;

#if DEBUG_STATE
	printk("-s3-");
#endif
	for (;;) {
		switch (host->phase) {
		case CMD_OUT:	/* Command out phase */
			for (i = 0; i < (int) scb->cdblen; i++)
				outb(scb->cdb[i], host->addr + TUL_SFifo);
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			if (wait_tulip(host) == -1)
				return -1;
			if (host->phase == CMD_OUT)
				return initio_bad_seq(host);
			return 4;

		case MSG_IN:	/* Message in phase */
			scb->next_state = 3;
			if (initio_msgin(host) == -1)
				return -1;
			break;

		case STATUS_IN:	/* Status phase */
			if (initio_status_msg(host) == -1)
				return -1;
			break;

		case MSG_OUT:	/* Message out phase */
			if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
				outb(MSG_NOP, host->addr + TUL_SFifo);	/* msg nop */
				outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
				if (wait_tulip(host) == -1)
					return -1;
			} else {
				active_tc->flags |= TCF_SYNC_DONE;

				outb(MSG_EXTEND, host->addr + TUL_SFifo);
				outb(3, host->addr + TUL_SFifo);	/* ext. msg len */
				outb(1, host->addr + TUL_SFifo);	/* sync request */
				outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
				outb(MAX_OFFSET, host->addr + TUL_SFifo);	/* REQ/ACK offset */
				outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
				if (wait_tulip(host) == -1)
					return -1;
				outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
				outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
			}
			break;
		default:
			return initio_bad_seq(host);
		}
	}
}

/**
 * initio_state_4 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * SCSI state machine. State 4
 */
static int initio_state_4(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb = host->active;

#if DEBUG_STATE
	printk("-s4-");
#endif
	if ((scb->flags & SCF_DIR) == SCF_NO_XF)
		return 6;	/* Go to state 6 (After data) */

	for (;;) {
		if (scb->buflen == 0)
			return 6;

		switch (host->phase) {

		case STATUS_IN:	/* Status phase */
			if ((scb->flags & SCF_DIR) != 0)	/* if direction bit set then report data underrun */
				scb->hastat = HOST_DO_DU;
			if ((initio_status_msg(host)) == -1)
				return -1;
			break;

		case MSG_IN:	/* Message in phase */
			scb->next_state = 0x4;
			if (initio_msgin(host) == -1)
				return -1;
			break;

		case MSG_OUT:	/* Message out phase */
			if (host->jsstatus0 & TSS_PAR_ERROR) {
				scb->buflen = 0;
				scb->hastat = HOST_DO_DU;
				if (initio_msgout_ide(host) == -1)
					return -1;
				return 6;
			} else {
				outb(MSG_NOP, host->addr + TUL_SFifo);	/* msg nop */
				outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
				if (wait_tulip(host) == -1)
					return -1;
			}
			break;

		case DATA_IN:	/* Data in phase */
			return initio_xfer_data_in(host);

		case DATA_OUT:	/* Data out phase */
			return initio_xfer_data_out(host);

		default:
			return initio_bad_seq(host);
		}
	}
}

/**
 * initio_state_5 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * State after the DMA transfer is done or a phase change occurred
 * before the transfer completed
 */
static int initio_state_5(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb = host->active;
	long cnt, xcnt;		/* cannot use unsigned !! code: if (xcnt < 0) */

#if DEBUG_STATE
	printk("-s5-");
#endif
	/*------ get remaining count -------*/
	cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;

	if (inb(host->addr + TUL_XCmd) & 0x20) {
		/* ----------------------- DATA_IN ----------------------------- */
		/* check scsi parity error */
		if (host->jsstatus0 & TSS_PAR_ERROR)
			scb->hastat = HOST_DO_DU;
		if (inb(host->addr + TUL_XStatus) & XPEND) {	/* DMA xfer pending, Send STOP */
			/* tell Hardware scsi xfer has been terminated */
			outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
			/* wait until DMA xfer not pending */
			while (inb(host->addr + TUL_XStatus) & XPEND)
				cpu_relax();
		}
	} else {
		/*-------- DATA OUT -----------*/
		if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
			if (host->active_tc->js_period & TSC_WIDE_SCSI)
				cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
			else
				cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
		}
		if (inb(host->addr + TUL_XStatus) & XPEND) {	/* if DMA xfer is pending, abort DMA xfer */
			outb(TAX_X_ABT, host->addr + TUL_XCmd);
			/* wait Abort DMA xfer done */
			while ((inb(host->addr + TUL_Int) & XABT) == 0)
				cpu_relax();
		}
		if ((cnt == 1) && (host->phase == DATA_OUT)) {
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			if (wait_tulip(host) == -1)
				return -1;
			cnt = 0;
		} else {
			if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
				outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
		}
	}
	if (cnt == 0) {
		scb->buflen = 0;
		return 6;	/* After Data */
	}
	/* Update active data pointer */
	xcnt = (long) scb->buflen - cnt;	/* xcnt == bytes already xferred */
	scb->buflen = (u32) cnt;	/* cnt == bytes left to be xferred */
	if (scb->flags & SCF_SG) {
		struct sg_entry *sgp;
		unsigned long i;

		sgp = &scb->sglist[scb->sgidx];
		for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
			xcnt -= (long) sgp->len;
			if (xcnt < 0) {		/* this sgp xfer half done */
				xcnt += (long) sgp->len;	/* xcnt == bytes xferred in this sgp */
				sgp->data += (u32) xcnt;	/* new ptr to be xfer */
				sgp->len -= (u32) xcnt;		/* new len to be xfer */
				scb->bufptr += ((u32) (i - scb->sgidx) << 3);
				/* new SG table ptr */
				scb->sglen = (u8) (scb->sgmax - i);
				/* new SG table len */
				scb->sgidx = (u16) i;
				/* for next disc and come in this loop */
				return 4;	/* Go to state 4 */
			}
			/* else (xcnt >= 0), i.e. this sgp already xferred */
		}	/* for */
		return 6;	/* Go to state 6 */
	} else {
		scb->bufptr += (u32) xcnt;
	}
	return 4;	/* Go to state 4 */
}

/**
 * initio_state_6 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * State after the data phase
 */
static int initio_state_6(struct initio_host *host)
{
	struct scsi_ctrl_blk *scb = host->active;

#if DEBUG_STATE
	printk("-s6-");
#endif
	for (;;) {
		switch (host->phase) {
		case STATUS_IN:	/* Status phase */
			if ((initio_status_msg(host)) == -1)
				return -1;
			break;

		case MSG_IN:	/* Message in phase */
			scb->next_state = 6;
			if ((initio_msgin(host)) == -1)
				return -1;
			break;

		case MSG_OUT:	/* Message out phase */
			outb(MSG_NOP, host->addr + TUL_SFifo);	/* msg nop */
			outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
			if (wait_tulip(host) == -1)
				return -1;
			break;

		case DATA_IN:	/* Data in phase */
			return initio_xpad_in(host);

		case DATA_OUT:	/* Data out phase */
			return initio_xpad_out(host);

		default:
			return initio_bad_seq(host);
		}
	}
}

/**
 * initio_state_7 - SCSI state machine
 * @host: InitIO host we are controlling
 *
 * Flush any bytes left in the SCSI FIFO and hand off to state 6;
 * a data phase at this point is a sequence error.
 */
int initio_state_7(struct initio_host *host)
{
	int cnt, i;

#if DEBUG_STATE
	printk("-s7-");
#endif
	/* flush SCSI FIFO */
	cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
	if (cnt) {
		for (i = 0; i < cnt; i++)
			inb(host->addr + TUL_SFifo);
	}
	switch (host->phase) {
	case DATA_IN:	/* Data in phase */
	case DATA_OUT:	/* Data out phase */
		return initio_bad_seq(host);
	default:
		return 6;	/* Go to state 6 */
	}
}
1909 1694
1910/***************************************************************************/ 1695/**
1911int tul_xfer_data_in(HCS * pCurHcb) 1696 * initio_xfer_data_in - Commence data input
1697 * @host: InitIO host in use
1698 *
1699 * Commence a block of data transfer. The transfer itself will
1700 * be managed by the controller and we will get a completion (or
1701 * failure) interrupt.
1702 */
1703static int initio_xfer_data_in(struct initio_host * host)
1912{ 1704{
1913 SCB *pCurScb = pCurHcb->HCS_ActScb; 1705 struct scsi_ctrl_blk *scb = host->active;
1914 1706
1915 if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DOUT) { 1707 if ((scb->flags & SCF_DIR) == SCF_DOUT)
1916 return (6); /* wrong direction */ 1708 return 6; /* wrong direction */
1917 }
1918 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
1919 1709
1920 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_IN); /* 7/25/95 */ 1710 outl(scb->buflen, host->addr + TUL_SCnt0);
1711 outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
1921 1712
1922 if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */ 1713 if (scb->flags & SCF_SG) { /* S/G xfer */
1923 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3); 1714 outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
1924 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1715 outl(scb->bufptr, host->addr + TUL_XAddH);
1925 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_IN); 1716 outb(TAX_SG_IN, host->addr + TUL_XCmd);
1926 } else { 1717 } else {
1927 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen); 1718 outl(scb->buflen, host->addr + TUL_XCntH);
1928 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1719 outl(scb->bufptr, host->addr + TUL_XAddH);
1929 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_IN); 1720 outb(TAX_X_IN, host->addr + TUL_XCmd);
1930 } 1721 }
1931 pCurScb->SCB_NxtStat = 0x5; 1722 scb->next_state = 0x5;
1932 return (0); /* return to OS, wait xfer done , let jas_isr come in */ 1723 return 0; /* return to OS, wait xfer done , let jas_isr come in */
1933} 1724}
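
Both transfer-setup routines illustrate the mechanical part of this conversion: the driver-private TUL_RD/TUL_WR/TUL_WRLONG accessors become the standard inb()/outb()/outl() port helpers, with the argument order flipped for writes. A sketch of the mapping, assuming the old macros were thin wrappers over port I/O (which the 1:1 replacements above suggest):

    #include <asm/io.h>     /* inb, outb, outl */

    static inline u8 tul_rd(unsigned long base, int reg)
    {
            return inb(base + reg);          /* TUL_RD(base, reg)           */
    }

    static inline void tul_wr(unsigned long base_plus_reg, u8 val)
    {
            outb(val, base_plus_reg);        /* TUL_WR(base + reg, val)     */
    }

    static inline void tul_wrlong(unsigned long base_plus_reg, u32 val)
    {
            outl(val, base_plus_reg);        /* TUL_WRLONG(base + reg, val) */
    }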
1934 1725
1726/**
1727 * initio_xfer_data_out - Commence data output
1728 * @host: InitIO host in use
1729 *
1730 * Commence a block of data transfer. The transfer itself will
1731 * be managed by the controller and we will get a completion (or
1732 * failure) interrupt.
1733 */
1935 1734
1936/***************************************************************************/ 1735static int initio_xfer_data_out(struct initio_host * host)
1937int tul_xfer_data_out(HCS * pCurHcb)
1938{ 1736{
1939 SCB *pCurScb = pCurHcb->HCS_ActScb; 1737 struct scsi_ctrl_blk *scb = host->active;
1940 1738
1941 if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DIN) { 1739 if ((scb->flags & SCF_DIR) == SCF_DIN)
1942 return (6); /* wrong direction */ 1740 return 6; /* wrong direction */
1943 }
1944 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
1945 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_OUT);
1946 1741
1947 if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */ 1742 outl(scb->buflen, host->addr + TUL_SCnt0);
1948 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3); 1743 outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
1949 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1744
1950 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_OUT); 1745 if (scb->flags & SCF_SG) { /* S/G xfer */
1746 outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
1747 outl(scb->bufptr, host->addr + TUL_XAddH);
1748 outb(TAX_SG_OUT, host->addr + TUL_XCmd);
1951 } else { 1749 } else {
1952 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen); 1750 outl(scb->buflen, host->addr + TUL_XCntH);
1953 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1751 outl(scb->bufptr, host->addr + TUL_XAddH);
1954 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_OUT); 1752 outb(TAX_X_OUT, host->addr + TUL_XCmd);
1955 } 1753 }
1956 1754
1957 pCurScb->SCB_NxtStat = 0x5; 1755 scb->next_state = 0x5;
1958 return (0); /* return to OS, wait xfer done , let jas_isr come in */ 1756 return 0; /* return to OS, wait xfer done , let jas_isr come in */
1959} 1757}
1960 1758
1961 1759int initio_xpad_in(struct initio_host * host)
1962/***************************************************************************/
1963int tul_xpad_in(HCS * pCurHcb)
1964{ 1760{
1965 SCB *pCurScb = pCurHcb->HCS_ActScb; 1761 struct scsi_ctrl_blk *scb = host->active;
1966 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1762 struct target_control *active_tc = host->active_tc;
1967 1763
1968 if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) { 1764 if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
1969 pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */ 1765 scb->hastat = HOST_DO_DU; /* over run */
1970 }
1971 for (;;) { 1766 for (;;) {
1972 if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI) 1767 if (active_tc->js_period & TSC_WIDE_SCSI)
1973 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2); 1768 outl(2, host->addr + TUL_SCnt0);
1974 else 1769 else
1975 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1770 outl(1, host->addr + TUL_SCnt0);
1976 1771
1977 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 1772 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
1978 if ((wait_tulip(pCurHcb)) == -1) { 1773 if (wait_tulip(host) == -1)
1979 return (-1); 1774 return -1;
1775 if (host->phase != DATA_IN) {
1776 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
1777 return 6;
1980 } 1778 }
1981 if (pCurHcb->HCS_Phase != DATA_IN) { 1779 inb(host->addr + TUL_SFifo);
1982 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
1983 return (6);
1984 }
1985 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
1986 } 1780 }
1987} 1781}
1988 1782
1989int tul_xpad_out(HCS * pCurHcb) 1783int initio_xpad_out(struct initio_host * host)
1990{ 1784{
1991 SCB *pCurScb = pCurHcb->HCS_ActScb; 1785 struct scsi_ctrl_blk *scb = host->active;
1992 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1786 struct target_control *active_tc = host->active_tc;
1993 1787
1994 if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) { 1788 if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
1995 pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */ 1789 scb->hastat = HOST_DO_DU; /* over run */
1996 }
1997 for (;;) { 1790 for (;;) {
1998 if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI) 1791 if (active_tc->js_period & TSC_WIDE_SCSI)
1999 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2); 1792 outl(2, host->addr + TUL_SCnt0);
2000 else 1793 else
2001 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1794 outl(1, host->addr + TUL_SCnt0);
2002 1795
2003 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0); 1796 outb(0, host->addr + TUL_SFifo);
2004 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1797 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2005 if ((wait_tulip(pCurHcb)) == -1) { 1798 if ((wait_tulip(host)) == -1)
2006 return (-1); 1799 return -1;
2007 } 1800 if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
2008 if (pCurHcb->HCS_Phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */ 1801 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
2009 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); 1802 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2010 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1803 return 6;
2011 return (6);
2012 } 1804 }
2013 } 1805 }
2014} 1806}
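
initio_xpad_in() and initio_xpad_out() share the same width decision: pad one byte per cycle on a narrow bus, two on a wide one. A hypothetical helper capturing that shared computation (not present in the driver):

    static inline u32 initio_pad_width(const struct target_control *tc)
    {
            /* TSC_WIDE_SCSI in js_period marks a 16-bit agreement */
            return (tc->js_period & TSC_WIDE_SCSI) ? 2 : 1;
    }

With it, the head of both loops would reduce to outl(initio_pad_width(active_tc), host->addr + TUL_SCnt0);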
2015 1807
2016 1808int initio_status_msg(struct initio_host * host)
2017/***************************************************************************/
2018int tul_status_msg(HCS * pCurHcb)
2019{ /* status & MSG_IN */ 1809{ /* status & MSG_IN */
2020 SCB *pCurScb = pCurHcb->HCS_ActScb; 1810 struct scsi_ctrl_blk *scb = host->active;
2021 BYTE msg; 1811 u8 msg;
1812
1813 outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
1814 if (wait_tulip(host) == -1)
1815 return -1;
2022 1816
2023 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_CMD_COMP);
2024 if ((wait_tulip(pCurHcb)) == -1) {
2025 return (-1);
2026 }
2027 /* get status */ 1817 /* get status */
2028 pCurScb->SCB_TaStat = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 1818 scb->tastat = inb(host->addr + TUL_SFifo);
2029 1819
2030 if (pCurHcb->HCS_Phase == MSG_OUT) { 1820 if (host->phase == MSG_OUT) {
2031 if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { 1821 if (host->jsstatus0 & TSS_PAR_ERROR)
2032 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY); 1822 outb(MSG_PARITY, host->addr + TUL_SFifo);
2033 } else { 1823 else
2034 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); 1824 outb(MSG_NOP, host->addr + TUL_SFifo);
2035 } 1825 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2036 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1826 return wait_tulip(host);
2037 return (wait_tulip(pCurHcb)); 1827 }
2038 } 1828 if (host->phase == MSG_IN) {
2039 if (pCurHcb->HCS_Phase == MSG_IN) { 1829 msg = inb(host->addr + TUL_SFifo);
2040 msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 1830 if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
2041 if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { /* Parity error */ 1831 if ((initio_msgin_accept(host)) == -1)
2042 if ((tul_msgin_accept(pCurHcb)) == -1) 1832 return -1;
2043 return (-1); 1833 if (host->phase != MSG_OUT)
2044 if (pCurHcb->HCS_Phase != MSG_OUT) 1834 return initio_bad_seq(host);
2045 return (tul_bad_seq(pCurHcb)); 1835 outb(MSG_PARITY, host->addr + TUL_SFifo);
2046 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY); 1836 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2047 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1837 return wait_tulip(host);
2048 return (wait_tulip(pCurHcb));
2049 } 1838 }
2050 if (msg == 0) { /* Command complete */ 1839 if (msg == 0) { /* Command complete */
2051 1840
2052 if ((pCurScb->SCB_TaStat & 0x18) == 0x10) { /* No link support */ 1841 if ((scb->tastat & 0x18) == 0x10) /* No link support */
2053 return (tul_bad_seq(pCurHcb)); 1842 return initio_bad_seq(host);
2054 } 1843 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2055 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1844 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2056 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 1845 return initio_wait_done_disc(host);
2057 return tul_wait_done_disc(pCurHcb);
2058 1846
2059 } 1847 }
2060 if ((msg == MSG_LINK_COMP) || (msg == MSG_LINK_FLAG)) { 1848 if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
2061 if ((pCurScb->SCB_TaStat & 0x18) == 0x10) 1849 if ((scb->tastat & 0x18) == 0x10)
2062 return (tul_msgin_accept(pCurHcb)); 1850 return initio_msgin_accept(host);
2063 } 1851 }
2064 } 1852 }
2065 return (tul_bad_seq(pCurHcb)); 1853 return initio_bad_seq(host);
2066} 1854}
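
The (tastat & 0x18) == 0x10 test above is worth unpacking: it matches the SCSI-2 status codes INTERMEDIATE (0x10) and INTERMEDIATE-CONDITION MET (0x14), which only appear within linked commands -- unsupported here, hence the bad-sequence exit. The same test with a name attached, as a hypothetical helper:

    static inline int status_is_intermediate(u8 tastat)
    {
            /* 0x10 INTERMEDIATE, 0x14 INTERMEDIATE-CONDITION MET (SCSI-2) */
            return (tastat & 0x18) == 0x10;
    }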
2067 1855
2068 1856
2069/***************************************************************************/
2070/* scsi bus free */ 1857/* scsi bus free */
2071int int_tul_busfree(HCS * pCurHcb) 1858int int_initio_busfree(struct initio_host * host)
2072{ 1859{
2073 SCB *pCurScb = pCurHcb->HCS_ActScb; 1860 struct scsi_ctrl_blk *scb = host->active;
2074 1861
2075 if (pCurScb != NULL) { 1862 if (scb != NULL) {
2076 if (pCurScb->SCB_Status & SCB_SELECT) { /* selection timeout */ 1863 if (scb->status & SCB_SELECT) { /* selection timeout */
2077 tul_unlink_pend_scb(pCurHcb, pCurScb); 1864 initio_unlink_pend_scb(host, scb);
2078 pCurScb->SCB_HaStat = HOST_SEL_TOUT; 1865 scb->hastat = HOST_SEL_TOUT;
2079 tul_append_done_scb(pCurHcb, pCurScb); 1866 initio_append_done_scb(host, scb);
2080 } else { /* Unexpected bus free */ 1867 } else { /* Unexpected bus free */
2081 tul_unlink_busy_scb(pCurHcb, pCurScb); 1868 initio_unlink_busy_scb(host, scb);
2082 pCurScb->SCB_HaStat = HOST_BUS_FREE; 1869 scb->hastat = HOST_BUS_FREE;
2083 tul_append_done_scb(pCurHcb, pCurScb); 1870 initio_append_done_scb(host, scb);
2084 } 1871 }
2085 pCurHcb->HCS_ActScb = NULL; 1872 host->active = NULL;
2086 pCurHcb->HCS_ActTcs = NULL; 1873 host->active_tc = NULL;
2087 } 1874 }
2088 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 1875 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2089 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 1876 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2090 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */ 1877 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2091 return (-1); 1878 return -1;
2092} 1879}
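
Both arms of int_initio_busfree() follow the same completion choreography: unlink the SCB from whichever queue it sits on, record a host status, and append it to the done queue for the post routine. As a hedged refactoring sketch (the wrapper is hypothetical; the queue functions are the driver's own):

    static void initio_fail_scb(struct initio_host *host,
                                struct scsi_ctrl_blk *scb, u8 hastat)
    {
            if (scb->status & SCB_SELECT)           /* never got connected */
                    initio_unlink_pend_scb(host, scb);
            else                                    /* connected, then lost */
                    initio_unlink_busy_scb(host, scb);
            scb->hastat = hastat;                   /* e.g. HOST_SEL_TOUT   */
            initio_append_done_scb(host, scb);
    }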
2093 1880
2094 1881
2095/***************************************************************************/ 1882/**
2096/* scsi bus reset */ 1883 * int_initio_scsi_rst - SCSI reset occurred
2097static int int_tul_scsi_rst(HCS * pCurHcb) 1884 * @host: Host seeing the reset
1885 *
1886 * A SCSI bus reset has occurred. Clean up any pending transfer
1887 * the hardware is doing by DMA and then abort all active and
1888 * disconnected commands. The mid layer should sort the rest out
1889 * for us
1890 */
1891
1892static int int_initio_scsi_rst(struct initio_host * host)
2098{ 1893{
2099 SCB *pCurScb; 1894 struct scsi_ctrl_blk *scb;
2100 int i; 1895 int i;
2101 1896
2102 /* if DMA xfer is pending, abort DMA xfer */ 1897 /* if DMA xfer is pending, abort DMA xfer */
2103 if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & 0x01) { 1898 if (inb(host->addr + TUL_XStatus) & 0x01) {
2104 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO); 1899 outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
2105 /* wait Abort DMA xfer done */ 1900 /* wait Abort DMA xfer done */
2106 while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & 0x04) == 0); 1901 while ((inb(host->addr + TUL_Int) & 0x04) == 0)
2107 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1902 cpu_relax();
1903 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2108 } 1904 }
2109 /* Abort all active & disconnected scb */ 1905 /* Abort all active & disconnected scb */
2110 while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) { 1906 while ((scb = initio_pop_busy_scb(host)) != NULL) {
2111 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 1907 scb->hastat = HOST_BAD_PHAS;
2112 tul_append_done_scb(pCurHcb, pCurScb); 1908 initio_append_done_scb(host, scb);
2113 } 1909 }
2114 pCurHcb->HCS_ActScb = NULL; 1910 host->active = NULL;
2115 pCurHcb->HCS_ActTcs = NULL; 1911 host->active_tc = NULL;
2116 1912
2117 /* clr sync nego. done flag */ 1913 /* clr sync nego. done flag */
2118 for (i = 0; i < pCurHcb->HCS_MaxTar; i++) { 1914 for (i = 0; i < host->max_tar; i++)
2119 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); 1915 host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2120 } 1916 return -1;
2121 return (-1);
2122} 1917}
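
Note the polling change: the old empty `while (...);` spin gains a cpu_relax() body, the kernel's standard hint for tight polling loops (it lowers power and yields pipeline resources on SMT). A minimal sketch of the idiom in isolation, assuming <asm/processor.h> provides cpu_relax() as in mainline:

    #include <asm/processor.h>      /* cpu_relax() */

    static void wait_dma_abort_done(struct initio_host *host)  /* hypothetical name */
    {
            /* Bit 0x04 in TUL_Int is set once the DMA abort completes */
            while ((inb(host->addr + TUL_Int) & 0x04) == 0)
                    cpu_relax();
    }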
2123 1918
1919/**
 1920 * int_initio_resel - Reselection occurred
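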
1921 * @host: InitIO host adapter
1922 *
1923 * A SCSI reselection event has been signalled and the interrupt
1924 * is now being processed. Work out which command block needs attention
1925 * and continue processing that command.
1926 */
2124 1927
2125/***************************************************************************/ 1928int int_initio_resel(struct initio_host * host)
2126/* scsi reselection */
2127int int_tul_resel(HCS * pCurHcb)
2128{ 1929{
2129 SCB *pCurScb; 1930 struct scsi_ctrl_blk *scb;
2130 TCS *pCurTcb; 1931 struct target_control *active_tc;
2131 BYTE tag, msg = 0; 1932 u8 tag, msg = 0;
2132 BYTE tar, lun; 1933 u8 tar, lun;
2133 1934
2134 if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) { 1935 if ((scb = host->active) != NULL) {
2135 if (pCurScb->SCB_Status & SCB_SELECT) { /* if waiting for selection complete */ 1936 /* FIXME: Why check and not just clear ? */
2136 pCurScb->SCB_Status &= ~SCB_SELECT; 1937 if (scb->status & SCB_SELECT) /* if waiting for selection complete */
2137 } 1938 scb->status &= ~SCB_SELECT;
2138 pCurHcb->HCS_ActScb = NULL; 1939 host->active = NULL;
2139 } 1940 }
2140 /* --------- get target id---------------------- */ 1941 /* --------- get target id---------------------- */
2141 tar = TUL_RD(pCurHcb->HCS_Base, TUL_SBusId); 1942 tar = inb(host->addr + TUL_SBusId);
2142 /* ------ get LUN from Identify message----------- */ 1943 /* ------ get LUN from Identify message----------- */
2143 lun = TUL_RD(pCurHcb->HCS_Base, TUL_SIdent) & 0x0F; 1944 lun = inb(host->addr + TUL_SIdent) & 0x0F;
2144 /* 07/22/98 from 0x1F -> 0x0F */ 1945 /* 07/22/98 from 0x1F -> 0x0F */
2145 pCurTcb = &pCurHcb->HCS_Tcs[tar]; 1946 active_tc = &host->targets[tar];
2146 pCurHcb->HCS_ActTcs = pCurTcb; 1947 host->active_tc = active_tc;
2147 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0); 1948 outb(active_tc->sconfig0, host->addr + TUL_SConfig);
2148 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period); 1949 outb(active_tc->js_period, host->addr + TUL_SPeriod);
2149
2150 1950
2151 /* ------------- tag queueing ? ------------------- */ 1951 /* ------------- tag queueing ? ------------------- */
2152 if (pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG) { 1952 if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
2153 if ((tul_msgin_accept(pCurHcb)) == -1) 1953 if ((initio_msgin_accept(host)) == -1)
2154 return (-1); 1954 return -1;
2155 if (pCurHcb->HCS_Phase != MSG_IN) 1955 if (host->phase != MSG_IN)
2156 goto no_tag; 1956 goto no_tag;
2157 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1957 outl(1, host->addr + TUL_SCnt0);
2158 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 1958 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2159 if ((wait_tulip(pCurHcb)) == -1) 1959 if (wait_tulip(host) == -1)
2160 return (-1); 1960 return -1;
2161 msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag Message */ 1961 msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
2162 1962
2163 if ((msg < MSG_STAG) || (msg > MSG_OTAG)) /* Is simple Tag */ 1963 if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
2164 goto no_tag; 1964 goto no_tag;
2165 1965
2166 if ((tul_msgin_accept(pCurHcb)) == -1) 1966 if (initio_msgin_accept(host) == -1)
2167 return (-1); 1967 return -1;
2168 1968
2169 if (pCurHcb->HCS_Phase != MSG_IN) 1969 if (host->phase != MSG_IN)
2170 goto no_tag; 1970 goto no_tag;
2171 1971
2172 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1972 outl(1, host->addr + TUL_SCnt0);
2173 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 1973 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2174 if ((wait_tulip(pCurHcb)) == -1) 1974 if (wait_tulip(host) == -1)
2175 return (-1); 1975 return -1;
2176 tag = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag ID */ 1976 tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
2177 pCurScb = pCurHcb->HCS_Scb + tag; 1977 scb = host->scb + tag;
2178 if ((pCurScb->SCB_Target != tar) || (pCurScb->SCB_Lun != lun)) { 1978 if (scb->target != tar || scb->lun != lun) {
2179 return tul_msgout_abort_tag(pCurHcb); 1979 return initio_msgout_abort_tag(host);
2180 } 1980 }
2181 if (pCurScb->SCB_Status != SCB_BUSY) { /* 03/24/95 */ 1981 if (scb->status != SCB_BUSY) { /* 03/24/95 */
2182 return tul_msgout_abort_tag(pCurHcb); 1982 return initio_msgout_abort_tag(host);
2183 } 1983 }
2184 pCurHcb->HCS_ActScb = pCurScb; 1984 host->active = scb;
2185 if ((tul_msgin_accept(pCurHcb)) == -1) 1985 if ((initio_msgin_accept(host)) == -1)
2186 return (-1); 1986 return -1;
2187 } else { /* No tag */ 1987 } else { /* No tag */
2188 no_tag: 1988 no_tag:
2189 if ((pCurScb = tul_find_busy_scb(pCurHcb, tar | (lun << 8))) == NULL) { 1989 if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
2190 return tul_msgout_abort_targ(pCurHcb); 1990 return initio_msgout_abort_targ(host);
2191 } 1991 }
2192 pCurHcb->HCS_ActScb = pCurScb; 1992 host->active = scb;
2193 if (!(pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG)) { 1993 if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
2194 if ((tul_msgin_accept(pCurHcb)) == -1) 1994 if ((initio_msgin_accept(host)) == -1)
2195 return (-1); 1995 return -1;
2196 } 1996 }
2197 } 1997 }
2198 return 0; 1998 return 0;
2199} 1999}
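
Tagged reselection above pulls single message bytes with a recurring three-step pattern: program a one-byte count, issue TSC_XF_FIFO_IN, wait for the chip, then read the FIFO. A hypothetical helper making the pattern explicit (it assumes the driver's wait_tulip(), defined further down):

    static int initio_read_msg_byte(struct initio_host *host, u8 *byte)
    {
            outl(1, host->addr + TUL_SCnt0);                /* expect one byte  */
            outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);    /* start transfer   */
            if (wait_tulip(host) == -1)                     /* bus event/error? */
                    return -1;
            *byte = inb(host->addr + TUL_SFifo);            /* fetch the byte   */
            return 0;
    }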
2200 2000
2001/**
2002 * int_initio_bad_seq - out of phase
2003 * @host: InitIO host flagging event
2004 *
2005 * We have ended up out of phase somehow. Reset the host controller
2006 * and throw all our toys out of the pram. Let the midlayer clean up
2007 */
2201 2008
2202/***************************************************************************/ 2009static int int_initio_bad_seq(struct initio_host * host)
2203static int int_tul_bad_seq(HCS * pCurHcb)
2204{ /* target wrong phase */ 2010{ /* target wrong phase */
2205 SCB *pCurScb; 2011 struct scsi_ctrl_blk *scb;
2206 int i; 2012 int i;
2207 2013
2208 tul_reset_scsi(pCurHcb, 10); 2014 initio_reset_scsi(host, 10);
2209 2015
2210 while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) { 2016 while ((scb = initio_pop_busy_scb(host)) != NULL) {
2211 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 2017 scb->hastat = HOST_BAD_PHAS;
2212 tul_append_done_scb(pCurHcb, pCurScb); 2018 initio_append_done_scb(host, scb);
2213 }
2214 for (i = 0; i < pCurHcb->HCS_MaxTar; i++) {
2215 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2216 } 2019 }
2217 return (-1); 2020 for (i = 0; i < host->max_tar; i++)
2021 host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2022 return -1;
2218} 2023}
2219 2024
2220 2025
2221/***************************************************************************/ 2026/**
 2222 int tul_msgout_abort_targ(HCS * pCurHcb)                     2027 * initio_msgout_abort_targ - abort a target
2028 * @host: InitIO host
2029 *
2030 * Abort when the target/lun does not match or when our SCB is not
2031 * busy. Used by untagged commands.
2032 */
2033
2034static int initio_msgout_abort_targ(struct initio_host * host)
2223{ 2035{
2224 2036
2225 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2037 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2226 if (tul_msgin_accept(pCurHcb) == -1) 2038 if (initio_msgin_accept(host) == -1)
2227 return (-1); 2039 return -1;
2228 if (pCurHcb->HCS_Phase != MSG_OUT) 2040 if (host->phase != MSG_OUT)
2229 return (tul_bad_seq(pCurHcb)); 2041 return initio_bad_seq(host);
2230 2042
2231 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT); 2043 outb(MSG_ABORT, host->addr + TUL_SFifo);
2232 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2044 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2233 2045
2234 return tul_wait_disc(pCurHcb); 2046 return initio_wait_disc(host);
2235} 2047}
2236 2048
2237/***************************************************************************/ 2049/**
2238int tul_msgout_abort_tag(HCS * pCurHcb) 2050 * initio_msgout_abort_tag - abort a tag
2051 * @host: InitIO host
2052 *
2053 * Abort when the target/lun does not match or when our SCB is not
2054 * busy. Used for tagged commands.
2055 */
2056
2057static int initio_msgout_abort_tag(struct initio_host * host)
2239{ 2058{
2240 2059
2241 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2060 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2242 if (tul_msgin_accept(pCurHcb) == -1) 2061 if (initio_msgin_accept(host) == -1)
2243 return (-1); 2062 return -1;
2244 if (pCurHcb->HCS_Phase != MSG_OUT) 2063 if (host->phase != MSG_OUT)
2245 return (tul_bad_seq(pCurHcb)); 2064 return initio_bad_seq(host);
2246 2065
2247 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT_TAG); 2066 outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
2248 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2067 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2249 2068
2250 return tul_wait_disc(pCurHcb); 2069 return initio_wait_disc(host);
2251 2070
2252} 2071}
2253 2072
2254/***************************************************************************/ 2073/**
2255int tul_msgin(HCS * pCurHcb) 2074 * initio_msgin - Message in
2075 * @host: InitIO Host
2076 *
2077 * Process incoming message
2078 */
2079static int initio_msgin(struct initio_host * host)
2256{ 2080{
2257 TCS *pCurTcb; 2081 struct target_control *active_tc;
2258 2082
2259 for (;;) { 2083 for (;;) {
2084 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2260 2085
2261 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 2086 outl(1, host->addr + TUL_SCnt0);
2262 2087 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2263 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 2088 if (wait_tulip(host) == -1)
2264 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 2089 return -1;
2265 if ((wait_tulip(pCurHcb)) == -1)
2266 return (-1);
2267 2090
2268 switch (TUL_RD(pCurHcb->HCS_Base, TUL_SFifo)) { 2091 switch (inb(host->addr + TUL_SFifo)) {
2269 case MSG_DISC: /* Disconnect msg */ 2092 case MSG_DISC: /* Disconnect msg */
2270 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 2093 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2271 2094 return initio_wait_disc(host);
2272 return tul_wait_disc(pCurHcb);
2273
2274 case MSG_SDP: 2095 case MSG_SDP:
2275 case MSG_RESTORE: 2096 case MSG_RESTORE:
2276 case MSG_NOP: 2097 case MSG_NOP:
2277 tul_msgin_accept(pCurHcb); 2098 initio_msgin_accept(host);
2278 break; 2099 break;
2279
2280 case MSG_REJ: /* Clear ATN first */ 2100 case MSG_REJ: /* Clear ATN first */
2281 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, 2101 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
2282 (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7))); 2102 host->addr + TUL_SSignal);
2283 pCurTcb = pCurHcb->HCS_ActTcs; 2103 active_tc = host->active_tc;
2284 if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync nego */ 2104 if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
2285 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2105 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
2286 } 2106 host->addr + TUL_SSignal);
2287 tul_msgin_accept(pCurHcb); 2107 initio_msgin_accept(host);
2288 break; 2108 break;
2289
2290 case MSG_EXTEND: /* extended msg */ 2109 case MSG_EXTEND: /* extended msg */
2291 tul_msgin_extend(pCurHcb); 2110 initio_msgin_extend(host);
2292 break; 2111 break;
2293
2294 case MSG_IGNOREWIDE: 2112 case MSG_IGNOREWIDE:
2295 tul_msgin_accept(pCurHcb); 2113 initio_msgin_accept(host);
2296 break; 2114 break;
2297
2298 /* get */
2299 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
2300 if (wait_tulip(pCurHcb) == -1)
2301 return -1;
2302
2303 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0); /* put pad */
2304 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get IGNORE field */
2305 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get pad */
2306
2307 tul_msgin_accept(pCurHcb);
2308 break;
2309
2310 case MSG_COMP: 2115 case MSG_COMP:
2311 { 2116 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2312 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 2117 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2313 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 2118 return initio_wait_done_disc(host);
2314 return tul_wait_done_disc(pCurHcb);
2315 }
2316 default: 2119 default:
2317 tul_msgout_reject(pCurHcb); 2120 initio_msgout_reject(host);
2318 break; 2121 break;
2319 } 2122 }
2320 if (pCurHcb->HCS_Phase != MSG_IN) 2123 if (host->phase != MSG_IN)
2321 return (pCurHcb->HCS_Phase); 2124 return host->phase;
2322 } 2125 }
2323 /* statement won't reach here */ 2126 /* statement won't reach here */
2324} 2127}
2325 2128
2326 2129static int initio_msgout_reject(struct initio_host * host)
2327
2328
2329/***************************************************************************/
2330int tul_msgout_reject(HCS * pCurHcb)
2331{ 2130{
2131 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2332 2132
2333 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2133 if (initio_msgin_accept(host) == -1)
2334 2134 return -1;
2335 if ((tul_msgin_accept(pCurHcb)) == -1)
2336 return (-1);
2337 2135
2338 if (pCurHcb->HCS_Phase == MSG_OUT) { 2136 if (host->phase == MSG_OUT) {
2339 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_REJ); /* Msg reject */ 2137 outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
2340 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2138 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2341 return (wait_tulip(pCurHcb)); 2139 return wait_tulip(host);
2342 } 2140 }
2343 return (pCurHcb->HCS_Phase); 2141 return host->phase;
2344} 2142}
2345 2143
2346 2144static int initio_msgout_ide(struct initio_host * host)
2347
2348/***************************************************************************/
2349int tul_msgout_ide(HCS * pCurHcb)
2350{ 2145{
2351 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_IDE); /* Initiator Detected Error */ 2146 outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
2352 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2147 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2353 return (wait_tulip(pCurHcb)); 2148 return wait_tulip(host);
2354} 2149}
2355 2150
2356 2151static int initio_msgin_extend(struct initio_host * host)
2357/***************************************************************************/
2358int tul_msgin_extend(HCS * pCurHcb)
2359{ 2152{
2360 BYTE len, idx; 2153 u8 len, idx;
2361 2154
2362 if (tul_msgin_accept(pCurHcb) != MSG_IN) 2155 if (initio_msgin_accept(host) != MSG_IN)
2363 return (pCurHcb->HCS_Phase); 2156 return host->phase;
2364 2157
2365 /* Get extended msg length */ 2158 /* Get extended msg length */
2366 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 2159 outl(1, host->addr + TUL_SCnt0);
2367 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 2160 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2368 if (wait_tulip(pCurHcb) == -1) 2161 if (wait_tulip(host) == -1)
2369 return (-1); 2162 return -1;
2370 2163
2371 len = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 2164 len = inb(host->addr + TUL_SFifo);
2372 pCurHcb->HCS_Msg[0] = len; 2165 host->msg[0] = len;
2373 for (idx = 1; len != 0; len--) { 2166 for (idx = 1; len != 0; len--) {
2374 2167
2375 if ((tul_msgin_accept(pCurHcb)) != MSG_IN) 2168 if ((initio_msgin_accept(host)) != MSG_IN)
2376 return (pCurHcb->HCS_Phase); 2169 return host->phase;
2377 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 2170 outl(1, host->addr + TUL_SCnt0);
2378 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 2171 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2379 if (wait_tulip(pCurHcb) == -1) 2172 if (wait_tulip(host) == -1)
2380 return (-1); 2173 return -1;
2381 pCurHcb->HCS_Msg[idx++] = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 2174 host->msg[idx++] = inb(host->addr + TUL_SFifo);
2382 } 2175 }
2383 if (pCurHcb->HCS_Msg[1] == 1) { /* if it's synchronous data transfer request */ 2176 if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
2384 if (pCurHcb->HCS_Msg[0] != 3) /* if length is not right */ 2177 u8 r;
2385 return (tul_msgout_reject(pCurHcb)); 2178 if (host->msg[0] != 3) /* if length is not right */
2386 if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */ 2179 return initio_msgout_reject(host);
2387 pCurHcb->HCS_Msg[3] = 0; 2180 if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
2181 host->msg[3] = 0;
2388 } else { 2182 } else {
2389 if ((tul_msgin_sync(pCurHcb) == 0) && 2183 if (initio_msgin_sync(host) == 0 &&
2390 (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SYNC_DONE)) { 2184 (host->active_tc->flags & TCF_SYNC_DONE)) {
2391 tul_sync_done(pCurHcb); 2185 initio_sync_done(host);
2392 return (tul_msgin_accept(pCurHcb)); 2186 return initio_msgin_accept(host);
2393 } 2187 }
2394 } 2188 }
2395 2189
2396 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2190 r = inb(host->addr + TUL_SSignal);
2397 if ((tul_msgin_accept(pCurHcb)) != MSG_OUT) 2191 outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
2398 return (pCurHcb->HCS_Phase); 2192 host->addr + TUL_SSignal);
2193 if (initio_msgin_accept(host) != MSG_OUT)
2194 return host->phase;
2399 /* sync msg out */ 2195 /* sync msg out */
2400 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 2196 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2401 2197
2402 tul_sync_done(pCurHcb); 2198 initio_sync_done(host);
2403 2199
2404 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 2200 outb(MSG_EXTEND, host->addr + TUL_SFifo);
2405 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); 2201 outb(3, host->addr + TUL_SFifo);
2406 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); 2202 outb(1, host->addr + TUL_SFifo);
2407 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]); 2203 outb(host->msg[2], host->addr + TUL_SFifo);
2408 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[3]); 2204 outb(host->msg[3], host->addr + TUL_SFifo);
2409 2205 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2410 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2206 return wait_tulip(host);
2411 return (wait_tulip(pCurHcb));
2412 } 2207 }
2413 if ((pCurHcb->HCS_Msg[0] != 2) || (pCurHcb->HCS_Msg[1] != 3)) 2208 if (host->msg[0] != 2 || host->msg[1] != 3)
2414 return (tul_msgout_reject(pCurHcb)); 2209 return initio_msgout_reject(host);
2415 /* if it's WIDE DATA XFER REQ */ 2210 /* if it's WIDE DATA XFER REQ */
2416 if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) { 2211 if (host->active_tc->flags & TCF_NO_WDTR) {
2417 pCurHcb->HCS_Msg[2] = 0; 2212 host->msg[2] = 0;
2418 } else { 2213 } else {
2419 if (pCurHcb->HCS_Msg[2] > 2) /* > 32 bits */ 2214 if (host->msg[2] > 2) /* > 32 bits */
2420 return (tul_msgout_reject(pCurHcb)); 2215 return initio_msgout_reject(host);
2421 if (pCurHcb->HCS_Msg[2] == 2) { /* == 32 */ 2216 if (host->msg[2] == 2) { /* == 32 */
2422 pCurHcb->HCS_Msg[2] = 1; 2217 host->msg[2] = 1;
2423 } else { 2218 } else {
2424 if ((pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) == 0) { 2219 if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
2425 wdtr_done(pCurHcb); 2220 wdtr_done(host);
2426 if ((pCurHcb->HCS_ActTcs->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) 2221 if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
2427 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2222 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2428 return (tul_msgin_accept(pCurHcb)); 2223 return initio_msgin_accept(host);
2429 } 2224 }
2430 } 2225 }
2431 } 2226 }
2432 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2227 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2433 2228
2434 if (tul_msgin_accept(pCurHcb) != MSG_OUT) 2229 if (initio_msgin_accept(host) != MSG_OUT)
2435 return (pCurHcb->HCS_Phase); 2230 return host->phase;
2436 /* WDTR msg out */ 2231 /* WDTR msg out */
2437 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 2232 outb(MSG_EXTEND, host->addr + TUL_SFifo);
2438 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2); 2233 outb(2, host->addr + TUL_SFifo);
2439 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); 2234 outb(3, host->addr + TUL_SFifo);
2440 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]); 2235 outb(host->msg[2], host->addr + TUL_SFifo);
2441 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2236 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2442 return (wait_tulip(pCurHcb)); 2237 return wait_tulip(host);
2443} 2238}
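
The sync reply written to the FIFO above is a standard SCSI-2 SDTR extended message: 0x01 (extended), length 3, code 1, then the period factor and REQ/ACK offset from msg[2]/msg[3]. As a sketch with the bytes named (hypothetical helper mirroring the inline sequence):

    static void initio_send_sdtr(struct initio_host *host, u8 period, u8 offset)
    {
            outb(MSG_EXTEND, host->addr + TUL_SFifo);       /* 0x01: extended message */
            outb(3, host->addr + TUL_SFifo);                /* three payload bytes    */
            outb(1, host->addr + TUL_SFifo);                /* SDTR message code      */
            outb(period, host->addr + TUL_SFifo);           /* transfer period factor */
            outb(offset, host->addr + TUL_SFifo);           /* REQ/ACK offset         */
            outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);   /* push it onto the bus   */
    }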
2444 2239
2445/***************************************************************************/ 2240static int initio_msgin_sync(struct initio_host * host)
2446int tul_msgin_sync(HCS * pCurHcb)
2447{ 2241{
2448 char default_period; 2242 char default_period;
2449 2243
2450 default_period = tul_rate_tbl[pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SCSI_RATE]; 2244 default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
2451 if (pCurHcb->HCS_Msg[3] > MAX_OFFSET) { 2245 if (host->msg[3] > MAX_OFFSET) {
2452 pCurHcb->HCS_Msg[3] = MAX_OFFSET; 2246 host->msg[3] = MAX_OFFSET;
2453 if (pCurHcb->HCS_Msg[2] < default_period) { 2247 if (host->msg[2] < default_period) {
2454 pCurHcb->HCS_Msg[2] = default_period; 2248 host->msg[2] = default_period;
2455 return 1; 2249 return 1;
2456 } 2250 }
2457 if (pCurHcb->HCS_Msg[2] >= 59) { /* Change to async */ 2251 if (host->msg[2] >= 59) /* Change to async */
2458 pCurHcb->HCS_Msg[3] = 0; 2252 host->msg[3] = 0;
2459 }
2460 return 1; 2253 return 1;
2461 } 2254 }
2462 /* offset requests asynchronous transfers ? */ 2255 /* offset requests asynchronous transfers ? */
2463 if (pCurHcb->HCS_Msg[3] == 0) { 2256 if (host->msg[3] == 0) {
2464 return 0; 2257 return 0;
2465 } 2258 }
2466 if (pCurHcb->HCS_Msg[2] < default_period) { 2259 if (host->msg[2] < default_period) {
2467 pCurHcb->HCS_Msg[2] = default_period; 2260 host->msg[2] = default_period;
2468 return 1; 2261 return 1;
2469 } 2262 }
2470 if (pCurHcb->HCS_Msg[2] >= 59) { 2263 if (host->msg[2] >= 59) {
2471 pCurHcb->HCS_Msg[3] = 0; 2264 host->msg[3] = 0;
2472 return 1; 2265 return 1;
2473 } 2266 }
2474 return 0; 2267 return 0;
2475} 2268}
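
Worked through with concrete numbers, assuming MAX_OFFSET is 8 and default_period comes from the rate table: a target offer of period 12/offset 15 is clamped to offset 8 (with the period raised to the default if 12 is below it) and returns 1, meaning a counter-offer must be sent; an offer with period 62 trips the >= 59 cutoff and is pushed to asynchronous by zeroing the offset; an offer with offset 0 is already asynchronous and returns 0, accepted as-is.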
2476 2269
2477 2270static int wdtr_done(struct initio_host * host)
2478/***************************************************************************/
2479int wdtr_done(HCS * pCurHcb)
2480{ 2271{
2481 pCurHcb->HCS_ActTcs->TCS_Flags &= ~TCF_SYNC_DONE; 2272 host->active_tc->flags &= ~TCF_SYNC_DONE;
2482 pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_WDTR_DONE; 2273 host->active_tc->flags |= TCF_WDTR_DONE;
2483 2274
2484 pCurHcb->HCS_ActTcs->TCS_JS_Period = 0; 2275 host->active_tc->js_period = 0;
2485 if (pCurHcb->HCS_Msg[2]) { /* if 16 bit */ 2276 if (host->msg[2]) /* if 16 bit */
2486 pCurHcb->HCS_ActTcs->TCS_JS_Period |= TSC_WIDE_SCSI; 2277 host->active_tc->js_period |= TSC_WIDE_SCSI;
2487 } 2278 host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
2488 pCurHcb->HCS_ActTcs->TCS_SConfig0 &= ~TSC_ALT_PERIOD; 2279 outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
2489 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0); 2280 outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
2490 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period);
2491 2281
2492 return 1; 2282 return 1;
2493} 2283}
2494 2284
2495/***************************************************************************/ 2285static int initio_sync_done(struct initio_host * host)
2496int tul_sync_done(HCS * pCurHcb)
2497{ 2286{
2498 int i; 2287 int i;
2499 2288
2500 pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_SYNC_DONE; 2289 host->active_tc->flags |= TCF_SYNC_DONE;
2501 2290
2502 if (pCurHcb->HCS_Msg[3]) { 2291 if (host->msg[3]) {
2503 pCurHcb->HCS_ActTcs->TCS_JS_Period |= pCurHcb->HCS_Msg[3]; 2292 host->active_tc->js_period |= host->msg[3];
2504 for (i = 0; i < 8; i++) { 2293 for (i = 0; i < 8; i++) {
2505 if (tul_rate_tbl[i] >= pCurHcb->HCS_Msg[2]) /* pick the big one */ 2294 if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
2506 break; 2295 break;
2507 } 2296 }
2508 pCurHcb->HCS_ActTcs->TCS_JS_Period |= (i << 4); 2297 host->active_tc->js_period |= (i << 4);
2509 pCurHcb->HCS_ActTcs->TCS_SConfig0 |= TSC_ALT_PERIOD; 2298 host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
2510 } 2299 }
2511 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0); 2300 outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
2512 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period); 2301 outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
2513 2302
2514 return (-1); 2303 return -1;
2515} 2304}
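
Taken together, wdtr_done() and initio_sync_done() assemble js_period from three fields; the layout inferred from the code is the offset in the low nibble, the rate-table index in bits 4..6, and TSC_WIDE_SCSI on top. A sketch of the packing under that assumption:

    static u8 make_js_period(u8 offset, u8 rate_idx, int wide)  /* hypothetical */
    {
            u8 v = offset | (rate_idx << 4);  /* offset low, rate index high */

            if (wide)
                    v |= TSC_WIDE_SCSI;       /* 16-bit transfers agreed     */
            return v;
    }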
2516 2305
2517 2306
2518int tul_post_scsi_rst(HCS * pCurHcb) 2307static int initio_post_scsi_rst(struct initio_host * host)
2519{ 2308{
2520 SCB *pCurScb; 2309 struct scsi_ctrl_blk *scb;
2521 TCS *pCurTcb; 2310 struct target_control *active_tc;
2522 int i; 2311 int i;
2523 2312
2524 pCurHcb->HCS_ActScb = NULL; 2313 host->active = NULL;
2525 pCurHcb->HCS_ActTcs = NULL; 2314 host->active_tc = NULL;
2526 pCurHcb->HCS_Flags = 0; 2315 host->flags = 0;
2527 2316
2528 while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) { 2317 while ((scb = initio_pop_busy_scb(host)) != NULL) {
2529 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 2318 scb->hastat = HOST_BAD_PHAS;
2530 tul_append_done_scb(pCurHcb, pCurScb); 2319 initio_append_done_scb(host, scb);
2531 } 2320 }
2532 /* clear sync done flag */ 2321 /* clear sync done flag */
2533 pCurTcb = &pCurHcb->HCS_Tcs[0]; 2322 active_tc = &host->targets[0];
2534 for (i = 0; i < pCurHcb->HCS_MaxTar; pCurTcb++, i++) { 2323 for (i = 0; i < host->max_tar; active_tc++, i++) {
2535 pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); 2324 active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2536 /* Initialize the sync. xfer register values to an asyn xfer */ 2325 /* Initialize the sync. xfer register values to an asyn xfer */
2537 pCurTcb->TCS_JS_Period = 0; 2326 active_tc->js_period = 0;
2538 pCurTcb->TCS_SConfig0 = pCurHcb->HCS_SConf1; 2327 active_tc->sconfig0 = host->sconf1;
2539 pCurHcb->HCS_ActTags[0] = 0; /* 07/22/98 */ 2328 host->act_tags[0] = 0; /* 07/22/98 */
2540 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY; /* 07/22/98 */ 2329 host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
2541 } /* for */ 2330 } /* for */
2542 2331
2543 return (-1); 2332 return -1;
2544} 2333}
2545 2334
2546/***************************************************************************/ 2335static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
2547void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb)
2548{ 2336{
2549 pCurScb->SCB_Status |= SCB_SELECT; 2337 scb->status |= SCB_SELECT;
2550 pCurScb->SCB_NxtStat = 0x1; 2338 scb->next_state = 0x1;
2551 pCurHcb->HCS_ActScb = pCurScb; 2339 host->active = scb;
2552 pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 2340 host->active_tc = &host->targets[scb->target];
2553 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SELATNSTOP); 2341 outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
2554 return;
2555} 2342}
2556 2343
2557 2344
2558/***************************************************************************/ 2345static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
2559void tul_select_atn(HCS * pCurHcb, SCB * pCurScb)
2560{ 2346{
2561 int i; 2347 int i;
2562 2348
2563 pCurScb->SCB_Status |= SCB_SELECT; 2349 scb->status |= SCB_SELECT;
2564 pCurScb->SCB_NxtStat = 0x2; 2350 scb->next_state = 0x2;
2565 2351
2566 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident); 2352 outb(scb->ident, host->addr + TUL_SFifo);
2567 for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++) 2353 for (i = 0; i < (int) scb->cdblen; i++)
2568 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]); 2354 outb(scb->cdb[i], host->addr + TUL_SFifo);
2569 pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 2355 host->active_tc = &host->targets[scb->target];
2570 pCurHcb->HCS_ActScb = pCurScb; 2356 host->active = scb;
2571 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN); 2357 outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
2572 return;
2573} 2358}
2574 2359
2575/***************************************************************************/ 2360static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
2576void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb)
2577{ 2361{
2578 int i; 2362 int i;
2579 2363
2580 pCurScb->SCB_Status |= SCB_SELECT; 2364 scb->status |= SCB_SELECT;
2581 pCurScb->SCB_NxtStat = 0x2; 2365 scb->next_state = 0x2;
2582 2366
2583 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident); 2367 outb(scb->ident, host->addr + TUL_SFifo);
2584 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg); 2368 outb(scb->tagmsg, host->addr + TUL_SFifo);
2585 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId); 2369 outb(scb->tagid, host->addr + TUL_SFifo);
2586 for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++) 2370 for (i = 0; i < scb->cdblen; i++)
2587 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]); 2371 outb(scb->cdb[i], host->addr + TUL_SFifo);
2588 pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 2372 host->active_tc = &host->targets[scb->target];
2589 pCurHcb->HCS_ActScb = pCurScb; 2373 host->active = scb;
2590 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN3); 2374 outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
2591 return;
2592} 2375}
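
The three selection routines differ only in what they preload into the FIFO before the select command: nothing for TSC_SELATNSTOP, identify plus CDB for TSC_SEL_ATN, and identify plus the two tag bytes plus CDB for TSC_SEL_ATN3. A hypothetical unification of the latter two, for illustration only:

    static void initio_select(struct initio_host *host,
                              struct scsi_ctrl_blk *scb, int tagged)
    {
            int i;

            outb(scb->ident, host->addr + TUL_SFifo);       /* IDENTIFY message */
            if (tagged) {
                    outb(scb->tagmsg, host->addr + TUL_SFifo);
                    outb(scb->tagid, host->addr + TUL_SFifo);
            }
            for (i = 0; i < scb->cdblen; i++)
                    outb(scb->cdb[i], host->addr + TUL_SFifo);
            outb(tagged ? TSC_SEL_ATN3 : TSC_SEL_ATN, host->addr + TUL_SCmd);
    }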
2593 2376
2594/***************************************************************************/ 2377/**
2595/* SCSI Bus Device Reset */ 2378 * initio_bus_device_reset - SCSI Bus Device Reset
2596int tul_bus_device_reset(HCS * pCurHcb) 2379 * @host: InitIO host to reset
2380 *
2381 * Perform a device reset and abort all pending SCBs for the
2382 * victim device
2383 */
2384int initio_bus_device_reset(struct initio_host * host)
2597{ 2385{
2598 SCB *pCurScb = pCurHcb->HCS_ActScb; 2386 struct scsi_ctrl_blk *scb = host->active;
2599 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 2387 struct target_control *active_tc = host->active_tc;
2600 SCB *pTmpScb, *pPrevScb; 2388 struct scsi_ctrl_blk *tmp, *prev;
2601 BYTE tar; 2389 u8 tar;
2602 2390
2603 if (pCurHcb->HCS_Phase != MSG_OUT) { 2391 if (host->phase != MSG_OUT)
2604 return (int_tul_bad_seq(pCurHcb)); /* Unexpected phase */ 2392 return int_initio_bad_seq(host); /* Unexpected phase */
2605 } 2393
2606 tul_unlink_pend_scb(pCurHcb, pCurScb); 2394 initio_unlink_pend_scb(host, scb);
2607 tul_release_scb(pCurHcb, pCurScb); 2395 initio_release_scb(host, scb);
2608 2396
2609 2397
2610 tar = pCurScb->SCB_Target; /* target */ 2398 tar = scb->target; /* target */
2611 pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY); 2399 active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
2612 /* clr sync. nego & WDTR flags 07/22/98 */ 2400 /* clr sync. nego & WDTR flags 07/22/98 */
2613 2401
2614 /* abort all SCB with same target */ 2402 /* abort all SCB with same target */
2615 pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */ 2403 prev = tmp = host->first_busy; /* Check Busy queue */
2616 while (pTmpScb != NULL) { 2404 while (tmp != NULL) {
2617 2405 if (tmp->target == tar) {
2618 if (pTmpScb->SCB_Target == tar) {
2619 /* unlink it */ 2406 /* unlink it */
2620 if (pTmpScb == pCurHcb->HCS_FirstBusy) { 2407 if (tmp == host->first_busy) {
2621 if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL) 2408 if ((host->first_busy = tmp->next) == NULL)
2622 pCurHcb->HCS_LastBusy = NULL; 2409 host->last_busy = NULL;
2623 } else { 2410 } else {
2624 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 2411 prev->next = tmp->next;
2625 if (pTmpScb == pCurHcb->HCS_LastBusy) 2412 if (tmp == host->last_busy)
2626 pCurHcb->HCS_LastBusy = pPrevScb; 2413 host->last_busy = prev;
2627 } 2414 }
2628 pTmpScb->SCB_HaStat = HOST_ABORTED; 2415 tmp->hastat = HOST_ABORTED;
2629 tul_append_done_scb(pCurHcb, pTmpScb); 2416 initio_append_done_scb(host, tmp);
2630 } 2417 }
 2631                 /* Previous hasn't changed */                2418                 /* Previous hasn't changed */
2632 else { 2419 else {
2633 pPrevScb = pTmpScb; 2420 prev = tmp;
2634 } 2421 }
2635 pTmpScb = pTmpScb->SCB_NxtScb; 2422 tmp = tmp->next;
2636 } 2423 }
2637 2424 outb(MSG_DEVRST, host->addr + TUL_SFifo);
2638 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_DEVRST); 2425 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2639 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2426 return initio_wait_disc(host);
2640
2641 return tul_wait_disc(pCurHcb);
2642 2427
2643} 2428}
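
The busy-queue walk in initio_bus_device_reset() is a classic filter over a singly linked list with head and tail pointers. The same removal pattern in isolation, as a hedged sketch (helper name ours):

    static void abort_scbs_for_target(struct initio_host *host, u8 tar)
    {
            struct scsi_ctrl_blk *prev = NULL, *tmp = host->first_busy;

            while (tmp != NULL) {
                    struct scsi_ctrl_blk *next = tmp->next;

                    if (tmp->target == tar) {
                            if (prev == NULL)               /* unlink at head   */
                                    host->first_busy = next;
                            else                            /* unlink mid/tail  */
                                    prev->next = next;
                            if (tmp == host->last_busy)
                                    host->last_busy = prev;
                            tmp->hastat = HOST_ABORTED;
                            initio_append_done_scb(host, tmp);
                    } else {
                            prev = tmp;
                    }
                    tmp = next;
            }
    }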
2644 2429
2645/***************************************************************************/ 2430static int initio_msgin_accept(struct initio_host * host)
2646int tul_msgin_accept(HCS * pCurHcb)
2647{ 2431{
2648 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 2432 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2649 return (wait_tulip(pCurHcb)); 2433 return wait_tulip(host);
2650} 2434}
2651 2435
2652/***************************************************************************/ 2436static int wait_tulip(struct initio_host * host)
2653int wait_tulip(HCS * pCurHcb)
2654{ 2437{
2655 2438
2656 while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) 2439 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
2657 & TSS_INT_PENDING)); 2440 & TSS_INT_PENDING))
2441 cpu_relax();
2658 2442
2659 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 2443 host->jsint = inb(host->addr + TUL_SInt);
2660 pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK; 2444 host->phase = host->jsstatus0 & TSS_PH_MASK;
2661 pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1); 2445 host->jsstatus1 = inb(host->addr + TUL_SStatus1);
2662 2446
2663 if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if SCSI bus reset detected */ 2447 if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */
2664 return (int_tul_resel(pCurHcb)); 2448 return int_initio_resel(host);
2665 } 2449 if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
2666 if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) { /* if selected/reselected timeout interrupt */ 2450 return int_initio_busfree(host);
2667 return (int_tul_busfree(pCurHcb)); 2451 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
2668 } 2452 return int_initio_scsi_rst(host);
2669 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */ 2453
2670 return (int_tul_scsi_rst(pCurHcb)); 2454 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
2671 } 2455 if (host->flags & HCF_EXPECT_DONE_DISC) {
2672 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */ 2456 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2673 if (pCurHcb->HCS_Flags & HCF_EXPECT_DONE_DISC) { 2457 initio_unlink_busy_scb(host, host->active);
2674 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 2458 host->active->hastat = 0;
2675 tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb); 2459 initio_append_done_scb(host, host->active);
2676 pCurHcb->HCS_ActScb->SCB_HaStat = 0; 2460 host->active = NULL;
2677 tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb); 2461 host->active_tc = NULL;
2678 pCurHcb->HCS_ActScb = NULL; 2462 host->flags &= ~HCF_EXPECT_DONE_DISC;
2679 pCurHcb->HCS_ActTcs = NULL; 2463 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2680 pCurHcb->HCS_Flags &= ~HCF_EXPECT_DONE_DISC; 2464 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2681 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 2465 return -1;
2682 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
2683 return (-1);
2684 } 2466 }
2685 if (pCurHcb->HCS_Flags & HCF_EXPECT_DISC) { 2467 if (host->flags & HCF_EXPECT_DISC) {
2686 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 2468 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2687 pCurHcb->HCS_ActScb = NULL; 2469 host->active = NULL;
2688 pCurHcb->HCS_ActTcs = NULL; 2470 host->active_tc = NULL;
2689 pCurHcb->HCS_Flags &= ~HCF_EXPECT_DISC; 2471 host->flags &= ~HCF_EXPECT_DISC;
2690 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 2472 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2691 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */ 2473 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2692 return (-1); 2474 return -1;
2693 } 2475 }
2694 return (int_tul_busfree(pCurHcb)); 2476 return int_initio_busfree(host);
2695 }
2696 if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) {
2697 return (pCurHcb->HCS_Phase);
2698 } 2477 }
2699 return (pCurHcb->HCS_Phase); 2478 /* The old code really does the below. Can probably be removed */
2479 if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
2480 return host->phase;
2481 return host->phase;
2700} 2482}
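
wait_tulip() is the hub of the state machine: it spins for TSS_INT_PENDING, latches jsint/phase/jsstatus1, then dispatches in a fixed priority order. A condensed sketch of just the decode (hypothetical helper; the real code inlines it, and the final dead TSS_FUNC_COMP | TSS_BUS_SERV test collapses to the same return):

    static int initio_decode_int(struct initio_host *host)  /* hypothetical */
    {
            if (host->jsint & TSS_RESEL_INT)        /* reselected by a target */
                    return int_initio_resel(host);
            if (host->jsint & TSS_SEL_TIMEOUT)      /* selection timed out    */
                    return int_initio_busfree(host);
            if (host->jsint & TSS_SCSIRST_INT)      /* bus reset seen         */
                    return int_initio_scsi_rst(host);
            /* TSS_DISC_INT (expected or unexpected disconnect) is handled
               next, then the default: report the current bus phase.       */
            return host->phase;
    }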
2701/***************************************************************************/
2702int tul_wait_disc(HCS * pCurHcb)
2703{
2704
2705 while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
2706 & TSS_INT_PENDING));
2707 2483
2484static int initio_wait_disc(struct initio_host * host)
2485{
2486 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
2487 cpu_relax();
2708 2488
2709 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 2489 host->jsint = inb(host->addr + TUL_SInt);
2710 2490
2711 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */ 2491 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
2712 return (int_tul_scsi_rst(pCurHcb)); 2492 return int_initio_scsi_rst(host);
2713 } 2493 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
2714 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */ 2494 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2715 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 2495 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2716 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 2496 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2717 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */ 2497 host->active = NULL;
2718 pCurHcb->HCS_ActScb = NULL; 2498 return -1;
2719 return (-1);
2720 } 2499 }
2721 return (tul_bad_seq(pCurHcb)); 2500 return initio_bad_seq(host);
2722} 2501}
2723 2502
2724/***************************************************************************/ 2503static int initio_wait_done_disc(struct initio_host * host)
2725int tul_wait_done_disc(HCS * pCurHcb)
2726{ 2504{
2505 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
2506 & TSS_INT_PENDING))
2507 cpu_relax();
2727 2508
2509 host->jsint = inb(host->addr + TUL_SInt);
2728 2510
2729 while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) 2511 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
2730 & TSS_INT_PENDING)); 2512 return int_initio_scsi_rst(host);
2731 2513 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
2732 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 2514 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2733 2515 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2734 2516 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2735 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */ 2517 initio_unlink_busy_scb(host, host->active);
2736 return (int_tul_scsi_rst(pCurHcb));
2737 }
2738 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
2739 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
2740 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
2741 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
2742 tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb);
2743 2518
2744 tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb); 2519 initio_append_done_scb(host, host->active);
2745 pCurHcb->HCS_ActScb = NULL; 2520 host->active = NULL;
2746 return (-1); 2521 return -1;
2747 } 2522 }
2748 return (tul_bad_seq(pCurHcb)); 2523 return initio_bad_seq(host);
2749} 2524}
2750 2525
2526/**
2527 * i91u_intr - IRQ handler
2528 * @irqno: IRQ number
2529 * @dev_id: IRQ identifier
2530 *
2531 * Take the relevant locks and then invoke the actual isr processing
2532 * code under the lock.
2533 */
2534
2751static irqreturn_t i91u_intr(int irqno, void *dev_id) 2535static irqreturn_t i91u_intr(int irqno, void *dev_id)
2752{ 2536{
2753 struct Scsi_Host *dev = dev_id; 2537 struct Scsi_Host *dev = dev_id;
2754 unsigned long flags; 2538 unsigned long flags;
2539 int r;
2755 2540
2756 spin_lock_irqsave(dev->host_lock, flags); 2541 spin_lock_irqsave(dev->host_lock, flags);
2757 tul_isr((HCS *)dev->base); 2542 r = initio_isr((struct initio_host *)dev->hostdata);
2758 spin_unlock_irqrestore(dev->host_lock, flags); 2543 spin_unlock_irqrestore(dev->host_lock, flags);
2759 return IRQ_HANDLED; 2544 if (r)
2760} 2545 return IRQ_HANDLED;
2761 2546 else
2762static int tul_NewReturnNumberOfAdapters(void) 2547 return IRQ_NONE;
2763{
2764 struct pci_dev *pDev = NULL; /* Start from none */
2765 int iAdapters = 0;
2766 long dRegValue;
2767 WORD wBIOS;
2768 int i = 0;
2769
2770 init_i91uAdapter_table();
2771
2772 for (i = 0; i < ARRAY_SIZE(i91u_pci_devices); i++)
2773 {
2774 while ((pDev = pci_find_device(i91u_pci_devices[i].vendor, i91u_pci_devices[i].device, pDev)) != NULL) {
2775 if (pci_enable_device(pDev))
2776 continue;
2777 pci_read_config_dword(pDev, 0x44, (u32 *) & dRegValue);
2778 wBIOS = (UWORD) (dRegValue & 0xFF);
2779 if (((dRegValue & 0xFF00) >> 8) == 0xFF)
2780 dRegValue = 0;
2781 wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8));
2782 if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) {
2783 printk(KERN_WARNING
2784 "i91u: Could not set 32 bit DMA mask\n");
2785 continue;
2786 }
2787
2788 if (Addi91u_into_Adapter_table(wBIOS,
2789 (pDev->resource[0].start),
2790 pDev->irq,
2791 pDev->bus->number,
2792 (pDev->devfn >> 3)
2793 ) == 0)
2794 iAdapters++;
2795 }
2796 }
2797
2798 return (iAdapters);
2799} 2548}
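
The new handler finally propagates whether the ISR claimed the interrupt, which matters on shared IRQ lines. The trailing if/else is the open-coded form of the IRQ_RETVAL() macro from <linux/interrupt.h>; an equivalent sketch:

    #include <linux/interrupt.h>    /* irqreturn_t, IRQ_RETVAL */

    static irqreturn_t i91u_intr(int irqno, void *dev_id)
    {
            struct Scsi_Host *dev = dev_id;
            unsigned long flags;
            int r;

            spin_lock_irqsave(dev->host_lock, flags);
            r = initio_isr((struct initio_host *)dev->hostdata);
            spin_unlock_irqrestore(dev->host_lock, flags);
            return IRQ_RETVAL(r);   /* IRQ_HANDLED if r != 0, else IRQ_NONE */
    }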
2800 2549
2801static int i91u_detect(struct scsi_host_template * tpnt)
2802{
2803 HCS *pHCB;
2804 struct Scsi_Host *hreg;
2805 unsigned long i; /* 01/14/98 */
2806 int ok = 0, iAdapters;
2807 ULONG dBiosAdr;
2808 BYTE *pbBiosAdr;
2809
2810 /* Get total number of adapters in the motherboard */
2811 iAdapters = tul_NewReturnNumberOfAdapters();
2812 if (iAdapters == 0) /* If no tulip founded, return */
2813 return (0);
2814
2815 tul_num_ch = (iAdapters > tul_num_ch) ? tul_num_ch : iAdapters;
2816 /* Update actually channel number */
2817 if (tul_tag_enable) { /* 1.01i */
2818 tul_num_scb = MAX_TARGETS * i91u_MAXQUEUE;
2819 } else {
2820 tul_num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
2821 } /* Update actually SCBs per adapter */
2822
2823 /* Get total memory needed for HCS */
2824 i = tul_num_ch * sizeof(HCS);
2825 memset((unsigned char *) &tul_hcs[0], 0, i); /* Initialize tul_hcs 0 */
2826 /* Get total memory needed for SCB */
2827
2828 for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) {
2829 i = tul_num_ch * tul_num_scb * sizeof(SCB);
2830 if ((tul_scb = kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL)
2831 break;
2832 }
2833 if (tul_scb == NULL) {
2834 printk("i91u: SCB memory allocation error\n");
2835 return (0);
2836 }
2837 memset((unsigned char *) tul_scb, 0, i);
2838 2550
2839 for (i = 0, pHCB = &tul_hcs[0]; /* Get pointer for control block */ 2551/**
2840 i < tul_num_ch; 2552 * initio_build_scb - Build the mappings and SCB
2841 i++, pHCB++) { 2553 * @host: InitIO host taking the command
2842 get_tulipPCIConfig(pHCB, i); 2554 * @cblk: Firmware command block
2843 2555 * @cmnd: SCSI midlayer command block
2844 dBiosAdr = pHCB->HCS_BIOS; 2556 *
2845 dBiosAdr = (dBiosAdr << 4); 2557 * Translate the abstract SCSI command into a firmware command block
2846 2558 * suitable for feeding to the InitIO host controller. This also requires
2847 pbBiosAdr = phys_to_virt(dBiosAdr); 2559 * we build the scatter gather lists and ensure they are mapped properly.
2848 2560 */
2849 init_tulip(pHCB, tul_scb + (i * tul_num_scb), tul_num_scb, pbBiosAdr, 10);
2850 request_region(pHCB->HCS_Base, 256, "i91u"); /* Register */
2851
2852 pHCB->HCS_Index = i; /* 7/29/98 */
2853 hreg = scsi_register(tpnt, sizeof(HCS));
2854 if(hreg == NULL) {
2855 release_region(pHCB->HCS_Base, 256);
2856 return 0;
2857 }
2858 hreg->io_port = pHCB->HCS_Base;
2859 hreg->n_io_port = 0xff;
2860 hreg->can_queue = tul_num_scb; /* 03/05/98 */
2861 hreg->unique_id = pHCB->HCS_Base;
2862 hreg->max_id = pHCB->HCS_MaxTar;
2863 hreg->max_lun = 32; /* 10/21/97 */
2864 hreg->irq = pHCB->HCS_Intr;
2865 hreg->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */
2866 hreg->base = (unsigned long)pHCB;
2867 hreg->sg_tablesize = TOTAL_SG_ENTRY; /* Maximun support is 32 */
2868
2869 /* Initial tulip chip */
2870 ok = request_irq(pHCB->HCS_Intr, i91u_intr, IRQF_DISABLED | IRQF_SHARED, "i91u", hreg);
2871 if (ok < 0) {
2872 printk(KERN_WARNING "i91u: unable to request IRQ %d\n\n", pHCB->HCS_Intr);
2873 return 0;
2874 }
2875 }
2876
2877 tpnt->this_id = -1;
2878 tpnt->can_queue = 1;
2879
2880 return 1;
2881}
2882 2561
2883static void i91uBuildSCB(HCS * pHCB, SCB * pSCB, struct scsi_cmnd * SCpnt) 2562static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
2884{ /* Create corresponding SCB */ 2563{ /* Create corresponding SCB */
2885 struct scatterlist *pSrbSG; 2564 struct scatterlist *sglist;
2886 SG *pSG; /* Pointer to SG list */ 2565 struct sg_entry *sg; /* Pointer to SG list */
2887 int i; 2566 int i, nseg;
2888 long TotalLen; 2567 long total_len;
2889 dma_addr_t dma_addr; 2568 dma_addr_t dma_addr;
2890 2569
2891 pSCB->SCB_Post = i91uSCBPost; /* i91u's callback routine */ 2570 /* Fill in the command headers */
2892 pSCB->SCB_Srb = SCpnt; 2571 cblk->post = i91uSCBPost; /* i91u's callback routine */
2893 pSCB->SCB_Opcode = ExecSCSI; 2572 cblk->srb = cmnd;
2894 pSCB->SCB_Flags = SCF_POST; /* After SCSI done, call post routine */ 2573 cblk->opcode = ExecSCSI;
2895 pSCB->SCB_Target = SCpnt->device->id; 2574 cblk->flags = SCF_POST; /* After SCSI done, call post routine */
2896 pSCB->SCB_Lun = SCpnt->device->lun; 2575 cblk->target = cmnd->device->id;
2897 pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW; 2576 cblk->lun = cmnd->device->lun;
2898 2577 cblk->ident = cmnd->device->lun | DISC_ALLOW;
2899 pSCB->SCB_Flags |= SCF_SENSE; /* Turn on auto request sense */
2900 dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->sense_buffer,
2901 SENSE_SIZE, DMA_FROM_DEVICE);
2902 pSCB->SCB_SensePtr = cpu_to_le32((u32)dma_addr);
2903 pSCB->SCB_SenseLen = cpu_to_le32(SENSE_SIZE);
2904 SCpnt->SCp.ptr = (char *)(unsigned long)dma_addr;
2905 2578
2906 pSCB->SCB_CDBLen = SCpnt->cmd_len; 2579 cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
2907 pSCB->SCB_HaStat = 0;
2908 pSCB->SCB_TaStat = 0;
2909 memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, SCpnt->cmd_len);
2910 2580
2911 if (SCpnt->device->tagged_supported) { /* Tag Support */ 2581 /* Map the sense buffer into bus memory */
2912 pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ 2582 dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
2583 SENSE_SIZE, DMA_FROM_DEVICE);
2584 cblk->senseptr = cpu_to_le32((u32)dma_addr);
2585 cblk->senselen = cpu_to_le32(SENSE_SIZE);
2586 cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
2587 cblk->cdblen = cmnd->cmd_len;
2588
2589 /* Clear the returned status */
2590 cblk->hastat = 0;
2591 cblk->tastat = 0;
2592 /* Command the command */
2593 memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len);
2594
2595 /* Set up tags */
2596 if (cmnd->device->tagged_supported) { /* Tag Support */
2597 cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
2913 } else { 2598 } else {
2914 pSCB->SCB_TagMsg = 0; /* No tag support */ 2599 cblk->tagmsg = 0; /* No tag support */
2915 } 2600 }
2601
2916 /* todo handle map_sg error */ 2602 /* todo handle map_sg error */
2917 if (SCpnt->use_sg) { 2603 nseg = scsi_dma_map(cmnd);
2918 dma_addr = dma_map_single(&pHCB->pci_dev->dev, &pSCB->SCB_SGList[0], 2604 BUG_ON(nseg < 0);
2919 sizeof(struct SG_Struc) * TOTAL_SG_ENTRY, 2605 if (nseg) {
2606 dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
2607 sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
2920 DMA_BIDIRECTIONAL); 2608 DMA_BIDIRECTIONAL);
2921 pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr); 2609 cblk->bufptr = cpu_to_le32((u32)dma_addr);
2922 SCpnt->SCp.dma_handle = dma_addr; 2610 cmnd->SCp.dma_handle = dma_addr;
2923 2611
2924 pSrbSG = (struct scatterlist *) SCpnt->request_buffer; 2612
2925 pSCB->SCB_SGLen = dma_map_sg(&pHCB->pci_dev->dev, pSrbSG, 2613 cblk->flags |= SCF_SG; /* Turn on SG list flag */
2926 SCpnt->use_sg, SCpnt->sc_data_direction); 2614 total_len = 0;
2927 2615 sg = &cblk->sglist[0];
2928 pSCB->SCB_Flags |= SCF_SG; /* Turn on SG list flag */ 2616 scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
2929 for (i = 0, TotalLen = 0, pSG = &pSCB->SCB_SGList[0]; /* 1.01g */ 2617 sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
2930 i < pSCB->SCB_SGLen; i++, pSG++, pSrbSG++) { 2618 total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
2931 pSG->SG_Ptr = cpu_to_le32((u32)sg_dma_address(pSrbSG));
2932 TotalLen += pSG->SG_Len = cpu_to_le32((u32)sg_dma_len(pSrbSG));
2933 } 2619 }
2934 2620
2935 pSCB->SCB_BufLen = (SCpnt->request_bufflen > TotalLen) ? 2621 cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
2936 TotalLen : SCpnt->request_bufflen; 2622 total_len : scsi_bufflen(cmnd);
2937 } else if (SCpnt->request_bufflen) { /* Non SG */ 2623 } else { /* No data transfer required */
2938 dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->request_buffer, 2624 cblk->buflen = 0;
2939 SCpnt->request_bufflen, 2625 cblk->sglen = 0;
2940 SCpnt->sc_data_direction);
2941 SCpnt->SCp.dma_handle = dma_addr;
2942 pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr);
2943 pSCB->SCB_BufLen = cpu_to_le32((u32)SCpnt->request_bufflen);
2944 pSCB->SCB_SGLen = 0;
2945 } else {
2946 pSCB->SCB_BufLen = 0;
2947 pSCB->SCB_SGLen = 0;
2948 } 2626 }
2949} 2627}
2950 2628
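[The conversion above is the data-buffer-accessor idiom applied across this whole merge: scsi_dma_map() replaces the old use_sg/request_buffer branching, and scsi_for_each_sg() walks the mapped list. A minimal sketch of the pattern — my_fill_sg_entry() is a hypothetical helper, not part of the driver:

	static int my_map_data(struct scsi_cmnd *cmnd)
	{
		struct scatterlist *sg;
		int i, nseg;

		nseg = scsi_dma_map(cmnd);	/* 0 = no data, <0 = error */
		if (nseg < 0)
			return nseg;

		/* one firmware SG element per mapped segment */
		scsi_for_each_sg(cmnd, sg, nseg, i)
			my_fill_sg_entry(i, sg_dma_address(sg), sg_dma_len(sg));

		return 0;
	}

The matching completion path calls scsi_dma_unmap() on the same command, as the converted drivers below do.]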
+/**
+ * i91u_queuecommand - Queue a new command if possible
+ * @cmd: SCSI command block from the mid layer
+ * @done: Completion handler
+ *
+ * Attempts to queue a new command with the host adapter. Will return
+ * zero if successful or indicate a host busy condition if not (which
+ * will cause the mid layer to call us again later with the command)
+ */
+
 static int i91u_queuecommand(struct scsi_cmnd *cmd,
 		void (*done)(struct scsi_cmnd *))
 {
-	HCS *pHCB = (HCS *) cmd->device->host->base;
-	register SCB *pSCB;
+	struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
+	struct scsi_ctrl_blk *cmnd;
 
 	cmd->scsi_done = done;
 
-	pSCB = tul_alloc_scb(pHCB);
-	if (!pSCB)
+	cmnd = initio_alloc_scb(host);
+	if (!cmnd)
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-	i91uBuildSCB(pHCB, pSCB, cmd);
-	tul_exec_scb(pHCB, pSCB);
+	initio_build_scb(host, cmnd, cmd);
+	initio_exec_scb(host, cmnd);
 	return 0;
 }
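[A sketch of the queuecommand contract relied on above: returning SCSI_MLQUEUE_HOST_BUSY tells the midlayer to hold the command and retry later rather than fail it. my_alloc_blk() and my_start_io() are hypothetical names:

	static int my_queuecommand(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
	{
		struct my_ctrl_blk *blk = my_alloc_blk();	/* hypothetical */

		if (!blk)
			return SCSI_MLQUEUE_HOST_BUSY;	/* out of resources: retry */
		cmd->scsi_done = done;
		my_start_io(blk, cmd);			/* hypothetical */
		return 0;
	}
]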
 
-#if 0 /* no new EH yet */
-/*
- * Abort a queued command
- * (commands that are on the bus can't be aborted easily)
- */
-static int i91u_abort(struct scsi_cmnd * SCpnt)
-{
-	HCS *pHCB;
-
-	pHCB = (HCS *) SCpnt->device->host->base;
-	return tul_abort_srb(pHCB, SCpnt);
-}
-
-/*
- * Reset registers, reset a hanging bus and
- * kill active and disconnected commands for target w/o soft reset
- */
-static int i91u_reset(struct scsi_cmnd * SCpnt, unsigned int reset_flags)
-{				/* I need Host Control Block Information */
-	HCS *pHCB;
-
-	pHCB = (HCS *) SCpnt->device->host->base;
-
-	if (reset_flags & (SCSI_RESET_SUGGEST_BUS_RESET | SCSI_RESET_SUGGEST_HOST_RESET))
-		return tul_reset_scsi_bus(pHCB);
-	else
-		return tul_device_reset(pHCB, SCpnt, SCpnt->device->id, reset_flags);
-}
-#endif
-
-static int i91u_bus_reset(struct scsi_cmnd * SCpnt)
+/**
+ * i91u_bus_reset - reset the SCSI bus
+ * @cmnd: Command block we want to trigger the reset for
+ *
+ * Initiate a SCSI bus reset sequence
+ */
+
+static int i91u_bus_reset(struct scsi_cmnd * cmnd)
 {
-	HCS *pHCB;
+	struct initio_host *host;
 
-	pHCB = (HCS *) SCpnt->device->host->base;
+	host = (struct initio_host *) cmnd->device->host->hostdata;
 
-	spin_lock_irq(SCpnt->device->host->host_lock);
-	tul_reset_scsi(pHCB, 0);
-	spin_unlock_irq(SCpnt->device->host->host_lock);
+	spin_lock_irq(cmnd->device->host->host_lock);
+	initio_reset_scsi(host, 0);
+	spin_unlock_irq(cmnd->device->host->host_lock);
 
 	return SUCCESS;
 }
 
-/*
- * Return the "logical geometry"
+/**
+ * i91u_biospararm - return the "logical geometry
+ * @sdev: SCSI device
+ * @dev; Matching block device
+ * @capacity: Sector size of drive
+ * @info_array: Return space for BIOS geometry
+ *
+ * Map the device geometry in a manner compatible with the host
+ * controller BIOS behaviour.
+ *
+ * FIXME: limited to 2^32 sector devices.
  */
+
 static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
 		sector_t capacity, int *info_array)
 {
-	HCS *pHcb;		/* Point to Host adapter control block */
-	TCS *pTcb;
+	struct initio_host *host;	/* Point to Host adapter control block */
+	struct target_control *tc;
 
-	pHcb = (HCS *) sdev->host->base;
-	pTcb = &pHcb->HCS_Tcs[sdev->id];
+	host = (struct initio_host *) sdev->host->hostdata;
+	tc = &host->targets[sdev->id];
 
-	if (pTcb->TCS_DrvHead) {
-		info_array[0] = pTcb->TCS_DrvHead;
-		info_array[1] = pTcb->TCS_DrvSector;
-		info_array[2] = (unsigned long)capacity / pTcb->TCS_DrvHead / pTcb->TCS_DrvSector;
+	if (tc->heads) {
+		info_array[0] = tc->heads;
+		info_array[1] = tc->sectors;
+		info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
 	} else {
-		if (pTcb->TCS_DrvFlags & TCF_DRV_255_63) {
+		if (tc->drv_flags & TCF_DRV_255_63) {
 			info_array[0] = 255;
 			info_array[1] = 63;
 			info_array[2] = (unsigned long)capacity / 255 / 63;
@@ -3047,7 +2722,16 @@ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
 	return 0;
 }
 
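[A worked sketch of the 255-head/63-sector BIOS geometry fallback used above — the cylinder count is just the capacity divided by heads times sectors, with integer truncation:

	/* Derive a BIOS-compatible CHS geometry from a sector count. */
	static void chs_from_capacity(unsigned long capacity, int geom[3])
	{
		geom[0] = 255;				/* heads */
		geom[1] = 63;				/* sectors per track */
		geom[2] = capacity / (255 * 63);	/* cylinders */
	}
]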
-static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
+/**
+ * i91u_unmap_scb - Unmap a command
+ * @pci_dev: PCI device the command is for
+ * @cmnd: The command itself
+ *
+ * Unmap any PCI mapping/IOMMU resources allocated when the command
+ * was mapped originally as part of initio_build_scb
+ */
+
+static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
 {
 	/* auto sense buffer */
 	if (cmnd->SCp.ptr) {
@@ -3058,65 +2742,63 @@ static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
 	}
 
 	/* request buffer */
-	if (cmnd->use_sg) {
+	if (scsi_sg_count(cmnd)) {
 		dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
-				 sizeof(struct SG_Struc) * TOTAL_SG_ENTRY,
+				 sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
 				 DMA_BIDIRECTIONAL);
 
-		dma_unmap_sg(&pci_dev->dev, cmnd->request_buffer,
-			     cmnd->use_sg,
-			     cmnd->sc_data_direction);
-	} else if (cmnd->request_bufflen) {
-		dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
-				 cmnd->request_bufflen,
-				 cmnd->sc_data_direction);
+		scsi_dma_unmap(cmnd);
 	}
 }
 
-/*****************************************************************************
- Function name  : i91uSCBPost
- Description    : This is callback routine be called when tulip finish one
-			SCSI command.
- Input          : pHCB  -       Pointer to host adapter control block.
-		   pSCB  -       Pointer to SCSI control block.
- Output         : None.
- Return         : None.
-*****************************************************************************/
-static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
-{
-	struct scsi_cmnd *pSRB;	/* Pointer to SCSI request block */
-	HCS *pHCB;
-	SCB *pSCB;
+/**
+ * i91uSCBPost - SCSI callback
+ * @host: Pointer to host adapter control block.
+ * @cmnd: Pointer to SCSI control block.
+ *
+ * This is callback routine be called when tulip finish one
+ * SCSI command.
+ */
 
-	pHCB = (HCS *) pHcb;
-	pSCB = (SCB *) pScb;
-	if ((pSRB = pSCB->SCB_Srb) == 0) {
-		printk("i91uSCBPost: SRB pointer is empty\n");
+static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
+{
+	struct scsi_cmnd *cmnd;	/* Pointer to SCSI request block */
+	struct initio_host *host;
+	struct scsi_ctrl_blk *cblk;
 
-		tul_release_scb(pHCB, pSCB);	/* Release SCB for current channel */
+	host = (struct initio_host *) host_mem;
+	cblk = (struct scsi_ctrl_blk *) cblk_mem;
+	if ((cmnd = cblk->srb) == NULL) {
+		printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
+		WARN_ON(1);
+		initio_release_scb(host, cblk);	/* Release SCB for current channel */
 		return;
 	}
-	switch (pSCB->SCB_HaStat) {
+
+	/*
+	 * Remap the firmware error status into a mid layer one
+	 */
+	switch (cblk->hastat) {
 	case 0x0:
 	case 0xa:		/* Linked command complete without error and linked normally */
 	case 0xb:		/* Linked command complete without error interrupt generated */
-		pSCB->SCB_HaStat = 0;
+		cblk->hastat = 0;
 		break;
 
 	case 0x11:		/* Selection time out-The initiator selection or target
 				   reselection was not complete within the SCSI Time out period */
-		pSCB->SCB_HaStat = DID_TIME_OUT;
+		cblk->hastat = DID_TIME_OUT;
 		break;
 
 	case 0x14:		/* Target bus phase sequence failure-An invalid bus phase or bus
 				   phase sequence was requested by the target. The host adapter
 				   will generate a SCSI Reset Condition, notifying the host with
 				   a SCRD interrupt */
-		pSCB->SCB_HaStat = DID_RESET;
+		cblk->hastat = DID_RESET;
 		break;
 
 	case 0x1a:		/* SCB Aborted. 07/21/98 */
-		pSCB->SCB_HaStat = DID_ABORT;
+		cblk->hastat = DID_ABORT;
 		break;
 
 	case 0x12:		/* Data overrun/underrun-The target attempted to transfer more data
@@ -3126,49 +2808,196 @@ static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
 	case 0x16:		/* Invalid SCB Operation Code. */
 
 	default:
-		printk("ini9100u: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat);
-		pSCB->SCB_HaStat = DID_ERROR;	/* Couldn't find any better */
+		printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
+		cblk->hastat = DID_ERROR;	/* Couldn't find any better */
 		break;
 	}
 
-	pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16);
+	cmnd->result = cblk->tastat | (cblk->hastat << 16);
+	WARN_ON(cmnd == NULL);
+	i91u_unmap_scb(host->pci_dev, cmnd);
+	cmnd->scsi_done(cmnd);	/* Notify system DONE */
+	initio_release_scb(host, cblk);	/* Release SCB for current channel */
+}
+
+static struct scsi_host_template initio_template = {
+	.proc_name		= "INI9100U",
+	.name			= "Initio INI-9X00U/UW SCSI device driver",
+	.queuecommand		= i91u_queuecommand,
+	.eh_bus_reset_handler	= i91u_bus_reset,
+	.bios_param		= i91u_biosparam,
+	.can_queue		= MAX_TARGETS * i91u_MAXQUEUE,
+	.this_id		= 1,
+	.sg_tablesize		= SG_ALL,
+	.cmd_per_lun		= 1,
+	.use_clustering		= ENABLE_CLUSTERING,
+};
+
+static int initio_probe_one(struct pci_dev *pdev,
+	const struct pci_device_id *id)
+{
+	struct Scsi_Host *shost;
+	struct initio_host *host;
+	u32 reg;
+	u16 bios_seg;
+	struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
+	int num_scb, i, error;
+
+	error = pci_enable_device(pdev);
+	if (error)
+		return error;
+
+	pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
+	bios_seg = (u16) (reg & 0xFF);
+	if (((reg & 0xFF00) >> 8) == 0xFF)
+		reg = 0;
+	bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
+
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_WARNING  "i91u: Could not set 32 bit DMA mask\n");
+		error = -ENODEV;
+		goto out_disable_device;
+	}
+	shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
+	if (!shost) {
+		printk(KERN_WARNING "initio: Could not allocate host structure.\n");
+		error = -ENOMEM;
+		goto out_disable_device;
+	}
+	host = (struct initio_host *)shost->hostdata;
+	memset(host, 0, sizeof(struct initio_host));
 
-	if (pSRB == NULL) {
-		printk("pSRB is NULL\n");
+	if (!request_region(host->addr, 256, "i91u")) {
+		printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
+		error = -ENODEV;
+		goto out_host_put;
 	}
 
-	i91u_unmap_cmnd(pHCB->pci_dev, pSRB);
-	pSRB->scsi_done(pSRB);	/* Notify system DONE */
+	if (initio_tag_enable)	/* 1.01i */
+		num_scb = MAX_TARGETS * i91u_MAXQUEUE;
+	else
+		num_scb = MAX_TARGETS + 3;	/* 1-tape, 1-CD_ROM, 1- extra */
 
-	tul_release_scb(pHCB, pSCB);	/* Release SCB for current channel */
-}
+	for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
+		i = num_scb * sizeof(struct scsi_ctrl_blk);
+		if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+			break;
+	}
+
+	if (!scb) {
+		printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
+		error = -ENOMEM;
+		goto out_release_region;
+	}
 
-/*
- * Release ressources
+	host->num_scbs = num_scb;
+	host->scb = scb;
+	host->next_pending = scb;
+	host->next_avail = scb;
+	for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
+		tmp->tagid = i;
+		if (i != 0)
+			prev->next = tmp;
+		prev = tmp;
+	}
+	prev->next = NULL;
+	host->scb_end = tmp;
+	host->first_avail = scb;
+	host->last_avail = prev;
+
+	initio_init(host, phys_to_virt(bios_seg << 4));
+
+	host->jsstatus0 = 0;
+
+	shost->io_port = host->addr;
+	shost->n_io_port = 0xff;
+	shost->can_queue = num_scb;		/* 03/05/98 */
+	shost->unique_id = host->addr;
+	shost->max_id = host->max_tar;
+	shost->max_lun = 32;	/* 10/21/97 */
+	shost->irq = pdev->irq;
+	shost->this_id = host->scsi_id;	/* Assign HCS index */
+	shost->base = host->addr;
+	shost->sg_tablesize = TOTAL_SG_ENTRY;
+
+	error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
+	if (error < 0) {
+		printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
+		goto out_free_scbs;
+	}
+
+	pci_set_drvdata(pdev, shost);
+	host->pci_dev = pdev;
+
+	error = scsi_add_host(shost, &pdev->dev);
+	if (error)
+		goto out_free_irq;
+	scsi_scan_host(shost);
+	return 0;
+out_free_irq:
+	free_irq(pdev->irq, shost);
+out_free_scbs:
+	kfree(host->scb);
+out_release_region:
+	release_region(host->addr, 256);
+out_host_put:
+	scsi_host_put(shost);
+out_disable_device:
+	pci_disable_device(pdev);
+	return error;
+}
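[The probe above tears down in strict reverse order of acquisition via a goto ladder. A minimal sketch of the idiom with hypothetical acquire/release helpers:

	static int my_probe(void)
	{
		int error;

		error = acquire_a();	/* hypothetical step 1 */
		if (error)
			return error;
		error = acquire_b();	/* hypothetical step 2 */
		if (error)
			goto out_release_a;
		return 0;

	out_release_a:
		release_a();		/* undo step 1 only */
		return error;
	}

Each failure label releases exactly what was acquired before it, so no path leaks or double-frees.]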
+
+/**
+ * initio_remove_one - control shutdown
+ * @pdev: PCI device being released
+ *
+ * Release the resources assigned to this adapter after it has
+ * finished being used.
  */
-static int i91u_release(struct Scsi_Host *hreg)
+
+static void initio_remove_one(struct pci_dev *pdev)
 {
-	free_irq(hreg->irq, hreg);
-	release_region(hreg->io_port, 256);
-	return 0;
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct initio_host *s = (struct initio_host *)host->hostdata;
+	scsi_remove_host(host);
+	free_irq(pdev->irq, host);
+	release_region(s->addr, 256);
+	scsi_host_put(host);
+	pci_disable_device(pdev);
 }
-MODULE_LICENSE("Dual BSD/GPL");
 
-static struct scsi_host_template driver_template = {
-	.proc_name		= "INI9100U",
-	.name			= i91u_REVID,
-	.detect			= i91u_detect,
-	.release		= i91u_release,
-	.queuecommand		= i91u_queuecommand,
-//	.abort			= i91u_abort,
-//	.reset			= i91u_reset,
-	.eh_bus_reset_handler	= i91u_bus_reset,
-	.bios_param		= i91u_biosparam,
-	.can_queue		= 1,
-	.this_id		= 1,
-	.sg_tablesize		= SG_ALL,
-	.cmd_per_lun 		= 1,
-	.use_clustering		= ENABLE_CLUSTERING,
+MODULE_LICENSE("GPL");
+
+static struct pci_device_id initio_pci_tbl[] = {
+	{PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
+
+static struct pci_driver initio_pci_driver = {
+	.name		= "initio",
+	.id_table	= initio_pci_tbl,
+	.probe		= initio_probe_one,
+	.remove		= __devexit_p(initio_remove_one),
 };
-#include "scsi_module.c"
 
+static int __init initio_init_driver(void)
+{
+	return pci_register_driver(&initio_pci_driver);
+}
+
+static void __exit initio_exit_driver(void)
+{
+	pci_unregister_driver(&initio_pci_driver);
+}
+
+MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
+MODULE_AUTHOR("Initio Corporation");
+MODULE_LICENSE("GPL");
+
+module_init(initio_init_driver);
+module_exit(initio_exit_driver);
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index acb67a4af2cc..cb48efa81fe2 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,6 +4,8 @@
  * Copyright (c) 1994-1998 Initio Corporation
  * All rights reserved.
  *
+ * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2, or (at your option)
@@ -18,27 +20,6 @@
  * along with this program; see the file COPYING.  If not, write to
  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * --------------------------------------------------------------------------
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification, immediately at the beginning of the file.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -56,17 +37,6 @@
 
 #include <linux/types.h>
 
-#define ULONG   unsigned long
-#define USHORT  unsigned short
-#define UCHAR   unsigned char
-#define BYTE    unsigned char
-#define WORD    unsigned short
-#define DWORD   unsigned long
-#define UBYTE   unsigned char
-#define UWORD   unsigned short
-#define UDWORD  unsigned long
-#define U32     u32
-
 #define TOTAL_SG_ENTRY		32
 #define MAX_SUPPORTED_ADAPTERS  8
 #define MAX_OFFSET		15
@@ -368,55 +338,55 @@ typedef struct {
 /************************************************************************/
 /*              Scatter-Gather Element Structure                        */
 /************************************************************************/
-typedef struct SG_Struc {
-	U32 SG_Ptr;		/* Data Pointer */
-	U32 SG_Len;		/* Data Length */
-} SG;
+struct sg_entry {
+	u32 data;		/* Data Pointer */
+	u32 len;		/* Data Length */
+};
 
 /***********************************************************************
 		SCSI Control Block
 ************************************************************************/
-typedef struct Scsi_Ctrl_Blk {
-	struct Scsi_Ctrl_Blk *SCB_NxtScb;
-	UBYTE SCB_Status;	/*4 */
-	UBYTE SCB_NxtStat;	/*5 */
-	UBYTE SCB_Mode;		/*6 */
-	UBYTE SCB_Msgin;	/*7 SCB_Res0 */
-	UWORD SCB_SGIdx;	/*8 */
-	UWORD SCB_SGMax;	/*A */
+struct scsi_ctrl_blk {
+	struct scsi_ctrl_blk *next;
+	u8 status;	/*4 */
+	u8 next_state;	/*5 */
+	u8 mode;	/*6 */
+	u8 msgin;	/*7 SCB_Res0 */
+	u16 sgidx;	/*8 */
+	u16 sgmax;	/*A */
 #ifdef ALPHA
-	U32 SCB_Reserved[2];	/*C */
+	u32 reserved[2];	/*C */
 #else
-	U32 SCB_Reserved[3];	/*C */
+	u32 reserved[3];	/*C */
 #endif
 
-	U32 SCB_XferLen;	/*18 Current xfer len           */
-	U32 SCB_TotXLen;	/*1C Total xfer len             */
-	U32 SCB_PAddr;		/*20 SCB phy. Addr. */
+	u32 xferlen;	/*18 Current xfer len           */
+	u32 totxlen;	/*1C Total xfer len             */
+	u32 paddr;	/*20 SCB phy. Addr. */
 
-	UBYTE SCB_Opcode;	/*24 SCB command code */
-	UBYTE SCB_Flags;	/*25 SCB Flags */
-	UBYTE SCB_Target;	/*26 Target Id */
-	UBYTE SCB_Lun;		/*27 Lun */
-	U32 SCB_BufPtr;		/*28 Data Buffer Pointer */
-	U32 SCB_BufLen;		/*2C Data Allocation Length */
-	UBYTE SCB_SGLen;	/*30 SG list # */
-	UBYTE SCB_SenseLen;	/*31 Sense Allocation Length */
-	UBYTE SCB_HaStat;	/*32 */
-	UBYTE SCB_TaStat;	/*33 */
-	UBYTE SCB_CDBLen;	/*34 CDB Length */
-	UBYTE SCB_Ident;	/*35 Identify */
-	UBYTE SCB_TagMsg;	/*36 Tag Message */
-	UBYTE SCB_TagId;	/*37 Queue Tag */
-	UBYTE SCB_CDB[12];	/*38 */
-	U32 SCB_SGPAddr;	/*44 SG List/Sense Buf phy. Addr. */
-	U32 SCB_SensePtr;	/*48 Sense data pointer */
-	void (*SCB_Post) (BYTE *, BYTE *);	/*4C POST routine */
-	struct scsi_cmnd *SCB_Srb;	/*50 SRB Pointer */
-	SG SCB_SGList[TOTAL_SG_ENTRY];	/*54 Start of SG list */
-} SCB;
+	u8 opcode;	/*24 SCB command code */
+	u8 flags;	/*25 SCB Flags */
+	u8 target;	/*26 Target Id */
+	u8 lun;		/*27 Lun */
+	u32 bufptr;	/*28 Data Buffer Pointer */
+	u32 buflen;	/*2C Data Allocation Length */
+	u8 sglen;	/*30 SG list # */
+	u8 senselen;	/*31 Sense Allocation Length */
+	u8 hastat;	/*32 */
+	u8 tastat;	/*33 */
+	u8 cdblen;	/*34 CDB Length */
+	u8 ident;	/*35 Identify */
+	u8 tagmsg;	/*36 Tag Message */
+	u8 tagid;	/*37 Queue Tag */
+	u8 cdb[12];	/*38 */
+	u32 sgpaddr;	/*44 SG List/Sense Buf phy. Addr. */
+	u32 senseptr;	/*48 Sense data pointer */
+	void (*post) (u8 *, u8 *);	/*4C POST routine */
+	struct scsi_cmnd *srb;	/*50 SRB Pointer */
+	struct sg_entry sglist[TOTAL_SG_ENTRY];	/*54 Start of SG list */
+};
 
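[These control blocks are read directly by the controller, so the fields are fixed-width and stored little-endian regardless of host byte order. A sketch of how the driver fills one SG element, mirroring initio_build_scb above:

	/* The controller reads data/len as little-endian 32-bit values,
	 * so convert explicitly with cpu_to_le32(). */
	static void fill_sg(struct sg_entry *sg, dma_addr_t addr, u32 len)
	{
		sg->data = cpu_to_le32((u32)addr);
		sg->len  = cpu_to_le32(len);
	}
]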
-/* Bit Definition for SCB_Status */
+/* Bit Definition for status */
 #define SCB_RENT        0x01
 #define SCB_PEND        0x02
 #define SCB_CONTIG      0x04	/* Contigent Allegiance */
@@ -425,17 +395,17 @@ typedef struct Scsi_Ctrl_Blk {
 #define SCB_DONE        0x20
 
 
-/* Opcodes of SCB_Opcode */
+/* Opcodes for opcode */
 #define ExecSCSI        0x1
 #define BusDevRst       0x2
 #define AbortCmd        0x3
 
 
-/* Bit Definition for SCB_Mode */
+/* Bit Definition for mode */
 #define SCM_RSENS       0x01	/* request sense mode */
 
 
-/* Bit Definition for SCB_Flags */
+/* Bit Definition for flags */
 #define SCF_DONE        0x01
 #define SCF_POST        0x02
 #define SCF_SENSE       0x04
@@ -492,15 +462,14 @@ typedef struct Scsi_Ctrl_Blk {
 	Target Device Control Structure
 **********************************************************************/
 
-typedef struct Tar_Ctrl_Struc {
-	UWORD TCS_Flags;	/* 0 */
-	UBYTE TCS_JS_Period;	/* 2 */
-	UBYTE TCS_SConfig0;	/* 3 */
-
-	UWORD TCS_DrvFlags;	/* 4 */
-	UBYTE TCS_DrvHead;	/* 6 */
-	UBYTE TCS_DrvSector;	/* 7 */
-} TCS;
+struct target_control {
+	u16 flags;
+	u8 js_period;
+	u8 sconfig0;
+	u16 drv_flags;
+	u8 heads;
+	u8 sectors;
+};
 
 /***********************************************************************
 		Target Device Control Structure
@@ -523,62 +492,53 @@ typedef struct Tar_Ctrl_Struc {
 #define TCF_DRV_EN_TAG          0x0800
 #define TCF_DRV_255_63          0x0400
 
-typedef struct I91u_Adpt_Struc {
-	UWORD ADPT_BIOS;	/* 0 */
-	UWORD ADPT_BASE;	/* 1 */
-	UBYTE ADPT_Bus;		/* 2 */
-	UBYTE ADPT_Device;	/* 3 */
-	UBYTE ADPT_INTR;	/* 4 */
-} INI_ADPT_STRUCT;
-
-
 /***********************************************************************
 		Host Adapter Control Structure
 ************************************************************************/
-typedef struct Ha_Ctrl_Struc {
-	UWORD HCS_Base;		/* 00 */
-	UWORD HCS_BIOS;		/* 02 */
-	UBYTE HCS_Intr;		/* 04 */
-	UBYTE HCS_SCSI_ID;	/* 05 */
-	UBYTE HCS_MaxTar;	/* 06 */
-	UBYTE HCS_NumScbs;	/* 07 */
-
-	UBYTE HCS_Flags;	/* 08 */
-	UBYTE HCS_Index;	/* 09 */
-	UBYTE HCS_HaId;		/* 0A */
-	UBYTE HCS_Config;	/* 0B */
-	UWORD HCS_IdMask;	/* 0C */
-	UBYTE HCS_Semaph;	/* 0E */
-	UBYTE HCS_Phase;	/* 0F */
-	UBYTE HCS_JSStatus0;	/* 10 */
-	UBYTE HCS_JSInt;	/* 11 */
-	UBYTE HCS_JSStatus1;	/* 12 */
-	UBYTE HCS_SConf1;	/* 13 */
-
-	UBYTE HCS_Msg[8];	/* 14 */
-	SCB *HCS_NxtAvail;	/* 1C */
-	SCB *HCS_Scb;		/* 20 */
-	SCB *HCS_ScbEnd;	/* 24 */
-	SCB *HCS_NxtPend;	/* 28 */
-	SCB *HCS_NxtContig;	/* 2C */
-	SCB *HCS_ActScb;	/* 30 */
-	TCS *HCS_ActTcs;	/* 34 */
-
-	SCB *HCS_FirstAvail;	/* 38 */
-	SCB *HCS_LastAvail;	/* 3C */
-	SCB *HCS_FirstPend;	/* 40 */
-	SCB *HCS_LastPend;	/* 44 */
-	SCB *HCS_FirstBusy;	/* 48 */
-	SCB *HCS_LastBusy;	/* 4C */
-	SCB *HCS_FirstDone;	/* 50 */
-	SCB *HCS_LastDone;	/* 54 */
-	UBYTE HCS_MaxTags[16];	/* 58 */
-	UBYTE HCS_ActTags[16];	/* 68 */
-	TCS HCS_Tcs[MAX_TARGETS];	/* 78 */
-	spinlock_t HCS_AvailLock;
-	spinlock_t HCS_SemaphLock;
+struct initio_host {
+	u16 addr;		/* 00 */
+	u16 bios_addr;		/* 02 */
+	u8 irq;			/* 04 */
+	u8 scsi_id;		/* 05 */
+	u8 max_tar;		/* 06 */
+	u8 num_scbs;		/* 07 */
+
+	u8 flags;		/* 08 */
+	u8 index;		/* 09 */
+	u8 ha_id;		/* 0A */
+	u8 config;		/* 0B */
+	u16 idmask;		/* 0C */
+	u8 semaph;		/* 0E */
+	u8 phase;		/* 0F */
+	u8 jsstatus0;		/* 10 */
+	u8 jsint;		/* 11 */
+	u8 jsstatus1;		/* 12 */
+	u8 sconf1;		/* 13 */
+
+	u8 msg[8];		/* 14 */
+	struct scsi_ctrl_blk *next_avail;	/* 1C */
+	struct scsi_ctrl_blk *scb;		/* 20 */
+	struct scsi_ctrl_blk *scb_end;		/* 24 */ /*UNUSED*/
+	struct scsi_ctrl_blk *next_pending;	/* 28 */
+	struct scsi_ctrl_blk *next_contig;	/* 2C */ /*UNUSED*/
+	struct scsi_ctrl_blk *active;		/* 30 */
+	struct target_control *active_tc;	/* 34 */
+
+	struct scsi_ctrl_blk *first_avail;	/* 38 */
+	struct scsi_ctrl_blk *last_avail;	/* 3C */
+	struct scsi_ctrl_blk *first_pending;	/* 40 */
+	struct scsi_ctrl_blk *last_pending;	/* 44 */
+	struct scsi_ctrl_blk *first_busy;	/* 48 */
+	struct scsi_ctrl_blk *last_busy;	/* 4C */
+	struct scsi_ctrl_blk *first_done;	/* 50 */
+	struct scsi_ctrl_blk *last_done;	/* 54 */
+	u8 max_tags[16];	/* 58 */
+	u8 act_tags[16];	/* 68 */
+	struct target_control targets[MAX_TARGETS];	/* 78 */
+	spinlock_t avail_lock;
+	spinlock_t semaph_lock;
 	struct pci_dev *pci_dev;
-} HCS;
+};
 
 /* Bit Definition for HCB_Config */
 #define HCC_SCSI_RESET          0x01
@@ -599,47 +559,47 @@ typedef struct Ha_Ctrl_Struc {
 *******************************************************************/
 
 typedef struct _NVRAM_SCSI {	/* SCSI channel configuration */
-	UCHAR NVM_ChSCSIID;	/* 0Ch -> Channel SCSI ID */
-	UCHAR NVM_ChConfig1;	/* 0Dh -> Channel config 1 */
-	UCHAR NVM_ChConfig2;	/* 0Eh -> Channel config 2 */
-	UCHAR NVM_NumOfTarg;	/* 0Fh -> Number of SCSI target */
+	u8 NVM_ChSCSIID;	/* 0Ch -> Channel SCSI ID */
+	u8 NVM_ChConfig1;	/* 0Dh -> Channel config 1 */
+	u8 NVM_ChConfig2;	/* 0Eh -> Channel config 2 */
+	u8 NVM_NumOfTarg;	/* 0Fh -> Number of SCSI target */
 	/* SCSI target configuration */
-	UCHAR NVM_Targ0Config;	/* 10h -> Target 0 configuration */
-	UCHAR NVM_Targ1Config;	/* 11h -> Target 1 configuration */
-	UCHAR NVM_Targ2Config;	/* 12h -> Target 2 configuration */
-	UCHAR NVM_Targ3Config;	/* 13h -> Target 3 configuration */
-	UCHAR NVM_Targ4Config;	/* 14h -> Target 4 configuration */
-	UCHAR NVM_Targ5Config;	/* 15h -> Target 5 configuration */
-	UCHAR NVM_Targ6Config;	/* 16h -> Target 6 configuration */
-	UCHAR NVM_Targ7Config;	/* 17h -> Target 7 configuration */
-	UCHAR NVM_Targ8Config;	/* 18h -> Target 8 configuration */
-	UCHAR NVM_Targ9Config;	/* 19h -> Target 9 configuration */
-	UCHAR NVM_TargAConfig;	/* 1Ah -> Target A configuration */
-	UCHAR NVM_TargBConfig;	/* 1Bh -> Target B configuration */
-	UCHAR NVM_TargCConfig;	/* 1Ch -> Target C configuration */
-	UCHAR NVM_TargDConfig;	/* 1Dh -> Target D configuration */
-	UCHAR NVM_TargEConfig;	/* 1Eh -> Target E configuration */
-	UCHAR NVM_TargFConfig;	/* 1Fh -> Target F configuration */
+	u8 NVM_Targ0Config;	/* 10h -> Target 0 configuration */
+	u8 NVM_Targ1Config;	/* 11h -> Target 1 configuration */
+	u8 NVM_Targ2Config;	/* 12h -> Target 2 configuration */
+	u8 NVM_Targ3Config;	/* 13h -> Target 3 configuration */
+	u8 NVM_Targ4Config;	/* 14h -> Target 4 configuration */
+	u8 NVM_Targ5Config;	/* 15h -> Target 5 configuration */
+	u8 NVM_Targ6Config;	/* 16h -> Target 6 configuration */
+	u8 NVM_Targ7Config;	/* 17h -> Target 7 configuration */
+	u8 NVM_Targ8Config;	/* 18h -> Target 8 configuration */
+	u8 NVM_Targ9Config;	/* 19h -> Target 9 configuration */
+	u8 NVM_TargAConfig;	/* 1Ah -> Target A configuration */
+	u8 NVM_TargBConfig;	/* 1Bh -> Target B configuration */
+	u8 NVM_TargCConfig;	/* 1Ch -> Target C configuration */
+	u8 NVM_TargDConfig;	/* 1Dh -> Target D configuration */
+	u8 NVM_TargEConfig;	/* 1Eh -> Target E configuration */
+	u8 NVM_TargFConfig;	/* 1Fh -> Target F configuration */
 } NVRAM_SCSI;
 
 typedef struct _NVRAM {
 /*----------header ---------------*/
-	USHORT NVM_Signature;	/* 0,1: Signature */
-	UCHAR NVM_Size;		/* 2: Size of data structure */
-	UCHAR NVM_Revision;	/* 3: Revision of data structure */
+	u16 NVM_Signature;	/* 0,1: Signature */
+	u8 NVM_Size;		/* 2: Size of data structure */
+	u8 NVM_Revision;	/* 3: Revision of data structure */
 	/* ----Host Adapter Structure ---- */
-	UCHAR NVM_ModelByte0;	/* 4: Model number (byte 0) */
-	UCHAR NVM_ModelByte1;	/* 5: Model number (byte 1) */
-	UCHAR NVM_ModelInfo;	/* 6: Model information */
-	UCHAR NVM_NumOfCh;	/* 7: Number of SCSI channel */
-	UCHAR NVM_BIOSConfig1;	/* 8: BIOS configuration 1 */
-	UCHAR NVM_BIOSConfig2;	/* 9: BIOS configuration 2 */
-	UCHAR NVM_HAConfig1;	/* A: Hoat adapter configuration 1 */
-	UCHAR NVM_HAConfig2;	/* B: Hoat adapter configuration 2 */
+	u8 NVM_ModelByte0;	/* 4: Model number (byte 0) */
+	u8 NVM_ModelByte1;	/* 5: Model number (byte 1) */
+	u8 NVM_ModelInfo;	/* 6: Model information */
+	u8 NVM_NumOfCh;		/* 7: Number of SCSI channel */
+	u8 NVM_BIOSConfig1;	/* 8: BIOS configuration 1 */
+	u8 NVM_BIOSConfig2;	/* 9: BIOS configuration 2 */
+	u8 NVM_HAConfig1;	/* A: Hoat adapter configuration 1 */
+	u8 NVM_HAConfig2;	/* B: Hoat adapter configuration 2 */
 	NVRAM_SCSI NVM_SCSIInfo[2];
-	UCHAR NVM_reserved[10];
+	u8 NVM_reserved[10];
 	/* ---------- CheckSum ----------         */
-	USHORT NVM_CheckSum;	/* 0x3E, 0x3F: Checksum of NVRam */
+	u16 NVM_CheckSum;	/* 0x3E, 0x3F: Checksum of NVRam */
 } NVRAM, *PNVRAM;
 
 /* Bios Configuration for nvram->BIOSConfig1 */
@@ -681,19 +641,6 @@ typedef struct _NVRAM {
 #define DISC_ALLOW              0xC0	/* Disconnect is allowed */
 #define SCSICMD_RequestSense    0x03
 
-typedef struct _HCSinfo {
-	ULONG base;
-	UCHAR vec;
-	UCHAR bios;	/* High byte of BIOS address */
-	USHORT BaseAndBios;	/* high byte: pHcsInfo->bios,low byte:pHcsInfo->base */
-} HCSINFO;
-
-#define TUL_RD(x,y)             (UCHAR)(inb(  (int)((ULONG)(x+y)) ))
-#define TUL_RDLONG(x,y)         (ULONG)(inl( (int)((ULONG)(x+y)) ))
-#define TUL_WR( adr,data)       outb( (UCHAR)(data), (int)(adr))
-#define TUL_WRSHORT(adr,data)   outw( (UWORD)(data), (int)(adr))
-#define TUL_WRLONG( adr,data)   outl( (ULONG)(data), (int)(adr))
-
 #define SCSI_ABORT_SNOOZE 0
 #define SCSI_ABORT_SUCCESS 1
 #define SCSI_ABORT_PENDING 2
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b3bf77f1ec05..f142eafb6fc7 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 }
 
 /**
- * ipr_unmap_sglist - Unmap scatterlist if mapped
- * @ioa_cfg:	ioa config struct
- * @ipr_cmd:	ipr command struct
- *
- * Return value:
- * 	nothing
- **/
-static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
-			     struct ipr_cmnd *ipr_cmd)
-{
-	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-
-	if (ipr_cmd->dma_use_sg) {
-		if (scsi_cmd->use_sg > 0) {
-			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
-				     scsi_cmd->use_sg,
-				     scsi_cmd->sc_data_direction);
-		} else {
-			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
-					 scsi_cmd->request_bufflen,
-					 scsi_cmd->sc_data_direction);
-		}
-	}
-}
-
-/**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
  * @clr_ints:     interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
 	scsi_cmd->result |= (DID_ERROR << 16);
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	scsi_cmd->scsi_done(scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 }
@@ -4298,93 +4272,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
 			   struct ipr_cmnd *ipr_cmd)
 {
-	int i;
-	struct scatterlist *sglist;
+	int i, nseg;
+	struct scatterlist *sg;
 	u32 length;
 	u32 ioadl_flags = 0;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 
-	length = scsi_cmd->request_bufflen;
-
-	if (length == 0)
+	length = scsi_bufflen(scsi_cmd);
+	if (!length)
 		return 0;
 
-	if (scsi_cmd->use_sg) {
-		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
-						 scsi_cmd->request_buffer,
-						 scsi_cmd->use_sg,
-						 scsi_cmd->sc_data_direction);
-
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len =
-				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
-		}
-
-		sglist = scsi_cmd->request_buffer;
+	nseg = scsi_dma_map(scsi_cmd);
+	if (nseg < 0) {
+		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		return -1;
+	}
 
-		if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-		}
+	ipr_cmd->dma_use_sg = nseg;
 
-		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
-			ioadl[i].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
-			ioadl[i].address =
-				cpu_to_be32(sg_dma_address(&sglist[i]));
-		}
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->write_data_transfer_length = cpu_to_be32(length);
+		ioarcb->write_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+		ioarcb->read_data_transfer_length = cpu_to_be32(length);
+		ioarcb->read_ioadl_len =
+			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+	}
 
-		if (likely(ipr_cmd->dma_use_sg)) {
-			ioadl[i-1].flags_and_data_len |=
-				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
-	} else {
-		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-			ioarcb->write_data_transfer_length = cpu_to_be32(length);
-			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
-			ioadl_flags = IPR_IOADL_FLAGS_READ;
-			ioarcb->read_data_transfer_length = cpu_to_be32(length);
-			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		}
+	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
+		ioadl = ioarcb->add_data.u.ioadl;
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
+				    offsetof(struct ipr_ioarcb, add_data));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
 
-		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
-						     scsi_cmd->request_buffer, length,
-						     scsi_cmd->sc_data_direction);
-
-		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
-			ioadl = ioarcb->add_data.u.ioadl;
-			ioarcb->write_ioadl_addr =
-				cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-					    offsetof(struct ipr_ioarcb, add_data));
-			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-			ipr_cmd->dma_use_sg = 1;
-			ioadl[0].flags_and_data_len =
-				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
-			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
-			return 0;
-		} else
-			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+		ioadl[i].flags_and_data_len =
+			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
 	}
 
-	return -1;
+	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	return 0;
 }
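[The return-value convention of scsi_dma_map() is what lets ipr_build_ioadl collapse its two mapping branches into one. A fragment-level sketch, with my_hw_queue() and my_fill_desc() as hypothetical stand-ins:

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0)
		return -1;			/* mapping failed */
	if (nseg == 0)
		return my_hw_queue(NULL, 0);	/* no data phase at all */
	scsi_for_each_sg(scsi_cmd, sg, nseg, i)
		my_fill_desc(i, sg);		/* one descriptor per mapped segment */

Single-buffer commands simply arrive as a one-entry scatterlist, so the pci_map_single() path disappears.]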
 
 /**
@@ -4447,7 +4383,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
 		res->needs_sync_complete = 1;
 		res->in_erp = 0;
 	}
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4825,7 +4761,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	}
 
-	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+	scsi_dma_unmap(ipr_cmd->scsi_cmd);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	scsi_cmd->scsi_done(scsi_cmd);
 }
@@ -4846,10 +4782,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
-	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+		scsi_dma_unmap(ipr_cmd->scsi_cmd);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
 	} else
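[The scsi_set_resid() call above replaces direct writes to scsi_cmd->resid; resid counts the bytes *not* transferred. A minimal fragment of the accessor pair, with expected/done as hypothetical byte counts:

	scsi_set_resid(scsi_cmd, expected - done);	/* report a short transfer */
	resid = scsi_get_resid(scsi_cmd);		/* read it back on completion */
]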
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 40f148e0833f..9f8ed6b81576 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -211,19 +211,6 @@ module_param(ips, charp, 0);
 #warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
 #endif
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
-#include <linux/blk.h>
-#include "sd.h"
-#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
-#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#ifndef __devexit_p
-#define __devexit_p(x) x
-#endif
-#else
-#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
-#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
-#endif
-
 #define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
                          DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
                          PCI_DMA_BIDIRECTIONAL : \
@@ -381,24 +368,13 @@ static struct scsi_host_template ips_driver_template = {
 	.eh_abort_handler	= ips_eh_abort,
 	.eh_host_reset_handler	= ips_eh_reset,
 	.proc_name		= "ips",
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
 	.proc_info		= ips_proc_info,
 	.slave_configure	= ips_slave_configure,
-#else
-	.proc_info		= ips_proc24_info,
-	.select_queue_depths	= ips_select_queue_depth,
-#endif
 	.bios_param		= ips_biosparam,
 	.this_id		= -1,
 	.sg_tablesize		= IPS_MAX_SG,
 	.cmd_per_lun		= 3,
 	.use_clustering		= ENABLE_CLUSTERING,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	.use_new_eh_code	= 1,
-#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	.highmem_io		= 1,
-#endif
 };
 
 
@@ -731,7 +707,7 @@ ips_release(struct Scsi_Host *sh)
 	/* free IRQ */
 	free_irq(ha->irq, ha);
 
-	IPS_REMOVE_HOST(sh);
+	scsi_remove_host(sh);
 	scsi_host_put(sh);
 
 	ips_released_controllers++;
@@ -813,7 +789,6 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 	ips_ha_t *ha;
 	ips_copp_wait_item_t *item;
 	int ret;
-	unsigned long cpu_flags;
 	struct Scsi_Host *host;
 
 	METHOD_TRACE("ips_eh_abort", 1);
@@ -830,7 +805,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 	if (!ha->active)
 		return (FAILED);
 
-	IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+	spin_lock(host->host_lock);
 
 	/* See if the command is on the copp queue */
 	item = ha->copp_waitlist.head;
@@ -851,7 +826,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
 		ret = (FAILED);
 	}
 
-	IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+	spin_unlock(host->host_lock);
 	return ret;
 }
 
@@ -1129,7 +1104,7 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 		/* A Reset IOCTL is only sent by the boot CD in extreme cases. */
 		/* There can never be any system activity ( network or disk ), but check */
 		/* anyway just as a good practice. */
-		pt = (ips_passthru_t *) SC->request_buffer;
+		pt = (ips_passthru_t *) scsi_sglist(SC);
 		if ((pt->CoppCP.cmd.reset.op_code == IPS_CMD_RESET_CHANNEL) &&
 		    (pt->CoppCP.cmd.reset.adapter_flag == 1)) {
 			if (ha->scb_activelist.count != 0) {
@@ -1176,18 +1151,10 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
 /*   Set bios geometry for the controller                                   */
 /*                                                                          */
 /****************************************************************************/
-static int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ips_biosparam(Disk * disk, kdev_t dev, int geom[])
-{
-	ips_ha_t *ha = (ips_ha_t *) disk->device->host->hostdata;
-	unsigned long capacity = disk->capacity;
-#else
-ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
-	      sector_t capacity, int geom[])
+static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+			 sector_t capacity, int geom[])
 {
 	ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
-#endif
 	int heads;
 	int sectors;
 	int cylinders;
@@ -1225,70 +1192,6 @@ ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
 	return (0);
 }
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-
-/* ips_proc24_info is a wrapper around ips_proc_info *
- * for compatibility with the 2.4 scsi parameters    */
-static int
-ips_proc24_info(char *buffer, char **start, off_t offset, int length,
-	        int hostno, int func)
-{
-	int i;
-
-	for (i = 0; i < ips_next_controller; i++) {
-		if (ips_sh[i] && ips_sh[i]->host_no == hostno) {
-			return ips_proc_info(ips_sh[i], buffer, start,
-					     offset, length, func);
-		}
-	}
-	return -EINVAL;
-}
-
-/****************************************************************************/
-/*                                                                          */
-/* Routine Name: ips_select_queue_depth                                     */
-/*                                                                          */
-/* Routine Description:                                                     */
-/*                                                                          */
-/*   Select queue depths for the devices on the contoller                   */
-/*                                                                          */
-/****************************************************************************/
-static void
-ips_select_queue_depth(struct Scsi_Host *host, struct scsi_device * scsi_devs)
-{
-	struct scsi_device *device;
-	ips_ha_t *ha;
-	int count = 0;
-	int min;
-
-	ha = IPS_HA(host);
-	min = ha->max_cmds / 4;
-
-	for (device = scsi_devs; device; device = device->next) {
-		if (device->host == host) {
-			if ((device->channel == 0) && (device->type == 0))
-				count++;
-		}
-	}
-
-	for (device = scsi_devs; device; device = device->next) {
-		if (device->host == host) {
-			if ((device->channel == 0) && (device->type == 0)) {
-				device->queue_depth =
-				    (ha->max_cmds - 1) / count;
-				if (device->queue_depth < min)
-					device->queue_depth = min;
-			} else {
-				device->queue_depth = 2;
-			}
-
-			if (device->queue_depth < 2)
-				device->queue_depth = 2;
-		}
-	}
-}
-
-#else
 /****************************************************************************/
 /*                                                                          */
 /* Routine Name: ips_slave_configure                                        */
@@ -1316,7 +1219,6 @@ ips_slave_configure(struct scsi_device * SDptr)
 	SDptr->skip_ms_page_3f = 1;
 	return 0;
 }
-#endif
 
 /****************************************************************************/
 /*                                                                          */
@@ -1331,7 +1233,6 @@ static irqreturn_t
 do_ipsintr(int irq, void *dev_id)
 {
 	ips_ha_t *ha;
-	unsigned long cpu_flags;
 	struct Scsi_Host *host;
 	int irqstatus;
 
@@ -1347,16 +1248,16 @@ do_ipsintr(int irq, void *dev_id)
 		return IRQ_HANDLED;
 	}
 
-	IPS_LOCK_SAVE(host->host_lock, cpu_flags);
+	spin_lock(host->host_lock);
 
 	if (!ha->active) {
-		IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+		spin_unlock(host->host_lock);
 		return IRQ_HANDLED;
 	}
 
 	irqstatus = (*ha->func.intr) (ha);
 
-	IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags);
+	spin_unlock(host->host_lock);
 
 	/* start the next command */
 	ips_next(ha, IPS_INTR_ON);
@@ -1606,30 +1507,22 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 	if ((SC->cmnd[0] == IPS_IOCTL_COMMAND) &&
 	    (SC->device->channel == 0) &&
 	    (SC->device->id == IPS_ADAPTER_ID) &&
-	    (SC->device->lun == 0) && SC->request_buffer) {
-		if ((!SC->use_sg) && SC->request_bufflen &&
-		    (((char *) SC->request_buffer)[0] == 'C') &&
-		    (((char *) SC->request_buffer)[1] == 'O') &&
-		    (((char *) SC->request_buffer)[2] == 'P') &&
-		    (((char *) SC->request_buffer)[3] == 'P'))
-			return 1;
-		else if (SC->use_sg) {
-			struct scatterlist *sg = SC->request_buffer;
+	    (SC->device->lun == 0) && scsi_sglist(SC)) {
+		struct scatterlist *sg = scsi_sglist(SC);
+		char  *buffer;
+
+		/* kmap_atomic() ensures addressability of the user buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+		if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1618 char *buffer; 1519 buffer[2] == 'P' && buffer[3] == 'P') {
1619 1520 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1620 /* kmap_atomic() ensures addressability of the user buffer.*/ 1521 local_irq_restore(flags);
1621 /* local_irq_save() protects the KM_IRQ0 address slot. */ 1522 return 1;
1622 local_irq_save(flags); 1523 }
1623 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1524 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1624 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && 1525 local_irq_restore(flags);
1625 buffer[2] == 'P' && buffer[3] == 'P') {
1626 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1627 local_irq_restore(flags);
1628 return 1;
1629 }
1630 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1631 local_irq_restore(flags);
1632 }
1633 } 1526 }
1634 return 0; 1527 return 0;
1635} 1528}
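For reference, a standalone sketch of the highmem-safe signature check this hunk converts to, using the 2.6.23-era kmap_atomic(page, KM_IRQ0) API (the slot argument and sg->page are gone in modern kernels); the helper name is illustrative:

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>
    #include <scsi/scsi_cmnd.h>

    /* Return non-zero if the command's data buffer starts with the
     * 'COPP' passthru signature.  Sketch only. */
    static int peek_copp_signature(struct scsi_cmnd *sc)
    {
    	struct scatterlist *sg = scsi_sglist(sc);
    	unsigned long flags;
    	char *p;
    	int hit;

    	local_irq_save(flags);		/* protect the KM_IRQ0 slot */
    	p = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
    	hit = p[0] == 'C' && p[1] == 'O' && p[2] == 'P' && p[3] == 'P';
    	kunmap_atomic(p - sg->offset, KM_IRQ0);
    	local_irq_restore(flags);

    	return hit;
    }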
@@ -1680,18 +1573,14 @@ ips_make_passthru(ips_ha_t *ha, struct scsi_cmnd *SC, ips_scb_t *scb, int intr)
1680{ 1573{
1681 ips_passthru_t *pt; 1574 ips_passthru_t *pt;
1682 int length = 0; 1575 int length = 0;
1683 int ret; 1576 int i, ret;
1577 struct scatterlist *sg = scsi_sglist(SC);
1684 1578
1685 METHOD_TRACE("ips_make_passthru", 1); 1579 METHOD_TRACE("ips_make_passthru", 1);
1686 1580
1687 if (!SC->use_sg) { 1581 scsi_for_each_sg(SC, sg, scsi_sg_count(SC), i)
1688 length = SC->request_bufflen; 1582 length += sg->length;
1689 } else { 1583
1690 struct scatterlist *sg = SC->request_buffer;
1691 int i;
1692 for (i = 0; i < SC->use_sg; i++)
1693 length += sg[i].length;
1694 }
1695 if (length < sizeof (ips_passthru_t)) { 1584 if (length < sizeof (ips_passthru_t)) {
1696 /* wrong size */ 1585 /* wrong size */
1697 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size", 1586 DEBUG_VAR(1, "(%s%d) Passthru structure wrong size",
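Note that scsi_for_each_sg() advances the sg cursor itself, so the loop body should dereference sg directly; indexing sg[i] on top of the moving cursor would walk off the list. A minimal sketch of the length computation this hunk introduces:

    #include <scsi/scsi_cmnd.h>

    /* Total byte count described by a command's scatter-gather list. */
    static unsigned int total_sg_bytes(struct scsi_cmnd *sc)
    {
    	struct scatterlist *sg;
    	unsigned int len = 0;
    	int i;

    	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
    		len += sg->length;
    	return len;
    }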
@@ -2115,7 +2004,7 @@ ips_cleanup_passthru(ips_ha_t * ha, ips_scb_t * scb)
2115 2004
2116 METHOD_TRACE("ips_cleanup_passthru", 1); 2005 METHOD_TRACE("ips_cleanup_passthru", 1);
2117 2006
2118 if ((!scb) || (!scb->scsi_cmd) || (!scb->scsi_cmd->request_buffer)) { 2007 if ((!scb) || (!scb->scsi_cmd) || (!scsi_sglist(scb->scsi_cmd))) {
2119 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru", 2008 DEBUG_VAR(1, "(%s%d) couldn't cleanup after passthru",
2120 ips_name, ha->host_num); 2009 ips_name, ha->host_num);
2121 2010
@@ -2730,7 +2619,6 @@ ips_next(ips_ha_t * ha, int intr)
2730 struct scsi_cmnd *q; 2619 struct scsi_cmnd *q;
2731 ips_copp_wait_item_t *item; 2620 ips_copp_wait_item_t *item;
2732 int ret; 2621 int ret;
2733 unsigned long cpu_flags = 0;
2734 struct Scsi_Host *host; 2622 struct Scsi_Host *host;
2735 METHOD_TRACE("ips_next", 1); 2623 METHOD_TRACE("ips_next", 1);
2736 2624
@@ -2742,7 +2630,7 @@ ips_next(ips_ha_t * ha, int intr)
2742 * this command won't time out 2630 * this command won't time out
2743 */ 2631 */
2744 if (intr == IPS_INTR_ON) 2632 if (intr == IPS_INTR_ON)
2745 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 2633 spin_lock(host->host_lock);
2746 2634
2747 if ((ha->subsys->param[3] & 0x300000) 2635 if ((ha->subsys->param[3] & 0x300000)
2748 && (ha->scb_activelist.count == 0)) { 2636 && (ha->scb_activelist.count == 0)) {
@@ -2769,14 +2657,14 @@ ips_next(ips_ha_t * ha, int intr)
2769 item = ips_removeq_copp_head(&ha->copp_waitlist); 2657 item = ips_removeq_copp_head(&ha->copp_waitlist);
2770 ha->num_ioctl++; 2658 ha->num_ioctl++;
2771 if (intr == IPS_INTR_ON) 2659 if (intr == IPS_INTR_ON)
2772 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 2660 spin_unlock(host->host_lock);
2773 scb->scsi_cmd = item->scsi_cmd; 2661 scb->scsi_cmd = item->scsi_cmd;
2774 kfree(item); 2662 kfree(item);
2775 2663
2776 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); 2664 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
2777 2665
2778 if (intr == IPS_INTR_ON) 2666 if (intr == IPS_INTR_ON)
2779 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 2667 spin_lock(host->host_lock);
2780 switch (ret) { 2668 switch (ret) {
2781 case IPS_FAILURE: 2669 case IPS_FAILURE:
2782 if (scb->scsi_cmd) { 2670 if (scb->scsi_cmd) {
@@ -2846,7 +2734,7 @@ ips_next(ips_ha_t * ha, int intr)
2846 SC = ips_removeq_wait(&ha->scb_waitlist, q); 2734 SC = ips_removeq_wait(&ha->scb_waitlist, q);
2847 2735
2848 if (intr == IPS_INTR_ON) 2736 if (intr == IPS_INTR_ON)
2849 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */ 2737 spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */
2850 2738
2851 SC->result = DID_OK; 2739 SC->result = DID_OK;
2852 SC->host_scribble = NULL; 2740 SC->host_scribble = NULL;
@@ -2866,41 +2754,26 @@ ips_next(ips_ha_t * ha, int intr)
2866 /* copy in the CDB */ 2754 /* copy in the CDB */
2867 memcpy(scb->cdb, SC->cmnd, SC->cmd_len); 2755 memcpy(scb->cdb, SC->cmnd, SC->cmd_len);
2868 2756
2869 /* Now handle the data buffer */ 2757 scb->sg_count = scsi_dma_map(SC);
2870 if (SC->use_sg) { 2758 BUG_ON(scb->sg_count < 0);
2759 if (scb->sg_count) {
2871 struct scatterlist *sg; 2760 struct scatterlist *sg;
2872 int i; 2761 int i;
2873 2762
2874 sg = SC->request_buffer;
2875 scb->sg_count = pci_map_sg(ha->pcidev, sg, SC->use_sg,
2876 SC->sc_data_direction);
2877 scb->flags |= IPS_SCB_MAP_SG; 2763 scb->flags |= IPS_SCB_MAP_SG;
2878 for (i = 0; i < scb->sg_count; i++) { 2764
2765 scsi_for_each_sg(SC, sg, scb->sg_count, i) {
2879 if (ips_fill_scb_sg_single 2766 if (ips_fill_scb_sg_single
2880 (ha, sg_dma_address(&sg[i]), scb, i, 2767 (ha, sg_dma_address(sg), scb, i,
2881 sg_dma_len(&sg[i])) < 0) 2768 sg_dma_len(sg)) < 0)
2882 break; 2769 break;
2883 } 2770 }
2884 scb->dcdb.transfer_length = scb->data_len; 2771 scb->dcdb.transfer_length = scb->data_len;
2885 } else { 2772 } else {
2886 if (SC->request_bufflen) { 2773 scb->data_busaddr = 0L;
2887 scb->data_busaddr = 2774 scb->sg_len = 0;
2888 pci_map_single(ha->pcidev, 2775 scb->data_len = 0;
2889 SC->request_buffer, 2776 scb->dcdb.transfer_length = 0;
2890 SC->request_bufflen,
2891 SC->sc_data_direction);
2892 scb->flags |= IPS_SCB_MAP_SINGLE;
2893 ips_fill_scb_sg_single(ha, scb->data_busaddr,
2894 scb, 0,
2895 SC->request_bufflen);
2896 scb->dcdb.transfer_length = scb->data_len;
2897 } else {
2898 scb->data_busaddr = 0L;
2899 scb->sg_len = 0;
2900 scb->data_len = 0;
2901 scb->dcdb.transfer_length = 0;
2902 }
2903
2904 } 2777 }
2905 2778
2906 scb->dcdb.cmd_attribute = 2779 scb->dcdb.cmd_attribute =
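The conversion above collapses the separate use_sg/map_single branches into one scsi_dma_map() call. A sketch of the resulting pattern, where fill_seg() is a hypothetical stand-in for the driver's descriptor builder (ips_fill_scb_sg_single() here); the same walk is reused for the breakup/resume path in ips_done() below:

    #include <linux/errno.h>
    #include <linux/scatterlist.h>
    #include <scsi/scsi_cmnd.h>

    /* Hypothetical stand-in for ips_fill_scb_sg_single(). */
    static void fill_seg(dma_addr_t addr, unsigned int len, int idx) { }

    /* Map a command for DMA and hand each segment to the controller-
     * specific fill routine.  scsi_dma_map() returns the segment count,
     * 0 for a no-data command, or a negative value on failure. */
    static int map_and_fill(struct scsi_cmnd *sc)
    {
    	struct scatterlist *sg;
    	int i, nseg;

    	nseg = scsi_dma_map(sc);
    	if (nseg < 0)
    		return -ENOMEM;

    	scsi_for_each_sg(sc, sg, nseg, i)
    		fill_seg(sg_dma_address(sg), sg_dma_len(sg), i);

    	return 0;
    }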
@@ -2919,7 +2792,7 @@ ips_next(ips_ha_t * ha, int intr)
2919 scb->dcdb.transfer_length = 0; 2792 scb->dcdb.transfer_length = 0;
2920 } 2793 }
2921 if (intr == IPS_INTR_ON) 2794 if (intr == IPS_INTR_ON)
2922 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 2795 spin_lock(host->host_lock);
2923 2796
2924 ret = ips_send_cmd(ha, scb); 2797 ret = ips_send_cmd(ha, scb);
2925 2798
@@ -2958,7 +2831,7 @@ ips_next(ips_ha_t * ha, int intr)
2958 } /* end while */ 2831 } /* end while */
2959 2832
2960 if (intr == IPS_INTR_ON) 2833 if (intr == IPS_INTR_ON)
2961 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 2834 spin_unlock(host->host_lock);
2962} 2835}
2963 2836
2964/****************************************************************************/ 2837/****************************************************************************/
@@ -3377,52 +3250,32 @@ ips_done(ips_ha_t * ha, ips_scb_t * scb)
3377 * the rest of the data and continue. 3250 * the rest of the data and continue.
3378 */ 3251 */
3379 if ((scb->breakup) || (scb->sg_break)) { 3252 if ((scb->breakup) || (scb->sg_break)) {
3253 struct scatterlist *sg;
3254 int sg_dma_index, ips_sg_index = 0;
3255
3380 /* we had a data breakup */ 3256 /* we had a data breakup */
3381 scb->data_len = 0; 3257 scb->data_len = 0;
3382 3258
3383 if (scb->sg_count) { 3259 sg = scsi_sglist(scb->scsi_cmd);
3384 /* S/G request */
3385 struct scatterlist *sg;
3386 int ips_sg_index = 0;
3387 int sg_dma_index;
3388
3389 sg = scb->scsi_cmd->request_buffer;
3390
3391 /* Spin forward to last dma chunk */
3392 sg_dma_index = scb->breakup;
3393
3394 /* Take care of possible partial on last chunk */
3395 ips_fill_scb_sg_single(ha,
3396 sg_dma_address(&sg
3397 [sg_dma_index]),
3398 scb, ips_sg_index++,
3399 sg_dma_len(&sg
3400 [sg_dma_index]));
3401
3402 for (; sg_dma_index < scb->sg_count;
3403 sg_dma_index++) {
3404 if (ips_fill_scb_sg_single
3405 (ha,
3406 sg_dma_address(&sg[sg_dma_index]),
3407 scb, ips_sg_index++,
3408 sg_dma_len(&sg[sg_dma_index])) < 0)
3409 break;
3410 3260
3411 } 3261 /* Spin forward to last dma chunk */
3262 sg_dma_index = scb->breakup;
3412 3263
3413 } else { 3264 /* Take care of possible partial on last chunk */
3414 /* Non S/G Request */ 3265 ips_fill_scb_sg_single(ha,
3415 (void) ips_fill_scb_sg_single(ha, 3266 sg_dma_address(&sg[sg_dma_index]),
3416 scb-> 3267 scb, ips_sg_index++,
3417 data_busaddr + 3268 sg_dma_len(&sg[sg_dma_index]));
3418 (scb->sg_break * 3269
3419 ha->max_xfer), 3270 for (; sg_dma_index < scsi_sg_count(scb->scsi_cmd);
3420 scb, 0, 3271 sg_dma_index++) {
3421 scb->scsi_cmd-> 3272 if (ips_fill_scb_sg_single
3422 request_bufflen - 3273 (ha,
3423 (scb->sg_break * 3274 sg_dma_address(&sg[sg_dma_index]),
3424 ha->max_xfer)); 3275 scb, ips_sg_index++,
3425 } 3276 sg_dma_len(&sg[sg_dma_index])) < 0)
3277 break;
3278 }
3426 3279
3427 scb->dcdb.transfer_length = scb->data_len; 3280 scb->dcdb.transfer_length = scb->data_len;
3428 scb->dcdb.cmd_attribute |= 3281 scb->dcdb.cmd_attribute |=
@@ -3653,32 +3506,27 @@ ips_send_wait(ips_ha_t * ha, ips_scb_t * scb, int timeout, int intr)
3653static void 3506static void
3654ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count) 3507ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3655{ 3508{
3656 if (scmd->use_sg) { 3509 int i;
3657 int i; 3510 unsigned int min_cnt, xfer_cnt;
3658 unsigned int min_cnt, xfer_cnt; 3511 char *cdata = (char *) data;
3659 char *cdata = (char *) data; 3512 unsigned char *buffer;
3660 unsigned char *buffer; 3513 unsigned long flags;
3661 unsigned long flags; 3514 struct scatterlist *sg = scsi_sglist(scmd);
3662 struct scatterlist *sg = scmd->request_buffer; 3515
3663 for (i = 0, xfer_cnt = 0; 3516 for (i = 0, xfer_cnt = 0;
3664 (i < scmd->use_sg) && (xfer_cnt < count); i++) { 3517 (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
3665 min_cnt = min(count - xfer_cnt, sg[i].length); 3518 min_cnt = min(count - xfer_cnt, sg[i].length);
3666 3519
3667 /* kmap_atomic() ensures addressability of the data buffer.*/ 3520 /* kmap_atomic() ensures addressability of the data buffer.*/
3668 /* local_irq_save() protects the KM_IRQ0 address slot. */ 3521 /* local_irq_save() protects the KM_IRQ0 address slot. */
3669 local_irq_save(flags); 3522 local_irq_save(flags);
3670 buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset; 3523 buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
3671 memcpy(buffer, &cdata[xfer_cnt], min_cnt); 3524 memcpy(buffer, &cdata[xfer_cnt], min_cnt);
3672 kunmap_atomic(buffer - sg[i].offset, KM_IRQ0); 3525 kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
3673 local_irq_restore(flags); 3526 local_irq_restore(flags);
3674 3527
3675 xfer_cnt += min_cnt; 3528 xfer_cnt += min_cnt;
3676 } 3529 }
3677
3678 } else {
3679 unsigned int min_cnt = min(count, scmd->request_bufflen);
3680 memcpy(scmd->request_buffer, data, min_cnt);
3681 }
3682} 3530}
3683 3531
3684/****************************************************************************/ 3532/****************************************************************************/
@@ -3691,32 +3539,27 @@ ips_scmd_buf_write(struct scsi_cmnd *scmd, void *data, unsigned int count)
3691static void 3539static void
3692ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count) 3540ips_scmd_buf_read(struct scsi_cmnd *scmd, void *data, unsigned int count)
3693{ 3541{
3694 if (scmd->use_sg) { 3542 int i;
3695 int i; 3543 unsigned int min_cnt, xfer_cnt;
3696 unsigned int min_cnt, xfer_cnt; 3544 char *cdata = (char *) data;
3697 char *cdata = (char *) data; 3545 unsigned char *buffer;
3698 unsigned char *buffer; 3546 unsigned long flags;
3699 unsigned long flags; 3547 struct scatterlist *sg = scsi_sglist(scmd);
3700 struct scatterlist *sg = scmd->request_buffer; 3548
3701 for (i = 0, xfer_cnt = 0; 3549 for (i = 0, xfer_cnt = 0;
3702 (i < scmd->use_sg) && (xfer_cnt < count); i++) { 3550 (i < scsi_sg_count(scmd)) && (xfer_cnt < count); i++) {
3703 min_cnt = min(count - xfer_cnt, sg[i].length); 3551 min_cnt = min(count - xfer_cnt, sg[i].length);
3704 3552
3705 /* kmap_atomic() ensures addressability of the data buffer.*/ 3553 /* kmap_atomic() ensures addressability of the data buffer.*/
3706 /* local_irq_save() protects the KM_IRQ0 address slot. */ 3554 /* local_irq_save() protects the KM_IRQ0 address slot. */
3707 local_irq_save(flags); 3555 local_irq_save(flags);
3708 buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset; 3556 buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
3709 memcpy(&cdata[xfer_cnt], buffer, min_cnt); 3557 memcpy(&cdata[xfer_cnt], buffer, min_cnt);
3710 kunmap_atomic(buffer - sg[i].offset, KM_IRQ0); 3558 kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
3711 local_irq_restore(flags); 3559 local_irq_restore(flags);
3712 3560
3713 xfer_cnt += min_cnt; 3561 xfer_cnt += min_cnt;
3714 } 3562 }
3715
3716 } else {
3717 unsigned int min_cnt = min(count, scmd->request_bufflen);
3718 memcpy(data, scmd->request_buffer, min_cnt);
3719 }
3720} 3563}
3721 3564
3722/****************************************************************************/ 3565/****************************************************************************/
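Both helpers above now assume every command carries a scatter-gather list, which is what the midlayer guarantees once the non-sg path is gone. A condensed sketch of the read-side copy loop, same 2.6.23-era kmap_atomic API as before; the helper name is illustrative:

    #include <linux/highmem.h>
    #include <linux/kernel.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>
    #include <scsi/scsi_cmnd.h>

    /* Copy up to 'count' bytes of the command's data into 'data'. */
    static void sg_copy_to_flat(struct scsi_cmnd *sc, void *data,
    			    unsigned int count)
    {
    	struct scatterlist *sg = scsi_sglist(sc);
    	unsigned int done = 0, chunk;
    	unsigned long flags;
    	char *src;
    	int i;

    	for (i = 0; i < scsi_sg_count(sc) && done < count; i++) {
    		chunk = min(count - done, sg[i].length);

    		local_irq_save(flags);		/* protect KM_IRQ0 */
    		src = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
    		memcpy((char *)data + done, src, chunk);
    		kunmap_atomic(src - sg[i].offset, KM_IRQ0);
    		local_irq_restore(flags);

    		done += chunk;
    	}
    }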
@@ -4350,7 +4193,7 @@ ips_rdcap(ips_ha_t * ha, ips_scb_t * scb)
4350 4193
4351 METHOD_TRACE("ips_rdcap", 1); 4194 METHOD_TRACE("ips_rdcap", 1);
4352 4195
4353 if (scb->scsi_cmd->request_bufflen < 8) 4196 if (scsi_bufflen(scb->scsi_cmd) < 8)
4354 return (0); 4197 return (0);
4355 4198
4356 cap.lba = 4199 cap.lba =
@@ -4735,8 +4578,7 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
4735 4578
4736 METHOD_TRACE("ips_freescb", 1); 4579 METHOD_TRACE("ips_freescb", 1);
4737 if (scb->flags & IPS_SCB_MAP_SG) 4580 if (scb->flags & IPS_SCB_MAP_SG)
4738 pci_unmap_sg(ha->pcidev, scb->scsi_cmd->request_buffer, 4581 scsi_dma_unmap(scb->scsi_cmd);
4739 scb->scsi_cmd->use_sg, IPS_DMA_DIR(scb));
4740 else if (scb->flags & IPS_SCB_MAP_SINGLE) 4582 else if (scb->flags & IPS_SCB_MAP_SINGLE)
4741 pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len, 4583 pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
4742 IPS_DMA_DIR(scb)); 4584 IPS_DMA_DIR(scb));
@@ -7004,7 +6846,6 @@ ips_register_scsi(int index)
7004 kfree(oldha); 6846 kfree(oldha);
7005 ips_sh[index] = sh; 6847 ips_sh[index] = sh;
7006 ips_ha[index] = ha; 6848 ips_ha[index] = ha;
7007 IPS_SCSI_SET_DEVICE(sh, ha);
7008 6849
7009 /* Store away needed values for later use */ 6850 /* Store away needed values for later use */
7010 sh->io_port = ha->io_addr; 6851 sh->io_port = ha->io_addr;
@@ -7016,17 +6857,16 @@ ips_register_scsi(int index)
7016 sh->cmd_per_lun = sh->hostt->cmd_per_lun; 6857 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
7017 sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; 6858 sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
7018 sh->use_clustering = sh->hostt->use_clustering; 6859 sh->use_clustering = sh->hostt->use_clustering;
7019
7020#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
7021 sh->max_sectors = 128; 6860 sh->max_sectors = 128;
7022#endif
7023 6861
7024 sh->max_id = ha->ntargets; 6862 sh->max_id = ha->ntargets;
7025 sh->max_lun = ha->nlun; 6863 sh->max_lun = ha->nlun;
7026 sh->max_channel = ha->nbus - 1; 6864 sh->max_channel = ha->nbus - 1;
7027 sh->can_queue = ha->max_cmds - 1; 6865 sh->can_queue = ha->max_cmds - 1;
7028 6866
7029 IPS_ADD_HOST(sh, NULL); 6867 scsi_add_host(sh, NULL);
6868 scsi_scan_host(sh);
6869
7030 return 0; 6870 return 0;
7031} 6871}
7032 6872
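The IPS_ADD_HOST compat macro expanded to exactly this scsi_add_host()/scsi_scan_host() pair on 2.6, so the open-coded version is equivalent; note the hunk still discards the scsi_add_host() return value. A sketch of the registration tail with the error check a from-scratch driver would want (names illustrative):

    #include <scsi/scsi_host.h>

    static int demo_register(struct Scsi_Host *sh, struct device *parent)
    {
    	int err;

    	err = scsi_add_host(sh, parent); /* make host known to the midlayer */
    	if (err)
    		return err;
    	scsi_scan_host(sh);		 /* then probe its targets and LUNs */
    	return 0;
    }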
@@ -7069,7 +6909,7 @@ ips_module_init(void)
7069 return -ENODEV; 6909 return -ENODEV;
7070 ips_driver_template.module = THIS_MODULE; 6910 ips_driver_template.module = THIS_MODULE;
7071 ips_order_controllers(); 6911 ips_order_controllers();
7072 if (IPS_REGISTER_HOSTS(&ips_driver_template)) { 6912 if (!ips_detect(&ips_driver_template)) {
7073 pci_unregister_driver(&ips_pci_driver); 6913 pci_unregister_driver(&ips_pci_driver);
7074 return -ENODEV; 6914 return -ENODEV;
7075 } 6915 }
@@ -7087,7 +6927,6 @@ ips_module_init(void)
7087static void __exit 6927static void __exit
7088ips_module_exit(void) 6928ips_module_exit(void)
7089{ 6929{
7090 IPS_UNREGISTER_HOSTS(&ips_driver_template);
7091 pci_unregister_driver(&ips_pci_driver); 6930 pci_unregister_driver(&ips_pci_driver);
7092 unregister_reboot_notifier(&ips_notifier); 6931 unregister_reboot_notifier(&ips_notifier);
7093} 6932}
@@ -7436,15 +7275,9 @@ ips_init_phase2(int index)
7436 return SUCCESS; 7275 return SUCCESS;
7437} 7276}
7438 7277
7439#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
7440MODULE_LICENSE("GPL"); 7278MODULE_LICENSE("GPL");
7441#endif
7442
7443MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING); 7279MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
7444
7445#ifdef MODULE_VERSION
7446MODULE_VERSION(IPS_VER_STRING); 7280MODULE_VERSION(IPS_VER_STRING);
7447#endif
7448 7281
7449 7282
7450/* 7283/*
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b726dcc424b1..24123d537c58 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -58,10 +58,6 @@
58 /* 58 /*
59 * Some handy macros 59 * Some handy macros
60 */ 60 */
61 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) || defined CONFIG_HIGHIO
62 #define IPS_HIGHIO
63 #endif
64
65 #define IPS_HA(x) ((ips_ha_t *) x->hostdata) 61 #define IPS_HA(x) ((ips_ha_t *) x->hostdata)
66 #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs) 62 #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
67 #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \ 63 #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
@@ -84,38 +80,8 @@
84 #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \ 80 #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \
85 sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST)) 81 sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST))
86 82
87 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4) 83 #define IPS_PRINTK(level, pcidev, format, arg...) \
88 #define pci_set_dma_mask(dev,mask) ( mask > 0xffffffff ? 1:0 )
89 #define scsi_set_pci_device(sh,dev) (0)
90 #endif
91
92 #ifndef IRQ_NONE
93 typedef void irqreturn_t;
94 #define IRQ_NONE
95 #define IRQ_HANDLED
96 #define IRQ_RETVAL(x)
97 #endif
98
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
100 #define IPS_REGISTER_HOSTS(SHT) scsi_register_module(MODULE_SCSI_HA,SHT)
101 #define IPS_UNREGISTER_HOSTS(SHT) scsi_unregister_module(MODULE_SCSI_HA,SHT)
102 #define IPS_ADD_HOST(shost,device)
103 #define IPS_REMOVE_HOST(shost)
104 #define IPS_SCSI_SET_DEVICE(sh,ha) scsi_set_pci_device(sh, (ha)->pcidev)
105 #define IPS_PRINTK(level, pcidev, format, arg...) \
106 printk(level "%s %s:" format , "ips" , \
107 (pcidev)->slot_name , ## arg)
108 #define scsi_host_alloc(sh,size) scsi_register(sh,size)
109 #define scsi_host_put(sh) scsi_unregister(sh)
110 #else
111 #define IPS_REGISTER_HOSTS(SHT) (!ips_detect(SHT))
112 #define IPS_UNREGISTER_HOSTS(SHT)
113 #define IPS_ADD_HOST(shost,device) do { scsi_add_host(shost,device); scsi_scan_host(shost); } while (0)
114 #define IPS_REMOVE_HOST(shost) scsi_remove_host(shost)
115 #define IPS_SCSI_SET_DEVICE(sh,ha) do { } while (0)
116 #define IPS_PRINTK(level, pcidev, format, arg...) \
117 dev_printk(level , &((pcidev)->dev) , format , ## arg) 84 dev_printk(level , &((pcidev)->dev) , format , ## arg)
118 #endif
119 85
120 #define MDELAY(n) \ 86 #define MDELAY(n) \
121 do { \ 87 do { \
@@ -134,7 +100,7 @@
134 #define pci_dma_hi32(a) ((a >> 16) >> 16) 100 #define pci_dma_hi32(a) ((a >> 16) >> 16)
135 #define pci_dma_lo32(a) (a & 0xffffffff) 101 #define pci_dma_lo32(a) (a & 0xffffffff)
136 102
137 #if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO) 103 #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
138 #define IPS_ENABLE_DMA64 (1) 104 #define IPS_ENABLE_DMA64 (1)
139 #else 105 #else
140 #define IPS_ENABLE_DMA64 (0) 106 #define IPS_ENABLE_DMA64 (0)
@@ -451,16 +417,10 @@
451 /* 417 /*
452 * Scsi_Host Template 418 * Scsi_Host Template
453 */ 419 */
454#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
455 static int ips_proc24_info(char *, char **, off_t, int, int, int);
456 static void ips_select_queue_depth(struct Scsi_Host *, struct scsi_device *);
457 static int ips_biosparam(Disk *disk, kdev_t dev, int geom[]);
458#else
459 static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); 420 static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
460 static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, 421 static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
461 sector_t capacity, int geom[]); 422 sector_t capacity, int geom[]);
462 static int ips_slave_configure(struct scsi_device *SDptr); 423 static int ips_slave_configure(struct scsi_device *SDptr);
463#endif
464 424
465/* 425/*
466 * Raid Command Formats 426 * Raid Command Formats
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index c9a3abf9e7b6..aebcd5fcdc55 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -29,14 +29,15 @@
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/inet.h> 31#include <linux/inet.h>
32#include <linux/file.h>
32#include <linux/blkdev.h> 33#include <linux/blkdev.h>
33#include <linux/crypto.h> 34#include <linux/crypto.h>
34#include <linux/delay.h> 35#include <linux/delay.h>
35#include <linux/kfifo.h> 36#include <linux/kfifo.h>
36#include <linux/scatterlist.h> 37#include <linux/scatterlist.h>
37#include <linux/mutex.h>
38#include <net/tcp.h> 38#include <net/tcp.h>
39#include <scsi/scsi_cmnd.h> 39#include <scsi/scsi_cmnd.h>
40#include <scsi/scsi_device.h>
40#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
41#include <scsi/scsi.h> 42#include <scsi/scsi.h>
42#include <scsi/scsi_transport_iscsi.h> 43#include <scsi/scsi_transport_iscsi.h>
@@ -109,7 +110,7 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
109 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 110 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
110 111
111 crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc); 112 crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
112 buf->sg.length = tcp_conn->hdr_size; 113 buf->sg.length += sizeof(u32);
113} 114}
114 115
115static inline int 116static inline int
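The right-hand change keeps the digest bytes inside the header buffer's send length (growing it by sizeof(u32)) instead of resetting it to the bare header size, so subsequent sends of the buffer include the CRC. A sketch of the accounting, under the assumption that the digest slot directly follows the header in the same allocation:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Append a 4-byte header digest and return the new send length. */
    static unsigned int append_hdr_digest(void *hdr, unsigned int hdr_len,
    				      u32 crc)
    {
    	memcpy((char *)hdr + hdr_len, &crc, sizeof(crc));
    	return hdr_len + sizeof(u32);	/* digest travels with the header */
    }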
@@ -211,16 +212,14 @@ iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
211static int 212static int
212iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 213iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
213{ 214{
214 int rc;
215 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 215 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
216 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 216 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
217 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; 217 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
218 struct iscsi_session *session = conn->session; 218 struct iscsi_session *session = conn->session;
219 struct scsi_cmnd *sc = ctask->sc;
219 int datasn = be32_to_cpu(rhdr->datasn); 220 int datasn = be32_to_cpu(rhdr->datasn);
220 221
221 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr); 222 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
222 if (rc)
223 return rc;
224 /* 223 /*
225 * setup Data-In byte counter (gets decremented..) 224 * setup Data-In byte counter (gets decremented..)
226 */ 225 */
@@ -229,31 +228,36 @@ iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
229 if (tcp_conn->in.datalen == 0) 228 if (tcp_conn->in.datalen == 0)
230 return 0; 229 return 0;
231 230
232 if (ctask->datasn != datasn) 231 if (tcp_ctask->exp_datasn != datasn) {
232 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->datasn(%d)\n",
233 __FUNCTION__, tcp_ctask->exp_datasn, datasn);
233 return ISCSI_ERR_DATASN; 234 return ISCSI_ERR_DATASN;
235 }
234 236
235 ctask->datasn++; 237 tcp_ctask->exp_datasn++;
236 238
237 tcp_ctask->data_offset = be32_to_cpu(rhdr->offset); 239 tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
238 if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length) 240 if (tcp_ctask->data_offset + tcp_conn->in.datalen > scsi_bufflen(sc)) {
241 debug_tcp("%s: data_offset(%d) + data_len(%d) > total_length_in(%d)\n",
242 __FUNCTION__, tcp_ctask->data_offset,
243 tcp_conn->in.datalen, scsi_bufflen(sc));
239 return ISCSI_ERR_DATA_OFFSET; 244 return ISCSI_ERR_DATA_OFFSET;
245 }
240 246
241 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) { 247 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
242 struct scsi_cmnd *sc = ctask->sc;
243
244 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 248 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
245 if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) { 249 if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
246 int res_count = be32_to_cpu(rhdr->residual_count); 250 int res_count = be32_to_cpu(rhdr->residual_count);
247 251
248 if (res_count > 0 && 252 if (res_count > 0 &&
249 res_count <= sc->request_bufflen) { 253 res_count <= scsi_bufflen(sc)) {
250 sc->resid = res_count; 254 scsi_set_resid(sc, res_count);
251 sc->result = (DID_OK << 16) | rhdr->cmd_status; 255 sc->result = (DID_OK << 16) | rhdr->cmd_status;
252 } else 256 } else
253 sc->result = (DID_BAD_TARGET << 16) | 257 sc->result = (DID_BAD_TARGET << 16) |
254 rhdr->cmd_status; 258 rhdr->cmd_status;
255 } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) { 259 } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
256 sc->resid = be32_to_cpu(rhdr->residual_count); 260 scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
257 sc->result = (DID_OK << 16) | rhdr->cmd_status; 261 sc->result = (DID_OK << 16) | rhdr->cmd_status;
258 } else 262 } else
259 sc->result = (DID_OK << 16) | rhdr->cmd_status; 263 sc->result = (DID_OK << 16) | rhdr->cmd_status;
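This hunk swaps direct sc->resid/request_bufflen accesses for the accessor API. A sketch of the underflow check in isolation, assuming the same DID_* result conventions:

    #include <linux/types.h>
    #include <scsi/scsi.h>
    #include <scsi/scsi_cmnd.h>

    /* Record a target-reported underflow if the residual is sane. */
    static void demo_underflow(struct scsi_cmnd *sc, u32 res_count, u8 status)
    {
    	if (res_count > 0 && res_count <= scsi_bufflen(sc)) {
    		scsi_set_resid(sc, res_count);
    		sc->result = (DID_OK << 16) | status;
    	} else {
    		sc->result = (DID_BAD_TARGET << 16) | status;
    	}
    }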
@@ -281,6 +285,8 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
281{ 285{
282 struct iscsi_data *hdr; 286 struct iscsi_data *hdr;
283 struct scsi_cmnd *sc = ctask->sc; 287 struct scsi_cmnd *sc = ctask->sc;
288 int i, sg_count = 0;
289 struct scatterlist *sg;
284 290
285 hdr = &r2t->dtask.hdr; 291 hdr = &r2t->dtask.hdr;
286 memset(hdr, 0, sizeof(struct iscsi_data)); 292 memset(hdr, 0, sizeof(struct iscsi_data));
@@ -308,39 +314,30 @@ iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
308 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr, 314 iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
309 sizeof(struct iscsi_hdr)); 315 sizeof(struct iscsi_hdr));
310 316
311 if (sc->use_sg) { 317 sg = scsi_sglist(sc);
312 int i, sg_count = 0; 318 r2t->sg = NULL;
313 struct scatterlist *sg = sc->request_buffer; 319 for (i = 0; i < scsi_sg_count(sc); i++, sg += 1) {
314 320 /* FIXME: prefetch ? */
315 r2t->sg = NULL; 321 if (sg_count + sg->length > r2t->data_offset) {
316 for (i = 0; i < sc->use_sg; i++, sg += 1) { 322 int page_offset;
317 /* FIXME: prefetch ? */
318 if (sg_count + sg->length > r2t->data_offset) {
319 int page_offset;
320 323
321 /* sg page found! */ 324 /* sg page found! */
322 325
323 /* offset within this page */ 326 /* offset within this page */
324 page_offset = r2t->data_offset - sg_count; 327 page_offset = r2t->data_offset - sg_count;
325 328
326 /* fill in this buffer */ 329 /* fill in this buffer */
327 iscsi_buf_init_sg(&r2t->sendbuf, sg); 330 iscsi_buf_init_sg(&r2t->sendbuf, sg);
328 r2t->sendbuf.sg.offset += page_offset; 331 r2t->sendbuf.sg.offset += page_offset;
329 r2t->sendbuf.sg.length -= page_offset; 332 r2t->sendbuf.sg.length -= page_offset;
330 333
331 /* xmit logic will continue with next one */ 334 /* xmit logic will continue with next one */
332 r2t->sg = sg + 1; 335 r2t->sg = sg + 1;
333 break; 336 break;
334 }
335 sg_count += sg->length;
336 } 337 }
337 BUG_ON(r2t->sg == NULL); 338 sg_count += sg->length;
338 } else {
339 iscsi_buf_init_iov(&r2t->sendbuf,
340 (char*)sc->request_buffer + r2t->data_offset,
341 r2t->data_count);
342 r2t->sg = NULL;
343 } 339 }
340 BUG_ON(r2t->sg == NULL);
344} 341}
345 342
346/** 343/**
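With the non-sg branch gone, the offset search above is the only path. The walk generalizes to a small helper; a sketch (name illustrative) that returns the sg entry containing a given byte offset plus the offset within that entry:

    #include <linux/scatterlist.h>

    static struct scatterlist *sg_seek(struct scatterlist *sg, int nents,
    				   unsigned int offset, unsigned int *in_seg)
    {
    	unsigned int pos = 0;
    	int i;

    	for (i = 0; i < nents; i++, sg++) {
    		if (pos + sg->length > offset) {
    			*in_seg = offset - pos;	/* offset inside this entry */
    			return sg;
    		}
    		pos += sg->length;
    	}
    	return NULL;	/* offset beyond the list; caller BUGs on this */
    }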
@@ -365,17 +362,16 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
365 return ISCSI_ERR_DATALEN; 362 return ISCSI_ERR_DATALEN;
366 } 363 }
367 364
368 if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn) 365 if (tcp_ctask->exp_datasn != r2tsn) {
366 debug_tcp("%s: ctask->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
367 __FUNCTION__, tcp_ctask->exp_datasn, r2tsn);
369 return ISCSI_ERR_R2TSN; 368 return ISCSI_ERR_R2TSN;
370 369 }
371 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
372 if (rc)
373 return rc;
374
375 /* FIXME: use R2TSN to detect missing R2T */
376 370
377 /* fill-in new R2T associated with the task */ 371 /* fill-in new R2T associated with the task */
378 spin_lock(&session->lock); 372 spin_lock(&session->lock);
373 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
374
379 if (!ctask->sc || ctask->mtask || 375 if (!ctask->sc || ctask->mtask ||
380 session->state != ISCSI_STATE_LOGGED_IN) { 376 session->state != ISCSI_STATE_LOGGED_IN) {
381 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in " 377 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
@@ -401,11 +397,11 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
401 r2t->data_length, session->max_burst); 397 r2t->data_length, session->max_burst);
402 398
403 r2t->data_offset = be32_to_cpu(rhdr->data_offset); 399 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
404 if (r2t->data_offset + r2t->data_length > ctask->total_length) { 400 if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
405 spin_unlock(&session->lock); 401 spin_unlock(&session->lock);
406 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at " 402 printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
407 "offset %u and total length %d\n", r2t->data_length, 403 "offset %u and total length %d\n", r2t->data_length,
408 r2t->data_offset, ctask->total_length); 404 r2t->data_offset, scsi_bufflen(ctask->sc));
409 return ISCSI_ERR_DATALEN; 405 return ISCSI_ERR_DATALEN;
410 } 406 }
411 407
@@ -414,9 +410,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
414 410
415 iscsi_solicit_data_init(conn, ctask, r2t); 411 iscsi_solicit_data_init(conn, ctask, r2t);
416 412
417 tcp_ctask->exp_r2tsn = r2tsn + 1; 413 tcp_ctask->exp_datasn = r2tsn + 1;
418 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)); 414 __kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
419 tcp_ctask->xmstate |= XMSTATE_SOL_HDR; 415 tcp_ctask->xmstate |= XMSTATE_SOL_HDR_INIT;
420 list_move_tail(&ctask->running, &conn->xmitqueue); 416 list_move_tail(&ctask->running, &conn->xmitqueue);
421 417
422 scsi_queue_work(session->host, &conn->xmitwork); 418 scsi_queue_work(session->host, &conn->xmitwork);
@@ -600,7 +596,7 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
600{ 596{
601 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 597 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
602 int buf_left = buf_size - (tcp_conn->data_copied + offset); 598 int buf_left = buf_size - (tcp_conn->data_copied + offset);
603 int size = min(tcp_conn->in.copy, buf_left); 599 unsigned size = min(tcp_conn->in.copy, buf_left);
604 int rc; 600 int rc;
605 601
606 size = min(size, ctask->data_count); 602 size = min(size, ctask->data_count);
@@ -609,7 +605,7 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
609 size, tcp_conn->in.offset, tcp_conn->in.copied); 605 size, tcp_conn->in.offset, tcp_conn->in.copied);
610 606
611 BUG_ON(size <= 0); 607 BUG_ON(size <= 0);
612 BUG_ON(tcp_ctask->sent + size > ctask->total_length); 608 BUG_ON(tcp_ctask->sent + size > scsi_bufflen(ctask->sc));
613 609
614 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset, 610 rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
615 (char*)buf + (offset + tcp_conn->data_copied), size); 611 (char*)buf + (offset + tcp_conn->data_copied), size);
@@ -707,25 +703,8 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
707 703
708 BUG_ON((void*)ctask != sc->SCp.ptr); 704 BUG_ON((void*)ctask != sc->SCp.ptr);
709 705
710 /*
711 * copying Data-In into the Scsi_Cmnd
712 */
713 if (!sc->use_sg) {
714 i = ctask->data_count;
715 rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
716 sc->request_bufflen,
717 tcp_ctask->data_offset);
718 if (rc == -EAGAIN)
719 return rc;
720 if (conn->datadgst_en)
721 iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
722 i);
723 rc = 0;
724 goto done;
725 }
726
727 offset = tcp_ctask->data_offset; 706 offset = tcp_ctask->data_offset;
728 sg = sc->request_buffer; 707 sg = scsi_sglist(sc);
729 708
730 if (tcp_ctask->data_offset) 709 if (tcp_ctask->data_offset)
731 for (i = 0; i < tcp_ctask->sg_count; i++) 710 for (i = 0; i < tcp_ctask->sg_count; i++)
@@ -734,7 +713,7 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
734 if (offset < 0) 713 if (offset < 0)
735 offset = 0; 714 offset = 0;
736 715
737 for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) { 716 for (i = tcp_ctask->sg_count; i < scsi_sg_count(sc); i++) {
738 char *dest; 717 char *dest;
739 718
740 dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0); 719 dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
@@ -779,7 +758,6 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
779 } 758 }
780 BUG_ON(ctask->data_count); 759 BUG_ON(ctask->data_count);
781 760
782done:
783 /* check for non-exceptional status */ 761 /* check for non-exceptional status */
784 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) { 762 if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
785 debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n", 763 debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
@@ -895,11 +873,27 @@ more:
895 } 873 }
896 } 874 }
897 875
898 if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) { 876 if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV &&
877 tcp_conn->in.copy) {
899 uint32_t recv_digest; 878 uint32_t recv_digest;
900 879
901 debug_tcp("extra data_recv offset %d copy %d\n", 880 debug_tcp("extra data_recv offset %d copy %d\n",
902 tcp_conn->in.offset, tcp_conn->in.copy); 881 tcp_conn->in.offset, tcp_conn->in.copy);
882
883 if (!tcp_conn->data_copied) {
884 if (tcp_conn->in.padding) {
885 debug_tcp("padding -> %d\n",
886 tcp_conn->in.padding);
887 memset(pad, 0, tcp_conn->in.padding);
888 sg_init_one(&sg, pad, tcp_conn->in.padding);
889 crypto_hash_update(&tcp_conn->rx_hash,
890 &sg, sg.length);
891 }
892 crypto_hash_final(&tcp_conn->rx_hash,
893 (u8 *) &tcp_conn->in.datadgst);
894 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
895 }
896
903 rc = iscsi_tcp_copy(conn, sizeof(uint32_t)); 897 rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
904 if (rc) { 898 if (rc) {
905 if (rc == -EAGAIN) 899 if (rc == -EAGAIN)
@@ -924,8 +918,7 @@ more:
924 } 918 }
925 919
926 if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV && 920 if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
927 tcp_conn->in.copy) { 921 tcp_conn->in.copy) {
928
929 debug_tcp("data_recv offset %d copy %d\n", 922 debug_tcp("data_recv offset %d copy %d\n",
930 tcp_conn->in.offset, tcp_conn->in.copy); 923 tcp_conn->in.offset, tcp_conn->in.copy);
931 924
@@ -936,24 +929,32 @@ more:
936 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 929 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
937 return 0; 930 return 0;
938 } 931 }
939 tcp_conn->in.copy -= tcp_conn->in.padding; 932
940 tcp_conn->in.offset += tcp_conn->in.padding; 933 if (tcp_conn->in.padding)
941 if (conn->datadgst_en) { 934 tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
942 if (tcp_conn->in.padding) { 935 else if (conn->datadgst_en)
943 debug_tcp("padding -> %d\n",
944 tcp_conn->in.padding);
945 memset(pad, 0, tcp_conn->in.padding);
946 sg_init_one(&sg, pad, tcp_conn->in.padding);
947 crypto_hash_update(&tcp_conn->rx_hash,
948 &sg, sg.length);
949 }
950 crypto_hash_final(&tcp_conn->rx_hash,
951 (u8 *) &tcp_conn->in.datadgst);
952 debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
953 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; 936 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
954 tcp_conn->data_copied = 0; 937 else
955 } else 938 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
939 tcp_conn->data_copied = 0;
940 }
941
942 if (tcp_conn->in_progress == IN_PROGRESS_PAD_RECV &&
943 tcp_conn->in.copy) {
944 int copylen = min(tcp_conn->in.padding - tcp_conn->data_copied,
945 tcp_conn->in.copy);
946
947 tcp_conn->in.copy -= copylen;
948 tcp_conn->in.offset += copylen;
949 tcp_conn->data_copied += copylen;
950
951 if (tcp_conn->data_copied != tcp_conn->in.padding)
952 tcp_conn->in_progress = IN_PROGRESS_PAD_RECV;
953 else if (conn->datadgst_en)
954 tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
955 else
956 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 956 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
957 tcp_conn->data_copied = 0;
957 } 958 }
958 959
959 debug_tcp("f, processed %d from out of %d padding %d\n", 960 debug_tcp("f, processed %d from out of %d padding %d\n",
@@ -1215,7 +1216,6 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1215 struct iscsi_r2t_info *r2t, int left) 1216 struct iscsi_r2t_info *r2t, int left)
1216{ 1217{
1217 struct iscsi_data *hdr; 1218 struct iscsi_data *hdr;
1218 struct scsi_cmnd *sc = ctask->sc;
1219 int new_offset; 1219 int new_offset;
1220 1220
1221 hdr = &r2t->dtask.hdr; 1221 hdr = &r2t->dtask.hdr;
@@ -1245,15 +1245,8 @@ iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1245 if (iscsi_buf_left(&r2t->sendbuf)) 1245 if (iscsi_buf_left(&r2t->sendbuf))
1246 return; 1246 return;
1247 1247
1248 if (sc->use_sg) { 1248 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1249 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg); 1249 r2t->sg += 1;
1250 r2t->sg += 1;
1251 } else {
1252 iscsi_buf_init_iov(&r2t->sendbuf,
1253 (char*)sc->request_buffer + new_offset,
1254 r2t->data_count);
1255 r2t->sg = NULL;
1256 }
1257} 1250}
1258 1251
1259static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask, 1252static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
@@ -1277,41 +1270,10 @@ static void iscsi_set_padding(struct iscsi_tcp_cmd_task *tcp_ctask,
1277static void 1270static void
1278iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask) 1271iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1279{ 1272{
1280 struct scsi_cmnd *sc = ctask->sc;
1281 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; 1273 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1282 1274
1283 BUG_ON(__kfifo_len(tcp_ctask->r2tqueue)); 1275 BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));
1284 1276 tcp_ctask->xmstate = XMSTATE_CMD_HDR_INIT;
1285 tcp_ctask->sent = 0;
1286 tcp_ctask->sg_count = 0;
1287
1288 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1289 tcp_ctask->xmstate = XMSTATE_W_HDR;
1290 tcp_ctask->exp_r2tsn = 0;
1291 BUG_ON(ctask->total_length == 0);
1292
1293 if (sc->use_sg) {
1294 struct scatterlist *sg = sc->request_buffer;
1295
1296 iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
1297 tcp_ctask->sg = sg + 1;
1298 tcp_ctask->bad_sg = sg + sc->use_sg;
1299 } else {
1300 iscsi_buf_init_iov(&tcp_ctask->sendbuf,
1301 sc->request_buffer,
1302 sc->request_bufflen);
1303 tcp_ctask->sg = NULL;
1304 tcp_ctask->bad_sg = NULL;
1305 }
1306 debug_scsi("cmd [itt 0x%x total %d imm_data %d "
1307 "unsol count %d, unsol offset %d]\n",
1308 ctask->itt, ctask->total_length, ctask->imm_count,
1309 ctask->unsol_count, ctask->unsol_offset);
1310 } else
1311 tcp_ctask->xmstate = XMSTATE_R_HDR;
1312
1313 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
1314 sizeof(struct iscsi_hdr));
1315} 1277}
1316 1278
1317/** 1279/**
@@ -1324,9 +1286,11 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
1324 * call it again later, or recover. '0' return code means successful 1286 * call it again later, or recover. '0' return code means successful
1325 * xmit. 1287 * xmit.
1326 * 1288 *
1327 * Management xmit state machine consists of two states: 1289 * Management xmit state machine consists of these states:
1328 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress 1290 * XMSTATE_IMM_HDR_INIT - calculate digest of PDU Header
1329 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress 1291 * XMSTATE_IMM_HDR - PDU Header xmit in progress
1292 * XMSTATE_IMM_DATA - PDU Data xmit in progress
1293 * XMSTATE_IDLE - management PDU is done
1330 **/ 1294 **/
1331static int 1295static int
1332iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) 1296iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
@@ -1337,23 +1301,34 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1337 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n", 1301 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1338 conn->id, tcp_mtask->xmstate, mtask->itt); 1302 conn->id, tcp_mtask->xmstate, mtask->itt);
1339 1303
1340 if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) { 1304 if (tcp_mtask->xmstate & XMSTATE_IMM_HDR_INIT) {
1341 tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR; 1305 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
1342 if (mtask->data_count) 1306 sizeof(struct iscsi_hdr));
1307
1308 if (mtask->data_count) {
1343 tcp_mtask->xmstate |= XMSTATE_IMM_DATA; 1309 tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
1310 iscsi_buf_init_iov(&tcp_mtask->sendbuf,
1311 (char*)mtask->data,
1312 mtask->data_count);
1313 }
1314
1344 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE && 1315 if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
1345 conn->stop_stage != STOP_CONN_RECOVER && 1316 conn->stop_stage != STOP_CONN_RECOVER &&
1346 conn->hdrdgst_en) 1317 conn->hdrdgst_en)
1347 iscsi_hdr_digest(conn, &tcp_mtask->headbuf, 1318 iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
1348 (u8*)tcp_mtask->hdrext); 1319 (u8*)tcp_mtask->hdrext);
1320
1321 tcp_mtask->sent = 0;
1322 tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR_INIT;
1323 tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
1324 }
1325
1326 if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
1349 rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf, 1327 rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
1350 mtask->data_count); 1328 mtask->data_count);
1351 if (rc) { 1329 if (rc)
1352 tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
1353 if (mtask->data_count)
1354 tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
1355 return rc; 1330 return rc;
1356 } 1331 tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
1357 } 1332 }
1358 1333
1359 if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) { 1334 if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
@@ -1387,55 +1362,67 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1387 return 0; 1362 return 0;
1388} 1363}
1389 1364
1390static inline int 1365static int
1391iscsi_send_read_hdr(struct iscsi_conn *conn, 1366iscsi_send_cmd_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1392 struct iscsi_tcp_cmd_task *tcp_ctask)
1393{ 1367{
1394 int rc; 1368 struct scsi_cmnd *sc = ctask->sc;
1369 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1370 int rc = 0;
1395 1371
1396 tcp_ctask->xmstate &= ~XMSTATE_R_HDR; 1372 if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_INIT) {
1397 if (conn->hdrdgst_en) 1373 tcp_ctask->sent = 0;
1398 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1374 tcp_ctask->sg_count = 0;
1399 (u8*)tcp_ctask->hdrext); 1375 tcp_ctask->exp_datasn = 0;
1400 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
1401 if (!rc) {
1402 BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
1403 return 0; /* wait for Data-In */
1404 }
1405 tcp_ctask->xmstate |= XMSTATE_R_HDR;
1406 return rc;
1407}
1408 1376
1409static inline int 1377 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1410iscsi_send_write_hdr(struct iscsi_conn *conn, 1378 struct scatterlist *sg = scsi_sglist(sc);
1411 struct iscsi_cmd_task *ctask)
1412{
1413 struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
1414 int rc;
1415 1379
1416 tcp_ctask->xmstate &= ~XMSTATE_W_HDR; 1380 iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
1417 if (conn->hdrdgst_en) 1381 tcp_ctask->sg = sg + 1;
1418 iscsi_hdr_digest(conn, &tcp_ctask->headbuf, 1382 tcp_ctask->bad_sg = sg + scsi_sg_count(sc);
1419 (u8*)tcp_ctask->hdrext); 1383
1420 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count); 1384 debug_scsi("cmd [itt 0x%x total %d imm_data %d "
1421 if (rc) { 1385 "unsol count %d, unsol offset %d]\n",
1422 tcp_ctask->xmstate |= XMSTATE_W_HDR; 1386 ctask->itt, scsi_bufflen(sc),
1423 return rc; 1387 ctask->imm_count, ctask->unsol_count,
1388 ctask->unsol_offset);
1389 }
1390
1391 iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
1392 sizeof(struct iscsi_hdr));
1393
1394 if (conn->hdrdgst_en)
1395 iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
1396 (u8*)tcp_ctask->hdrext);
1397 tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_INIT;
1398 tcp_ctask->xmstate |= XMSTATE_CMD_HDR_XMIT;
1424 } 1399 }
1425 1400
1426 if (ctask->imm_count) { 1401 if (tcp_ctask->xmstate & XMSTATE_CMD_HDR_XMIT) {
1427 tcp_ctask->xmstate |= XMSTATE_IMM_DATA; 1402 rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
1428 iscsi_set_padding(tcp_ctask, ctask->imm_count); 1403 if (rc)
1404 return rc;
1405 tcp_ctask->xmstate &= ~XMSTATE_CMD_HDR_XMIT;
1406
1407 if (sc->sc_data_direction != DMA_TO_DEVICE)
1408 return 0;
1409
1410 if (ctask->imm_count) {
1411 tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
1412 iscsi_set_padding(tcp_ctask, ctask->imm_count);
1429 1413
1430 if (ctask->conn->datadgst_en) { 1414 if (ctask->conn->datadgst_en) {
1431 iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask); 1415 iscsi_data_digest_init(ctask->conn->dd_data,
1432 tcp_ctask->immdigest = 0; 1416 tcp_ctask);
1417 tcp_ctask->immdigest = 0;
1418 }
1433 } 1419 }
1434 }
1435 1420
1436 if (ctask->unsol_count) 1421 if (ctask->unsol_count)
1437 tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT; 1422 tcp_ctask->xmstate |=
1438 return 0; 1423 XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1424 }
1425 return rc;
1439} 1426}
1440 1427
1441static int 1428static int
@@ -1624,9 +1611,7 @@ static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
1624 struct iscsi_data_task *dtask; 1611 struct iscsi_data_task *dtask;
1625 int left, rc; 1612 int left, rc;
1626 1613
1627 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) { 1614 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR_INIT) {
1628 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1629 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1630 if (!tcp_ctask->r2t) { 1615 if (!tcp_ctask->r2t) {
1631 spin_lock_bh(&session->lock); 1616 spin_lock_bh(&session->lock);
1632 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t, 1617 __kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
@@ -1640,12 +1625,19 @@ send_hdr:
1640 if (conn->hdrdgst_en) 1625 if (conn->hdrdgst_en)
1641 iscsi_hdr_digest(conn, &r2t->headbuf, 1626 iscsi_hdr_digest(conn, &r2t->headbuf,
1642 (u8*)dtask->hdrext); 1627 (u8*)dtask->hdrext);
1628 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR_INIT;
1629 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
1630 }
1631
1632 if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
1633 r2t = tcp_ctask->r2t;
1634 dtask = &r2t->dtask;
1635
1643 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count); 1636 rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
1644 if (rc) { 1637 if (rc)
1645 tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
1646 tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
1647 return rc; 1638 return rc;
1648 } 1639 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1640 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1649 1641
1650 if (conn->datadgst_en) { 1642 if (conn->datadgst_en) {
1651 iscsi_data_digest_init(conn->dd_data, tcp_ctask); 1643 iscsi_data_digest_init(conn->dd_data, tcp_ctask);
@@ -1677,8 +1669,6 @@ send_hdr:
1677 left = r2t->data_length - r2t->sent; 1669 left = r2t->data_length - r2t->sent;
1678 if (left) { 1670 if (left) {
1679 iscsi_solicit_data_cont(conn, ctask, r2t, left); 1671 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1680 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1681 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1682 goto send_hdr; 1672 goto send_hdr;
1683 } 1673 }
1684 1674
@@ -1693,8 +1683,6 @@ send_hdr:
1693 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, 1683 if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
1694 sizeof(void*))) { 1684 sizeof(void*))) {
1695 tcp_ctask->r2t = r2t; 1685 tcp_ctask->r2t = r2t;
1696 tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
1697 tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
1698 spin_unlock_bh(&session->lock); 1686 spin_unlock_bh(&session->lock);
1699 goto send_hdr; 1687 goto send_hdr;
1700 } 1688 }
@@ -1703,6 +1691,46 @@ send_hdr:
1703 return 0; 1691 return 0;
1704} 1692}
1705 1693
1694/**
1695 * iscsi_tcp_ctask_xmit - xmit normal PDU task
1696 * @conn: iscsi connection
1697 * @ctask: iscsi command task
1698 *
1699 * Notes:
 1700 * The function can return -EAGAIN, in which case the caller must
 1701 * call it again later, or recover. A '0' return code means a successful
 1702 * xmit.
 1703 * The function is divided into logical helpers (above) for the different
1704 * xmit stages.
1705 *
1706 *iscsi_send_cmd_hdr()
 1707 * XMSTATE_CMD_HDR_INIT - prepare header and data buffers, calculate
 1708 * the header digest
1709 * XMSTATE_CMD_HDR_XMIT - Transmit header in progress
1710 *
1711 *iscsi_send_padding
 1712 * XMSTATE_W_PAD - Prepare and send padding
 1713 * XMSTATE_W_RESEND_PAD - retry sending padding
1714 *
1715 *iscsi_send_digest
 1716 * XMSTATE_W_RESEND_DATA_DIGEST - finalize and send the data digest,
 1717 * retrying the send if needed
1718 *
1719 *iscsi_send_unsol_hdr
 1720 * XMSTATE_UNS_INIT - prepare unsolicited data header and digest
 1721 * XMSTATE_UNS_HDR - send unsolicited header
1722 *
1723 *iscsi_send_unsol_pdu
 1724 * XMSTATE_UNS_DATA - unsolicited data xmit in progress
1725 *
1726 *iscsi_send_sol_pdu
1727 * XMSTATE_SOL_HDR_INIT - solicit data header and digest initialize
1728 * XMSTATE_SOL_HDR - send solicit header
1729 * XMSTATE_SOL_DATA - send solicit data
1730 *
1731 *iscsi_tcp_ctask_xmit
1732 * XMSTATE_IMM_DATA - xmit managment data (??)
1733 **/
1706static int 1734static int
1707iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) 1735iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1708{ 1736{
@@ -1712,20 +1740,11 @@ iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1712 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n", 1740 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
1713 conn->id, tcp_ctask->xmstate, ctask->itt); 1741 conn->id, tcp_ctask->xmstate, ctask->itt);
1714 1742
1715 /* 1743 rc = iscsi_send_cmd_hdr(conn, ctask);
1716 * serialize with TMF AbortTask 1744 if (rc)
1717 */
1718 if (ctask->mtask)
1719 return rc; 1745 return rc;
1720 1746 if (ctask->sc->sc_data_direction != DMA_TO_DEVICE)
1721 if (tcp_ctask->xmstate & XMSTATE_R_HDR) 1747 return 0;
1722 return iscsi_send_read_hdr(conn, tcp_ctask);
1723
1724 if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
1725 rc = iscsi_send_write_hdr(conn, ctask);
1726 if (rc)
1727 return rc;
1728 }
1729 1748
1730 if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) { 1749 if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
1731 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg, 1750 rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
@@ -1810,18 +1829,22 @@ tcp_conn_alloc_fail:
1810static void 1829static void
1811iscsi_tcp_release_conn(struct iscsi_conn *conn) 1830iscsi_tcp_release_conn(struct iscsi_conn *conn)
1812{ 1831{
1832 struct iscsi_session *session = conn->session;
1813 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1833 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
1834 struct socket *sock = tcp_conn->sock;
1814 1835
1815 if (!tcp_conn->sock) 1836 if (!sock)
1816 return; 1837 return;
1817 1838
1818 sock_hold(tcp_conn->sock->sk); 1839 sock_hold(sock->sk);
1819 iscsi_conn_restore_callbacks(tcp_conn); 1840 iscsi_conn_restore_callbacks(tcp_conn);
1820 sock_put(tcp_conn->sock->sk); 1841 sock_put(sock->sk);
1821 1842
1822 sock_release(tcp_conn->sock); 1843 spin_lock_bh(&session->lock);
1823 tcp_conn->sock = NULL; 1844 tcp_conn->sock = NULL;
1824 conn->recv_lock = NULL; 1845 conn->recv_lock = NULL;
1846 spin_unlock_bh(&session->lock);
1847 sockfd_put(sock);
1825} 1848}
1826 1849
1827static void 1850static void
@@ -1852,6 +1875,46 @@ iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
1852 tcp_conn->hdr_size = sizeof(struct iscsi_hdr); 1875 tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
1853} 1876}
1854 1877
1878static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
1879 char *buf, int *port,
1880 int (*getname)(struct socket *, struct sockaddr *,
1881 int *addrlen))
1882{
1883 struct sockaddr_storage *addr;
1884 struct sockaddr_in6 *sin6;
1885 struct sockaddr_in *sin;
1886 int rc = 0, len;
1887
1888 addr = kmalloc(sizeof(*addr), GFP_KERNEL);
1889 if (!addr)
1890 return -ENOMEM;
1891
1892 if (getname(sock, (struct sockaddr *) addr, &len)) {
1893 rc = -ENODEV;
1894 goto free_addr;
1895 }
1896
1897 switch (addr->ss_family) {
1898 case AF_INET:
1899 sin = (struct sockaddr_in *)addr;
1900 spin_lock_bh(&conn->session->lock);
1901 sprintf(buf, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr));
1902 *port = be16_to_cpu(sin->sin_port);
1903 spin_unlock_bh(&conn->session->lock);
1904 break;
1905 case AF_INET6:
1906 sin6 = (struct sockaddr_in6 *)addr;
1907 spin_lock_bh(&conn->session->lock);
1908 sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr));
1909 *port = be16_to_cpu(sin6->sin6_port);
1910 spin_unlock_bh(&conn->session->lock);
1911 break;
1912 }
1913free_addr:
1914 kfree(addr);
1915 return rc;
1916}
1917
1855static int 1918static int
1856iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session, 1919iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1857 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, 1920 struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -1869,10 +1932,24 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1869 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err); 1932 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
1870 return -EEXIST; 1933 return -EEXIST;
1871 } 1934 }
1935 /*
1936 * copy these values now because if we drop the session
1937 * userspace may still want to query them, since we will
1938 * be using them for the reconnect
1939 */
1940 err = iscsi_tcp_get_addr(conn, sock, conn->portal_address,
1941 &conn->portal_port, kernel_getpeername);
1942 if (err)
1943 goto free_socket;
1944
1945 err = iscsi_tcp_get_addr(conn, sock, conn->local_address,
1946 &conn->local_port, kernel_getsockname);
1947 if (err)
1948 goto free_socket;
1872 1949
1873 err = iscsi_conn_bind(cls_session, cls_conn, is_leading); 1950 err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
1874 if (err) 1951 if (err)
1875 return err; 1952 goto free_socket;
1876 1953
1877 /* bind iSCSI connection and socket */ 1954 /* bind iSCSI connection and socket */
1878 tcp_conn->sock = sock; 1955 tcp_conn->sock = sock;
@@ -1896,25 +1973,19 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
1896 * set receive state machine into initial state 1973 * set receive state machine into initial state
1897 */ 1974 */
1898 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER; 1975 tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1899
1900 return 0; 1976 return 0;
1977
1978free_socket:
1979 sockfd_put(sock);
1980 return err;
1901} 1981}
1902 1982
1903/* called with host lock */ 1983/* called with host lock */
1904static void 1984static void
1905iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask, 1985iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1906 char *data, uint32_t data_size)
1907{ 1986{
1908 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data; 1987 struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
1909 1988 tcp_mtask->xmstate = XMSTATE_IMM_HDR_INIT;
1910 iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
1911 sizeof(struct iscsi_hdr));
1912 tcp_mtask->xmstate = XMSTATE_IMM_HDR;
1913 tcp_mtask->sent = 0;
1914
1915 if (mtask->data_count)
1916 iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
1917 mtask->data_count);
1918} 1989}
1919 1990
1920static int 1991static int
@@ -2026,41 +2097,18 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
2026 enum iscsi_param param, char *buf) 2097 enum iscsi_param param, char *buf)
2027{ 2098{
2028 struct iscsi_conn *conn = cls_conn->dd_data; 2099 struct iscsi_conn *conn = cls_conn->dd_data;
2029 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2030 struct inet_sock *inet;
2031 struct ipv6_pinfo *np;
2032 struct sock *sk;
2033 int len; 2100 int len;
2034 2101
2035 switch(param) { 2102 switch(param) {
2036 case ISCSI_PARAM_CONN_PORT: 2103 case ISCSI_PARAM_CONN_PORT:
2037 mutex_lock(&conn->xmitmutex); 2104 spin_lock_bh(&conn->session->lock);
2038 if (!tcp_conn->sock) { 2105 len = sprintf(buf, "%hu\n", conn->portal_port);
2039 mutex_unlock(&conn->xmitmutex); 2106 spin_unlock_bh(&conn->session->lock);
2040 return -EINVAL;
2041 }
2042
2043 inet = inet_sk(tcp_conn->sock->sk);
2044 len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
2045 mutex_unlock(&conn->xmitmutex);
2046 break; 2107 break;
2047 case ISCSI_PARAM_CONN_ADDRESS: 2108 case ISCSI_PARAM_CONN_ADDRESS:
2048 mutex_lock(&conn->xmitmutex); 2109 spin_lock_bh(&conn->session->lock);
2049 if (!tcp_conn->sock) { 2110 len = sprintf(buf, "%s\n", conn->portal_address);
2050 mutex_unlock(&conn->xmitmutex); 2111 spin_unlock_bh(&conn->session->lock);
2051 return -EINVAL;
2052 }
2053
2054 sk = tcp_conn->sock->sk;
2055 if (sk->sk_family == PF_INET) {
2056 inet = inet_sk(sk);
2057 len = sprintf(buf, NIPQUAD_FMT "\n",
2058 NIPQUAD(inet->daddr));
2059 } else {
2060 np = inet6_sk(sk);
2061 len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
2062 }
2063 mutex_unlock(&conn->xmitmutex);
2064 break; 2112 break;
2065 default: 2113 default:
2066 return iscsi_conn_get_param(cls_conn, param, buf); 2114 return iscsi_conn_get_param(cls_conn, param, buf);
@@ -2069,6 +2117,29 @@ iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
2069 return len; 2117 return len;
2070} 2118}
2071 2119
2120static int
2121iscsi_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2122 char *buf)
2123{
2124 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2125 int len;
2126
2127 switch (param) {
2128 case ISCSI_HOST_PARAM_IPADDRESS:
2129 spin_lock_bh(&session->lock);
2130 if (!session->leadconn)
2131 len = -ENODEV;
2132 else
2133 len = sprintf(buf, "%s\n",
2134 session->leadconn->local_address);
2135 spin_unlock_bh(&session->lock);
2136 break;
2137 default:
2138 return iscsi_host_get_param(shost, param, buf);
2139 }
2140 return len;
2141}
2142
2072static void 2143static void
2073iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) 2144iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
2074{ 2145{
@@ -2096,6 +2167,7 @@ iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
2096static struct iscsi_cls_session * 2167static struct iscsi_cls_session *
2097iscsi_tcp_session_create(struct iscsi_transport *iscsit, 2168iscsi_tcp_session_create(struct iscsi_transport *iscsit,
2098 struct scsi_transport_template *scsit, 2169 struct scsi_transport_template *scsit,
2170 uint16_t cmds_max, uint16_t qdepth,
2099 uint32_t initial_cmdsn, uint32_t *hostno) 2171 uint32_t initial_cmdsn, uint32_t *hostno)
2100{ 2172{
2101 struct iscsi_cls_session *cls_session; 2173 struct iscsi_cls_session *cls_session;
@@ -2103,7 +2175,7 @@ iscsi_tcp_session_create(struct iscsi_transport *iscsit,
2103 uint32_t hn; 2175 uint32_t hn;
2104 int cmd_i; 2176 int cmd_i;
2105 2177
2106 cls_session = iscsi_session_setup(iscsit, scsit, 2178 cls_session = iscsi_session_setup(iscsit, scsit, cmds_max, qdepth,
2107 sizeof(struct iscsi_tcp_cmd_task), 2179 sizeof(struct iscsi_tcp_cmd_task),
2108 sizeof(struct iscsi_tcp_mgmt_task), 2180 sizeof(struct iscsi_tcp_mgmt_task),
2109 initial_cmdsn, &hn); 2181 initial_cmdsn, &hn);
@@ -2142,17 +2214,24 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
2142 iscsi_session_teardown(cls_session); 2214 iscsi_session_teardown(cls_session);
2143} 2215}
2144 2216
2217static int iscsi_tcp_slave_configure(struct scsi_device *sdev)
2218{
2219 blk_queue_dma_alignment(sdev->request_queue, 0);
2220 return 0;
2221}
2222
2145static struct scsi_host_template iscsi_sht = { 2223static struct scsi_host_template iscsi_sht = {
2146 .name = "iSCSI Initiator over TCP/IP", 2224 .name = "iSCSI Initiator over TCP/IP",
2147 .queuecommand = iscsi_queuecommand, 2225 .queuecommand = iscsi_queuecommand,
2148 .change_queue_depth = iscsi_change_queue_depth, 2226 .change_queue_depth = iscsi_change_queue_depth,
2149 .can_queue = ISCSI_XMIT_CMDS_MAX - 1, 2227 .can_queue = ISCSI_DEF_XMIT_CMDS_MAX - 1,
2150 .sg_tablesize = ISCSI_SG_TABLESIZE, 2228 .sg_tablesize = ISCSI_SG_TABLESIZE,
2151 .max_sectors = 0xFFFF, 2229 .max_sectors = 0xFFFF,
2152 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN, 2230 .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
2153 .eh_abort_handler = iscsi_eh_abort, 2231 .eh_abort_handler = iscsi_eh_abort,
2154 .eh_host_reset_handler = iscsi_eh_host_reset, 2232 .eh_host_reset_handler = iscsi_eh_host_reset,
2155 .use_clustering = DISABLE_CLUSTERING, 2233 .use_clustering = DISABLE_CLUSTERING,
2234 .slave_configure = iscsi_tcp_slave_configure,
2156 .proc_name = "iscsi_tcp", 2235 .proc_name = "iscsi_tcp",
2157 .this_id = -1, 2236 .this_id = -1,
2158}; 2237};
@@ -2179,8 +2258,12 @@ static struct iscsi_transport iscsi_tcp_transport = {
2179 ISCSI_EXP_STATSN | 2258 ISCSI_EXP_STATSN |
2180 ISCSI_PERSISTENT_PORT | 2259 ISCSI_PERSISTENT_PORT |
2181 ISCSI_PERSISTENT_ADDRESS | 2260 ISCSI_PERSISTENT_ADDRESS |
2182 ISCSI_TARGET_NAME | 2261 ISCSI_TARGET_NAME | ISCSI_TPGT |
2183 ISCSI_TPGT, 2262 ISCSI_USERNAME | ISCSI_PASSWORD |
2263 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN,
2264 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
2265 ISCSI_HOST_INITIATOR_NAME |
2266 ISCSI_HOST_NETDEV_NAME,
2184 .host_template = &iscsi_sht, 2267 .host_template = &iscsi_sht,
2185 .conndata_size = sizeof(struct iscsi_conn), 2268 .conndata_size = sizeof(struct iscsi_conn),
2186 .max_conn = 1, 2269 .max_conn = 1,
@@ -2197,6 +2280,9 @@ static struct iscsi_transport iscsi_tcp_transport = {
2197 .get_session_param = iscsi_session_get_param, 2280 .get_session_param = iscsi_session_get_param,
2198 .start_conn = iscsi_conn_start, 2281 .start_conn = iscsi_conn_start,
2199 .stop_conn = iscsi_tcp_conn_stop, 2282 .stop_conn = iscsi_tcp_conn_stop,
2283 /* iscsi host params */
2284 .get_host_param = iscsi_tcp_host_get_param,
2285 .set_host_param = iscsi_host_set_param,
2200 /* IO */ 2286 /* IO */
2201 .send_pdu = iscsi_conn_send_pdu, 2287 .send_pdu = iscsi_conn_send_pdu,
2202 .get_stats = iscsi_conn_get_stats, 2288 .get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 32736831790e..7eba44df0a7f 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -29,11 +29,12 @@
29#define IN_PROGRESS_HEADER_GATHER 0x1 29#define IN_PROGRESS_HEADER_GATHER 0x1
30#define IN_PROGRESS_DATA_RECV 0x2 30#define IN_PROGRESS_DATA_RECV 0x2
31#define IN_PROGRESS_DDIGEST_RECV 0x3 31#define IN_PROGRESS_DDIGEST_RECV 0x3
32#define IN_PROGRESS_PAD_RECV 0x4
32 33
33/* xmit state machine */ 34/* xmit state machine */
34#define XMSTATE_IDLE 0x0 35#define XMSTATE_IDLE 0x0
35#define XMSTATE_R_HDR 0x1 36#define XMSTATE_CMD_HDR_INIT 0x1
36#define XMSTATE_W_HDR 0x2 37#define XMSTATE_CMD_HDR_XMIT 0x2
37#define XMSTATE_IMM_HDR 0x4 38#define XMSTATE_IMM_HDR 0x4
38#define XMSTATE_IMM_DATA 0x8 39#define XMSTATE_IMM_DATA 0x8
39#define XMSTATE_UNS_INIT 0x10 40#define XMSTATE_UNS_INIT 0x10
@@ -44,6 +45,8 @@
44#define XMSTATE_W_PAD 0x200 45#define XMSTATE_W_PAD 0x200
45#define XMSTATE_W_RESEND_PAD 0x400 46#define XMSTATE_W_RESEND_PAD 0x400
46#define XMSTATE_W_RESEND_DATA_DIGEST 0x800 47#define XMSTATE_W_RESEND_DATA_DIGEST 0x800
48#define XMSTATE_IMM_HDR_INIT 0x1000
49#define XMSTATE_SOL_HDR_INIT 0x2000
47 50
48#define ISCSI_PAD_LEN 4 51#define ISCSI_PAD_LEN 4
49#define ISCSI_SG_TABLESIZE SG_ALL 52#define ISCSI_SG_TABLESIZE SG_ALL
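A note on the defines above: xmstate is a bitmask of pending transmit steps, not a single enumerated state, so a task can have several steps queued and each is cleared as it completes. A minimal sketch of the pattern, assuming a hypothetical example_send_imm_data() that fails with -EAGAIN when the socket backs up:

static void example_xmstate_step(struct iscsi_tcp_cmd_task *tcp_ctask)
{
	if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
		/* clear the step before attempting it ... */
		tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
		if (example_send_imm_data(tcp_ctask))
			/* ... and set it again on failure so the next
			 * call into the state machine resumes here */
			tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
	}
}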
@@ -152,7 +155,7 @@ struct iscsi_tcp_cmd_task {
152 struct scatterlist *sg; /* per-cmd SG list */ 155 struct scatterlist *sg; /* per-cmd SG list */
153 struct scatterlist *bad_sg; /* assert statement */ 156 struct scatterlist *bad_sg; /* assert statement */
154 int sg_count; /* SG's to process */ 157 int sg_count; /* SG's to process */
155 uint32_t exp_r2tsn; 158 uint32_t exp_datasn; /* expected target's R2TSN/DataSN */
156 int data_offset; 159 int data_offset;
157 struct iscsi_r2t_info *r2t; /* in progress R2T */ 160 struct iscsi_r2t_info *r2t; /* in progress R2T */
158 struct iscsi_queue r2tpool; 161 struct iscsi_queue r2tpool;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 81e497d9eae0..5d231015bb20 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
1/* jazz_esp.c: ESP front-end for MIPS JAZZ systems. 1/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
2 * 2 *
3 * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende) 3 * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
@@ -143,7 +143,7 @@ static int __devinit esp_jazz_probe(struct platform_device *dev)
143 goto fail; 143 goto fail;
144 144
145 host->max_id = 8; 145 host->max_id = 8;
146 esp = host_to_esp(host); 146 esp = shost_priv(host);
147 147
148 esp->host = host; 148 esp->host = host;
149 esp->dev = dev; 149 esp->dev = dev;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3f5b9b445b29..4d85ce100192 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -22,7 +22,6 @@
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */ 23 */
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/mutex.h>
26#include <linux/kfifo.h> 25#include <linux/kfifo.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
28#include <asm/unaligned.h> 27#include <asm/unaligned.h>
@@ -46,27 +45,53 @@ class_to_transport_session(struct iscsi_cls_session *cls_session)
46} 45}
47EXPORT_SYMBOL_GPL(class_to_transport_session); 46EXPORT_SYMBOL_GPL(class_to_transport_session);
48 47
49#define INVALID_SN_DELTA 0xffff 48/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
49#define SNA32_CHECK 2147483648UL
50 50
51int 51static int iscsi_sna_lt(u32 n1, u32 n2)
52iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) 52{
53 return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
54 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
55}
56
57/* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
58static int iscsi_sna_lte(u32 n1, u32 n2)
59{
60 return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
61 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
62}
63
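A worked example of the RFC1982 helpers above: the deltas are taken in 32-bit unsigned arithmetic, so ordering survives CmdSN wrap-around. Illustrative only (the helpers are static to libiscsi.c):

static void example_sna_wraparound(void)
{
	/* a plain unsigned compare would say 0xfffffffa > 5, but in
	 * serial number arithmetic 0xfffffffa precedes 5, since
	 * 5 - 0xfffffffa == 11 (mod 2^32), which is < 2^31 */
	BUG_ON(!iscsi_sna_lt(0xfffffffaU, 5));
	BUG_ON(iscsi_sna_lt(5, 0xfffffffaU));
	BUG_ON(!iscsi_sna_lte(7, 7));	/* lte admits equality */
}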
64void
65iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
53{ 66{
54 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn); 67 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
55 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn); 68 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
56 69
57 if (max_cmdsn < exp_cmdsn -1 && 70 /*
58 max_cmdsn > exp_cmdsn - INVALID_SN_DELTA) 71 * standard specifies this check for when to update expected and
59 return ISCSI_ERR_MAX_CMDSN; 72 * max sequence numbers
60 if (max_cmdsn > session->max_cmdsn || 73 */
61 max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA) 74 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
62 session->max_cmdsn = max_cmdsn; 75 return;
63 if (exp_cmdsn > session->exp_cmdsn || 76
64 exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA) 77 if (exp_cmdsn != session->exp_cmdsn &&
78 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
65 session->exp_cmdsn = exp_cmdsn; 79 session->exp_cmdsn = exp_cmdsn;
66 80
67 return 0; 81 if (max_cmdsn != session->max_cmdsn &&
82 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
83 session->max_cmdsn = max_cmdsn;
84 /*
85 * if the window closed with IO queued, then kick the
86 * xmit thread
87 */
88 if (!list_empty(&session->leadconn->xmitqueue) ||
89 __kfifo_len(session->leadconn->mgmtqueue))
90 scsi_queue_work(session->host,
91 &session->leadconn->xmitwork);
92 }
68} 93}
69EXPORT_SYMBOL_GPL(iscsi_check_assign_cmdsn); 94EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
70 95
71void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask, 96void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
72 struct iscsi_data *hdr) 97 struct iscsi_data *hdr)
@@ -115,14 +140,17 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
115 hdr->flags = ISCSI_ATTR_SIMPLE; 140 hdr->flags = ISCSI_ATTR_SIMPLE;
116 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); 141 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
117 hdr->itt = build_itt(ctask->itt, conn->id, session->age); 142 hdr->itt = build_itt(ctask->itt, conn->id, session->age);
118 hdr->data_length = cpu_to_be32(sc->request_bufflen); 143 hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
119 hdr->cmdsn = cpu_to_be32(session->cmdsn); 144 hdr->cmdsn = cpu_to_be32(session->cmdsn);
120 session->cmdsn++; 145 session->cmdsn++;
121 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn); 146 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
122 memcpy(hdr->cdb, sc->cmnd, sc->cmd_len); 147 memcpy(hdr->cdb, sc->cmnd, sc->cmd_len);
123 memset(&hdr->cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len); 148 if (sc->cmd_len < MAX_COMMAND_SIZE)
149 memset(&hdr->cdb[sc->cmd_len], 0,
150 MAX_COMMAND_SIZE - sc->cmd_len);
124 151
125 ctask->data_count = 0; 152 ctask->data_count = 0;
153 ctask->imm_count = 0;
126 if (sc->sc_data_direction == DMA_TO_DEVICE) { 154 if (sc->sc_data_direction == DMA_TO_DEVICE) {
127 hdr->flags |= ISCSI_FLAG_CMD_WRITE; 155 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
128 /* 156 /*
@@ -139,25 +167,24 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
139 * 167 *
140 * pad_count bytes to be sent as zero-padding 168 * pad_count bytes to be sent as zero-padding
141 */ 169 */
142 ctask->imm_count = 0;
143 ctask->unsol_count = 0; 170 ctask->unsol_count = 0;
144 ctask->unsol_offset = 0; 171 ctask->unsol_offset = 0;
145 ctask->unsol_datasn = 0; 172 ctask->unsol_datasn = 0;
146 173
147 if (session->imm_data_en) { 174 if (session->imm_data_en) {
148 if (ctask->total_length >= session->first_burst) 175 if (scsi_bufflen(sc) >= session->first_burst)
149 ctask->imm_count = min(session->first_burst, 176 ctask->imm_count = min(session->first_burst,
150 conn->max_xmit_dlength); 177 conn->max_xmit_dlength);
151 else 178 else
152 ctask->imm_count = min(ctask->total_length, 179 ctask->imm_count = min(scsi_bufflen(sc),
153 conn->max_xmit_dlength); 180 conn->max_xmit_dlength);
154 hton24(ctask->hdr->dlength, ctask->imm_count); 181 hton24(ctask->hdr->dlength, ctask->imm_count);
155 } else 182 } else
156 zero_data(ctask->hdr->dlength); 183 zero_data(ctask->hdr->dlength);
157 184
158 if (!session->initial_r2t_en) { 185 if (!session->initial_r2t_en) {
159 ctask->unsol_count = min(session->first_burst, 186 ctask->unsol_count = min((session->first_burst),
160 ctask->total_length) - ctask->imm_count; 187 (scsi_bufflen(sc))) - ctask->imm_count;
161 ctask->unsol_offset = ctask->imm_count; 188 ctask->unsol_offset = ctask->imm_count;
162 } 189 }
163 190
@@ -165,7 +192,6 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
165 /* No unsolicited Data-Outs */ 192 /* No unsolicited Data-Outs */
166 ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL; 193 ctask->hdr->flags |= ISCSI_FLAG_CMD_FINAL;
167 } else { 194 } else {
168 ctask->datasn = 0;
169 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 195 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
170 zero_data(hdr->dlength); 196 zero_data(hdr->dlength);
171 197
@@ -174,8 +200,13 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
174 } 200 }
175 201
176 conn->scsicmd_pdus_cnt++; 202 conn->scsicmd_pdus_cnt++;
203
204 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x len %d "
205 "cmdsn %d win %d]\n",
206 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
207 conn->id, sc, sc->cmnd[0], ctask->itt, scsi_bufflen(sc),
208 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
177} 209}
178EXPORT_SYMBOL_GPL(iscsi_prep_scsi_cmd_pdu);
179 210
180/** 211/**
181 * iscsi_complete_command - return command back to scsi-ml 212 * iscsi_complete_command - return command back to scsi-ml
@@ -204,26 +235,12 @@ static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
204 atomic_inc(&ctask->refcount); 235 atomic_inc(&ctask->refcount);
205} 236}
206 237
207static void iscsi_get_ctask(struct iscsi_cmd_task *ctask)
208{
209 spin_lock_bh(&ctask->conn->session->lock);
210 __iscsi_get_ctask(ctask);
211 spin_unlock_bh(&ctask->conn->session->lock);
212}
213
214static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask) 238static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
215{ 239{
216 if (atomic_dec_and_test(&ctask->refcount)) 240 if (atomic_dec_and_test(&ctask->refcount))
217 iscsi_complete_command(ctask); 241 iscsi_complete_command(ctask);
218} 242}
219 243
220static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
221{
222 spin_lock_bh(&ctask->conn->session->lock);
223 __iscsi_put_ctask(ctask);
224 spin_unlock_bh(&ctask->conn->session->lock);
225}
226
227/** 244/**
228 * iscsi_cmd_rsp - SCSI Command Response processing 245 * iscsi_cmd_rsp - SCSI Command Response processing
229 * @conn: iscsi connection 246 * @conn: iscsi connection
@@ -235,21 +252,15 @@ static void iscsi_put_ctask(struct iscsi_cmd_task *ctask)
235 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and 252 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
236 * then completes the command and task. 253 * then completes the command and task.
237 **/ 254 **/
238static int iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 255static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
239 struct iscsi_cmd_task *ctask, char *data, 256 struct iscsi_cmd_task *ctask, char *data,
240 int datalen) 257 int datalen)
241{ 258{
242 int rc;
243 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr; 259 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
244 struct iscsi_session *session = conn->session; 260 struct iscsi_session *session = conn->session;
245 struct scsi_cmnd *sc = ctask->sc; 261 struct scsi_cmnd *sc = ctask->sc;
246 262
247 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr); 263 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
248 if (rc) {
249 sc->result = DID_ERROR << 16;
250 goto out;
251 }
252
253 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 264 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
254 265
255 sc->result = (DID_OK << 16) | rhdr->cmd_status; 266 sc->result = (DID_OK << 16) | rhdr->cmd_status;
@@ -286,14 +297,14 @@ invalid_datalen:
286 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) { 297 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
287 int res_count = be32_to_cpu(rhdr->residual_count); 298 int res_count = be32_to_cpu(rhdr->residual_count);
288 299
289 if (res_count > 0 && res_count <= sc->request_bufflen) 300 if (res_count > 0 && res_count <= scsi_bufflen(sc))
290 sc->resid = res_count; 301 scsi_set_resid(sc, res_count);
291 else 302 else
292 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 303 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
293 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW) 304 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
294 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; 305 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
295 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW) 306 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
296 sc->resid = be32_to_cpu(rhdr->residual_count); 307 scsi_set_resid(sc, be32_to_cpu(rhdr->residual_count));
297 308
298out: 309out:
299 debug_scsi("done [sc %lx res %d itt 0x%x]\n", 310 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
@@ -301,7 +312,6 @@ out:
301 conn->scsirsp_pdus_cnt++; 312 conn->scsirsp_pdus_cnt++;
302 313
303 __iscsi_put_ctask(ctask); 314 __iscsi_put_ctask(ctask);
304 return rc;
305} 315}
306 316
307static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) 317static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -381,8 +391,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
381 switch(opcode) { 391 switch(opcode) {
382 case ISCSI_OP_SCSI_CMD_RSP: 392 case ISCSI_OP_SCSI_CMD_RSP:
383 BUG_ON((void*)ctask != ctask->sc->SCp.ptr); 393 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
384 rc = iscsi_scsi_cmd_rsp(conn, hdr, ctask, data, 394 iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
385 datalen); 395 datalen);
386 break; 396 break;
387 case ISCSI_OP_SCSI_DATA_IN: 397 case ISCSI_OP_SCSI_DATA_IN:
388 BUG_ON((void*)ctask != ctask->sc->SCp.ptr); 398 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
@@ -405,11 +415,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
405 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n", 415 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
406 opcode, conn->id, mtask->itt, datalen); 416 opcode, conn->id, mtask->itt, datalen);
407 417
408 rc = iscsi_check_assign_cmdsn(session, 418 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
409 (struct iscsi_nopin*)hdr);
410 if (rc)
411 goto done;
412
413 switch(opcode) { 419 switch(opcode) {
414 case ISCSI_OP_LOGOUT_RSP: 420 case ISCSI_OP_LOGOUT_RSP:
415 if (datalen) { 421 if (datalen) {
@@ -458,10 +464,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
458 break; 464 break;
459 } 465 }
460 } else if (itt == ~0U) { 466 } else if (itt == ~0U) {
461 rc = iscsi_check_assign_cmdsn(session, 467 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
462 (struct iscsi_nopin*)hdr);
463 if (rc)
464 goto done;
465 468
466 switch(opcode) { 469 switch(opcode) {
467 case ISCSI_OP_NOOP_IN: 470 case ISCSI_OP_NOOP_IN:
@@ -491,7 +494,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
491 } else 494 } else
492 rc = ISCSI_ERR_BAD_ITT; 495 rc = ISCSI_ERR_BAD_ITT;
493 496
494done:
495 return rc; 497 return rc;
496} 498}
497EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); 499EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -578,17 +580,47 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
578} 580}
579EXPORT_SYMBOL_GPL(iscsi_conn_failure); 581EXPORT_SYMBOL_GPL(iscsi_conn_failure);
580 582
583static void iscsi_prep_mtask(struct iscsi_conn *conn,
584 struct iscsi_mgmt_task *mtask)
585{
586 struct iscsi_session *session = conn->session;
587 struct iscsi_hdr *hdr = mtask->hdr;
588 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
589
590 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
591 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
592 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
593 /*
594 * pre-format CmdSN for outgoing PDU.
595 */
596 nop->cmdsn = cpu_to_be32(session->cmdsn);
597 if (hdr->itt != RESERVED_ITT) {
598 hdr->itt = build_itt(mtask->itt, conn->id, session->age);
599 if (conn->c_stage == ISCSI_CONN_STARTED &&
600 !(hdr->opcode & ISCSI_OP_IMMEDIATE))
601 session->cmdsn++;
602 }
603
604 if (session->tt->init_mgmt_task)
605 session->tt->init_mgmt_task(conn, mtask);
606
607 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
608 hdr->opcode, hdr->itt, mtask->data_count);
609}
610
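One consequence of the stamping rule above, shown as a sketch: every mgmt PDU carries the current CmdSN, but only a non-immediate PDU on a started connection consumes one.

/* illustrative: with session->cmdsn == 10,
 *   an immediate Nop-Out is sent with cmdsn 10; session->cmdsn stays 10
 *   a non-immediate PDU  is sent with cmdsn 10; session->cmdsn becomes 11
 * per RFC 3720, immediate delivery does not advance the sequence */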
581static int iscsi_xmit_mtask(struct iscsi_conn *conn) 611static int iscsi_xmit_mtask(struct iscsi_conn *conn)
582{ 612{
583 struct iscsi_hdr *hdr = conn->mtask->hdr; 613 struct iscsi_hdr *hdr = conn->mtask->hdr;
584 int rc, was_logout = 0; 614 int rc, was_logout = 0;
585 615
616 spin_unlock_bh(&conn->session->lock);
586 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) { 617 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) {
587 conn->session->state = ISCSI_STATE_IN_RECOVERY; 618 conn->session->state = ISCSI_STATE_IN_RECOVERY;
588 iscsi_block_session(session_to_cls(conn->session)); 619 iscsi_block_session(session_to_cls(conn->session));
589 was_logout = 1; 620 was_logout = 1;
590 } 621 }
591 rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask); 622 rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
623 spin_lock_bh(&conn->session->lock);
592 if (rc) 624 if (rc)
593 return rc; 625 return rc;
594 626
@@ -602,6 +634,45 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
602 return 0; 634 return 0;
603} 635}
604 636
637static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
638{
639 struct iscsi_session *session = conn->session;
640
641 /*
642 * Check for iSCSI window and take care of CmdSN wrap-around
643 */
644 if (!iscsi_sna_lte(session->cmdsn, session->max_cmdsn)) {
645 debug_scsi("iSCSI CmdSN closed. MaxCmdSN %u CmdSN %u\n",
646 session->max_cmdsn, session->cmdsn);
647 return -ENOSPC;
648 }
649 return 0;
650}
651
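For example, the window check also holds across CmdSN wrap (a sketch reusing iscsi_sna_lte() from above):

static void example_window_wrap(void)
{
	u32 cmdsn = 0xfffffff0, max_cmdsn = 0x00000010;

	/* numerically cmdsn > max_cmdsn, yet the window is open:
	 * 0x10 - 0xfffffff0 == 0x20 (mod 2^32), which is < 2^31 */
	BUG_ON(!iscsi_sna_lte(cmdsn, max_cmdsn));
}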
652static int iscsi_xmit_ctask(struct iscsi_conn *conn)
653{
654 struct iscsi_cmd_task *ctask = conn->ctask;
655 int rc = 0;
656
657 /*
658 * serialize with TMF AbortTask
659 */
660 if (ctask->state == ISCSI_TASK_ABORTING)
661 goto done;
662
663 __iscsi_get_ctask(ctask);
664 spin_unlock_bh(&conn->session->lock);
665 rc = conn->session->tt->xmit_cmd_task(conn, ctask);
666 spin_lock_bh(&conn->session->lock);
667 __iscsi_put_ctask(ctask);
668
669done:
670 if (!rc)
671 /* done with this ctask */
672 conn->ctask = NULL;
673 return rc;
674}
675
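The shape of iscsi_xmit_ctask() above is what replaces the xmitmutex: pin the task with a reference, drop the session lock around the potentially slow LLD transmit, retake it, then drop the pin. A generic sketch of the idiom, with the hypothetical example_slow_xmit() standing in for tt->xmit_cmd_task():

static int example_locked_handoff(struct iscsi_conn *conn,
				  struct iscsi_cmd_task *ctask)
{
	int rc;

	__iscsi_get_ctask(ctask);		/* completion can't free it */
	spin_unlock_bh(&conn->session->lock);	/* no spinlock held across
						 * a long transmit */
	rc = example_slow_xmit(conn, ctask);
	spin_lock_bh(&conn->session->lock);
	__iscsi_put_ctask(ctask);		/* may complete the command */
	return rc;
}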
605/** 676/**
606 * iscsi_data_xmit - xmit any command into the scheduled connection 677 * iscsi_data_xmit - xmit any command into the scheduled connection
607 * @conn: iscsi connection 678 * @conn: iscsi connection
@@ -613,106 +684,79 @@ static int iscsi_xmit_mtask(struct iscsi_conn *conn)
613 **/ 684 **/
614static int iscsi_data_xmit(struct iscsi_conn *conn) 685static int iscsi_data_xmit(struct iscsi_conn *conn)
615{ 686{
616 struct iscsi_transport *tt;
617 int rc = 0; 687 int rc = 0;
618 688
689 spin_lock_bh(&conn->session->lock);
619 if (unlikely(conn->suspend_tx)) { 690 if (unlikely(conn->suspend_tx)) {
620 debug_scsi("conn %d Tx suspended!\n", conn->id); 691 debug_scsi("conn %d Tx suspended!\n", conn->id);
692 spin_unlock_bh(&conn->session->lock);
621 return -ENODATA; 693 return -ENODATA;
622 } 694 }
623 tt = conn->session->tt;
624
625 /*
626 * Transmit in the following order:
627 *
628 * 1) un-finished xmit (ctask or mtask)
629 * 2) immediate control PDUs
630 * 3) write data
631 * 4) SCSI commands
632 * 5) non-immediate control PDUs
633 *
634 * No need to lock around __kfifo_get as long as
635 * there's one producer and one consumer.
636 */
637
638 BUG_ON(conn->ctask && conn->mtask);
639 695
640 if (conn->ctask) { 696 if (conn->ctask) {
641 iscsi_get_ctask(conn->ctask); 697 rc = iscsi_xmit_ctask(conn);
642 rc = tt->xmit_cmd_task(conn, conn->ctask);
643 iscsi_put_ctask(conn->ctask);
644 if (rc) 698 if (rc)
645 goto again; 699 goto again;
646 /* done with this in-progress ctask */
647 conn->ctask = NULL;
648 } 700 }
701
649 if (conn->mtask) { 702 if (conn->mtask) {
650 rc = iscsi_xmit_mtask(conn); 703 rc = iscsi_xmit_mtask(conn);
651 if (rc) 704 if (rc)
652 goto again; 705 goto again;
653 } 706 }
654 707
655 /* process immediate first */ 708 /*
656 if (unlikely(__kfifo_len(conn->immqueue))) { 709 * process mgmt pdus like nops before commands since we should
657 while (__kfifo_get(conn->immqueue, (void*)&conn->mtask, 710 * only have one nop-out as a ping from us and targets should not
658 sizeof(void*))) { 711 * overflow us with nop-ins
659 spin_lock_bh(&conn->session->lock); 712 */
660 list_add_tail(&conn->mtask->running, 713check_mgmt:
661 &conn->mgmt_run_list); 714 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
662 spin_unlock_bh(&conn->session->lock); 715 sizeof(void*))) {
663 rc = iscsi_xmit_mtask(conn); 716 iscsi_prep_mtask(conn, conn->mtask);
664 if (rc) 717 list_add_tail(&conn->mtask->running, &conn->mgmt_run_list);
665 goto again; 718 rc = iscsi_xmit_mtask(conn);
666 } 719 if (rc)
720 goto again;
667 } 721 }
668 722
669 /* process command queue */ 723 /* process command queue */
670 spin_lock_bh(&conn->session->lock);
671 while (!list_empty(&conn->xmitqueue)) { 724 while (!list_empty(&conn->xmitqueue)) {
725 rc = iscsi_check_cmdsn_window_closed(conn);
726 if (rc) {
727 spin_unlock_bh(&conn->session->lock);
728 return rc;
729 }
672 /* 730 /*
673 * iscsi tcp may readd the task to the xmitqueue to send 731 * iscsi tcp may readd the task to the xmitqueue to send
674 * write data 732 * write data
675 */ 733 */
676 conn->ctask = list_entry(conn->xmitqueue.next, 734 conn->ctask = list_entry(conn->xmitqueue.next,
677 struct iscsi_cmd_task, running); 735 struct iscsi_cmd_task, running);
736 if (conn->ctask->state == ISCSI_TASK_PENDING) {
737 iscsi_prep_scsi_cmd_pdu(conn->ctask);
738 conn->session->tt->init_cmd_task(conn->ctask);
739 }
678 conn->ctask->state = ISCSI_TASK_RUNNING; 740 conn->ctask->state = ISCSI_TASK_RUNNING;
679 list_move_tail(conn->xmitqueue.next, &conn->run_list); 741 list_move_tail(conn->xmitqueue.next, &conn->run_list);
680 __iscsi_get_ctask(conn->ctask); 742 rc = iscsi_xmit_ctask(conn);
681 spin_unlock_bh(&conn->session->lock); 743 if (rc)
682
683 rc = tt->xmit_cmd_task(conn, conn->ctask);
684
685 spin_lock_bh(&conn->session->lock);
686 __iscsi_put_ctask(conn->ctask);
687 if (rc) {
688 spin_unlock_bh(&conn->session->lock);
689 goto again; 744 goto again;
690 } 745 /*
746 * we could continuously get new ctask requests so
747 * we need to check the mgmt queue for nops that need to
748 * be sent to avoid starvation
749 */
750 if (__kfifo_len(conn->mgmtqueue))
751 goto check_mgmt;
691 } 752 }
692 spin_unlock_bh(&conn->session->lock); 753 spin_unlock_bh(&conn->session->lock);
693 /* done with this ctask */
694 conn->ctask = NULL;
695
696 /* process the rest control plane PDUs, if any */
697 if (unlikely(__kfifo_len(conn->mgmtqueue))) {
698 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
699 sizeof(void*))) {
700 spin_lock_bh(&conn->session->lock);
701 list_add_tail(&conn->mtask->running,
702 &conn->mgmt_run_list);
703 spin_unlock_bh(&conn->session->lock);
704 rc = iscsi_xmit_mtask(conn);
705 if (rc)
706 goto again;
707 }
708 }
709
710 return -ENODATA; 754 return -ENODATA;
711 755
712again: 756again:
713 if (unlikely(conn->suspend_tx)) 757 if (unlikely(conn->suspend_tx))
714 return -ENODATA; 758 rc = -ENODATA;
715 759 spin_unlock_bh(&conn->session->lock);
716 return rc; 760 return rc;
717} 761}
718 762
@@ -724,11 +768,9 @@ static void iscsi_xmitworker(struct work_struct *work)
724 /* 768 /*
725 * serialize Xmit worker on a per-connection basis. 769 * serialize Xmit worker on a per-connection basis.
726 */ 770 */
727 mutex_lock(&conn->xmitmutex);
728 do { 771 do {
729 rc = iscsi_data_xmit(conn); 772 rc = iscsi_data_xmit(conn);
730 } while (rc >= 0 || rc == -EAGAIN); 773 } while (rc >= 0 || rc == -EAGAIN);
731 mutex_unlock(&conn->xmitmutex);
732} 774}
733 775
734enum { 776enum {
@@ -786,20 +828,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
786 goto fault; 828 goto fault;
787 } 829 }
788 830
789 /*
790 * Check for iSCSI window and take care of CmdSN wrap-around
791 */
792 if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
793 reason = FAILURE_WINDOW_CLOSED;
794 goto reject;
795 }
796
797 conn = session->leadconn; 831 conn = session->leadconn;
798 if (!conn) { 832 if (!conn) {
799 reason = FAILURE_SESSION_FREED; 833 reason = FAILURE_SESSION_FREED;
800 goto fault; 834 goto fault;
801 } 835 }
802 836
837 /*
838 * We check this here and in data xmit, because if we get to the point
839 * that this check is hitting the window then we have enough IO in
840 * flight and enough IO waiting to be transmitted that it is
841 * better to let the scsi/block layer queue it up.
842 */
843 if (iscsi_check_cmdsn_window_closed(conn)) {
844 reason = FAILURE_WINDOW_CLOSED;
845 goto reject;
846 }
847
803 if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask, 848 if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
804 sizeof(void*))) { 849 sizeof(void*))) {
805 reason = FAILURE_OOM; 850 reason = FAILURE_OOM;
@@ -814,18 +859,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
814 ctask->conn = conn; 859 ctask->conn = conn;
815 ctask->sc = sc; 860 ctask->sc = sc;
816 INIT_LIST_HEAD(&ctask->running); 861 INIT_LIST_HEAD(&ctask->running);
817 ctask->total_length = sc->request_bufflen;
818 iscsi_prep_scsi_cmd_pdu(ctask);
819
820 session->tt->init_cmd_task(ctask);
821 862
822 list_add_tail(&ctask->running, &conn->xmitqueue); 863 list_add_tail(&ctask->running, &conn->xmitqueue);
823 debug_scsi(
824 "ctask enq [%s cid %d sc %p cdb 0x%x itt 0x%x len %d cmdsn %d "
825 "win %d]\n",
826 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
827 conn->id, sc, sc->cmnd[0], ctask->itt, sc->request_bufflen,
828 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
829 spin_unlock(&session->lock); 864 spin_unlock(&session->lock);
830 865
831 scsi_queue_work(host, &conn->xmitwork); 866 scsi_queue_work(host, &conn->xmitwork);
@@ -841,7 +876,7 @@ fault:
841 printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n", 876 printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
842 sc->cmnd[0], reason); 877 sc->cmnd[0], reason);
843 sc->result = (DID_NO_CONNECT << 16); 878 sc->result = (DID_NO_CONNECT << 16);
844 sc->resid = sc->request_bufflen; 879 scsi_set_resid(sc, scsi_bufflen(sc));
845 sc->scsi_done(sc); 880 sc->scsi_done(sc);
846 return 0; 881 return 0;
847} 882}
@@ -856,19 +891,16 @@ int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
856} 891}
857EXPORT_SYMBOL_GPL(iscsi_change_queue_depth); 892EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
858 893
859static int 894static struct iscsi_mgmt_task *
860iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 895__iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
861 char *data, uint32_t data_size) 896 char *data, uint32_t data_size)
862{ 897{
863 struct iscsi_session *session = conn->session; 898 struct iscsi_session *session = conn->session;
864 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
865 struct iscsi_mgmt_task *mtask; 899 struct iscsi_mgmt_task *mtask;
866 900
867 spin_lock_bh(&session->lock); 901 if (session->state == ISCSI_STATE_TERMINATE)
868 if (session->state == ISCSI_STATE_TERMINATE) { 902 return NULL;
869 spin_unlock_bh(&session->lock); 903
870 return -EPERM;
871 }
872 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) || 904 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
873 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE)) 905 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
874 /* 906 /*
@@ -882,27 +914,11 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
882 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); 914 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
883 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); 915 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
884 916
885 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
886 if (!__kfifo_get(session->mgmtpool.queue, 917 if (!__kfifo_get(session->mgmtpool.queue,
887 (void*)&mtask, sizeof(void*))) { 918 (void*)&mtask, sizeof(void*)))
888 spin_unlock_bh(&session->lock); 919 return NULL;
889 return -ENOSPC;
890 }
891 } 920 }
892 921
893 /*
894 * pre-format CmdSN for outgoing PDU.
895 */
896 if (hdr->itt != RESERVED_ITT) {
897 hdr->itt = build_itt(mtask->itt, conn->id, session->age);
898 nop->cmdsn = cpu_to_be32(session->cmdsn);
899 if (conn->c_stage == ISCSI_CONN_STARTED &&
900 !(hdr->opcode & ISCSI_OP_IMMEDIATE))
901 session->cmdsn++;
902 } else
903 /* do not advance CmdSN */
904 nop->cmdsn = cpu_to_be32(session->cmdsn);
905
906 if (data_size) { 922 if (data_size) {
907 memcpy(mtask->data, data, data_size); 923 memcpy(mtask->data, data, data_size);
908 mtask->data_count = data_size; 924 mtask->data_count = data_size;
@@ -911,38 +927,23 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
911 927
912 INIT_LIST_HEAD(&mtask->running); 928 INIT_LIST_HEAD(&mtask->running);
913 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr)); 929 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
914 if (session->tt->init_mgmt_task) 930 __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
915 session->tt->init_mgmt_task(conn, mtask, data, data_size); 931 return mtask;
916 spin_unlock_bh(&session->lock);
917
918 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
919 hdr->opcode, hdr->itt, data_size);
920
921 /*
922 * since send_pdu() could be called at least from two contexts,
923 * we need to serialize __kfifo_put, so we don't have to take
924 * additional lock on fast data-path
925 */
926 if (hdr->opcode & ISCSI_OP_IMMEDIATE)
927 __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
928 else
929 __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
930
931 scsi_queue_work(session->host, &conn->xmitwork);
932 return 0;
933} 932}
934 933
935int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, 934int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
936 char *data, uint32_t data_size) 935 char *data, uint32_t data_size)
937{ 936{
938 struct iscsi_conn *conn = cls_conn->dd_data; 937 struct iscsi_conn *conn = cls_conn->dd_data;
939 int rc; 938 struct iscsi_session *session = conn->session;
940 939 int err = 0;
941 mutex_lock(&conn->xmitmutex);
942 rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
943 mutex_unlock(&conn->xmitmutex);
944 940
945 return rc; 941 spin_lock_bh(&session->lock);
942 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
943 err = -EPERM;
944 spin_unlock_bh(&session->lock);
945 scsi_queue_work(session->host, &conn->xmitwork);
946 return err;
946} 947}
947EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); 948EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
948 949
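With the rework, iscsi_conn_send_pdu() only copies the header, queues the mtask under the session lock, and kicks xmitwork; the itt/CmdSN stamping happens later in iscsi_prep_mtask(). A hedged caller sketch, queueing an immediate Nop-Out ping (field choices are illustrative, not taken from this patch):

static int example_queue_nop_ping(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_nopout hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
	hdr.flags = ISCSI_FLAG_CMD_FINAL;
	hdr.itt = 0;			/* re-stamped by iscsi_prep_mtask() */
	hdr.ttt = RESERVED_ITT;		/* a ping, not a Nop-In response */

	/* -EPERM if the session is terminating or no mtask is free */
	return iscsi_conn_send_pdu(cls_conn, (struct iscsi_hdr *)&hdr,
				   NULL, 0);
}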
@@ -1027,14 +1028,12 @@ static void iscsi_tmabort_timedout(unsigned long data)
1027 spin_unlock(&session->lock); 1028 spin_unlock(&session->lock);
1028} 1029}
1029 1030
1030/* must be called with the mutex lock */
1031static int iscsi_exec_abort_task(struct scsi_cmnd *sc, 1031static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
1032 struct iscsi_cmd_task *ctask) 1032 struct iscsi_cmd_task *ctask)
1033{ 1033{
1034 struct iscsi_conn *conn = ctask->conn; 1034 struct iscsi_conn *conn = ctask->conn;
1035 struct iscsi_session *session = conn->session; 1035 struct iscsi_session *session = conn->session;
1036 struct iscsi_tm *hdr = &conn->tmhdr; 1036 struct iscsi_tm *hdr = &conn->tmhdr;
1037 int rc;
1038 1037
1039 /* 1038 /*
1040 * ctask timed out but session is OK; requests must be serialized. 1039 * ctask timed out but session is OK; requests must be serialized.
@@ -1047,32 +1046,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
1047 hdr->rtt = ctask->hdr->itt; 1046 hdr->rtt = ctask->hdr->itt;
1048 hdr->refcmdsn = ctask->hdr->cmdsn; 1047 hdr->refcmdsn = ctask->hdr->cmdsn;
1049 1048
1050 rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr, 1049 ctask->mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1051 NULL, 0); 1050 NULL, 0);
1052 if (rc) { 1051 if (!ctask->mtask) {
1053 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1052 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1054 debug_scsi("abort sent failure [itt 0x%x] %d\n", ctask->itt, 1053 debug_scsi("abort sent failure [itt 0x%x]\n", ctask->itt);
1055 rc); 1054 return -EPERM;
1056 return rc;
1057 } 1055 }
1056 ctask->state = ISCSI_TASK_ABORTING;
1058 1057
1059 debug_scsi("abort sent [itt 0x%x]\n", ctask->itt); 1058 debug_scsi("abort sent [itt 0x%x]\n", ctask->itt);
1060 1059
1061 spin_lock_bh(&session->lock);
1062 ctask->mtask = (struct iscsi_mgmt_task *)
1063 session->mgmt_cmds[get_itt(hdr->itt) -
1064 ISCSI_MGMT_ITT_OFFSET];
1065
1066 if (conn->tmabort_state == TMABORT_INITIAL) { 1060 if (conn->tmabort_state == TMABORT_INITIAL) {
1067 conn->tmfcmd_pdus_cnt++; 1061 conn->tmfcmd_pdus_cnt++;
1068 conn->tmabort_timer.expires = 10*HZ + jiffies; 1062 conn->tmabort_timer.expires = 20*HZ + jiffies;
1069 conn->tmabort_timer.function = iscsi_tmabort_timedout; 1063 conn->tmabort_timer.function = iscsi_tmabort_timedout;
1070 conn->tmabort_timer.data = (unsigned long)ctask; 1064 conn->tmabort_timer.data = (unsigned long)ctask;
1071 add_timer(&conn->tmabort_timer); 1065 add_timer(&conn->tmabort_timer);
1072 debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt); 1066 debug_scsi("abort set timeout [itt 0x%x]\n", ctask->itt);
1073 } 1067 }
1074 spin_unlock_bh(&session->lock); 1068 spin_unlock_bh(&session->lock);
1075 mutex_unlock(&conn->xmitmutex); 1069 scsi_queue_work(session->host, &conn->xmitwork);
1076 1070
1077 /* 1071 /*
1078 * block eh thread until: 1072 * block eh thread until:
@@ -1089,13 +1083,12 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
1089 if (signal_pending(current)) 1083 if (signal_pending(current))
1090 flush_signals(current); 1084 flush_signals(current);
1091 del_timer_sync(&conn->tmabort_timer); 1085 del_timer_sync(&conn->tmabort_timer);
1092 1086 spin_lock_bh(&session->lock);
1093 mutex_lock(&conn->xmitmutex);
1094 return 0; 1087 return 0;
1095} 1088}
1096 1089
1097/* 1090/*
1098 * xmit mutex and session lock must be held 1091 * session lock must be held
1099 */ 1092 */
1100static struct iscsi_mgmt_task * 1093static struct iscsi_mgmt_task *
1101iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt) 1094iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
@@ -1127,7 +1120,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
1127 if (!ctask->mtask) 1120 if (!ctask->mtask)
1128 return -EINVAL; 1121 return -EINVAL;
1129 1122
1130 if (!iscsi_remove_mgmt_task(conn->immqueue, ctask->mtask->itt)) 1123 if (!iscsi_remove_mgmt_task(conn->mgmtqueue, ctask->mtask->itt))
1131 list_del(&ctask->mtask->running); 1124 list_del(&ctask->mtask->running);
1132 __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask, 1125 __kfifo_put(session->mgmtpool.queue, (void*)&ctask->mtask,
1133 sizeof(void*)); 1126 sizeof(void*));
@@ -1136,7 +1129,7 @@ static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
1136} 1129}
1137 1130
1138/* 1131/*
1139 * session lock and xmitmutex must be held 1132 * session lock must be held
1140 */ 1133 */
1141static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, 1134static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1142 int err) 1135 int err)
@@ -1147,11 +1140,14 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1147 if (!sc) 1140 if (!sc)
1148 return; 1141 return;
1149 1142
1150 conn->session->tt->cleanup_cmd_task(conn, ctask); 1143 if (ctask->state != ISCSI_TASK_PENDING)
1144 conn->session->tt->cleanup_cmd_task(conn, ctask);
1151 iscsi_ctask_mtask_cleanup(ctask); 1145 iscsi_ctask_mtask_cleanup(ctask);
1152 1146
1153 sc->result = err; 1147 sc->result = err;
1154 sc->resid = sc->request_bufflen; 1148 scsi_set_resid(sc, scsi_bufflen(sc));
1149 if (conn->ctask == ctask)
1150 conn->ctask = NULL;
1155 /* release ref from queuecommand */ 1151 /* release ref from queuecommand */
1156 __iscsi_put_ctask(ctask); 1152 __iscsi_put_ctask(ctask);
1157} 1153}
@@ -1179,7 +1175,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1179 conn->eh_abort_cnt++; 1175 conn->eh_abort_cnt++;
1180 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt); 1176 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
1181 1177
1182 mutex_lock(&conn->xmitmutex);
1183 spin_lock_bh(&session->lock); 1178 spin_lock_bh(&session->lock);
1184 1179
1185 /* 1180 /*
@@ -1192,9 +1187,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1192 1187
1193 /* ctask completed before time out */ 1188 /* ctask completed before time out */
1194 if (!ctask->sc) { 1189 if (!ctask->sc) {
1195 spin_unlock_bh(&session->lock);
1196 debug_scsi("sc completed while abort in progress\n"); 1190 debug_scsi("sc completed while abort in progress\n");
1197 goto success_rel_mutex; 1191 goto success;
1198 } 1192 }
1199 1193
1200 /* what should we do here ? */ 1194 /* what should we do here ? */
@@ -1204,15 +1198,13 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1204 goto failed; 1198 goto failed;
1205 } 1199 }
1206 1200
1207 if (ctask->state == ISCSI_TASK_PENDING) 1201 if (ctask->state == ISCSI_TASK_PENDING) {
1208 goto success_cleanup; 1202 fail_command(conn, ctask, DID_ABORT << 16);
1203 goto success;
1204 }
1209 1205
1210 conn->tmabort_state = TMABORT_INITIAL; 1206 conn->tmabort_state = TMABORT_INITIAL;
1211
1212 spin_unlock_bh(&session->lock);
1213 rc = iscsi_exec_abort_task(sc, ctask); 1207 rc = iscsi_exec_abort_task(sc, ctask);
1214 spin_lock_bh(&session->lock);
1215
1216 if (rc || sc->SCp.phase != session->age || 1208 if (rc || sc->SCp.phase != session->age ||
1217 session->state != ISCSI_STATE_LOGGED_IN) 1209 session->state != ISCSI_STATE_LOGGED_IN)
1218 goto failed; 1210 goto failed;
@@ -1220,45 +1212,44 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
1220 1212
1221 switch (conn->tmabort_state) { 1213 switch (conn->tmabort_state) {
1222 case TMABORT_SUCCESS: 1214 case TMABORT_SUCCESS:
1223 goto success_cleanup; 1215 spin_unlock_bh(&session->lock);
1216 /*
1217 * clean up task if aborted. grab the recv lock as a writer
1218 */
1219 write_lock_bh(conn->recv_lock);
1220 spin_lock(&session->lock);
1221 fail_command(conn, ctask, DID_ABORT << 16);
1222 spin_unlock(&session->lock);
1223 write_unlock_bh(conn->recv_lock);
1224 /*
1225 * make sure xmit thread is not still touching the
1226 * ctask/scsi_cmnd
1227 */
1228 scsi_flush_work(session->host);
1229 goto success_unlocked;
1224 case TMABORT_NOT_FOUND: 1230 case TMABORT_NOT_FOUND:
1225 if (!ctask->sc) { 1231 if (!ctask->sc) {
1226 /* ctask completed before tmf abort response */ 1232 /* ctask completed before tmf abort response */
1227 spin_unlock_bh(&session->lock);
1228 debug_scsi("sc completed while abort in progress\n"); 1233 debug_scsi("sc completed while abort in progress\n");
1229 goto success_rel_mutex; 1234 goto success;
1230 } 1235 }
1231 /* fall through */ 1236 /* fall through */
1232 default: 1237 default:
1233 /* timedout or failed */ 1238 /* timedout or failed */
1234 spin_unlock_bh(&session->lock); 1239 spin_unlock_bh(&session->lock);
1235 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 1240 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1236 spin_lock_bh(&session->lock); 1241 goto failed_unlocked;
1237 goto failed;
1238 } 1242 }
1239 1243
1240success_cleanup: 1244success:
1241 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1242 spin_unlock_bh(&session->lock); 1245 spin_unlock_bh(&session->lock);
1243 1246success_unlocked:
1244 /* 1247 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1245 * clean up task if aborted. we have the xmitmutex so grab
1246 * the recv lock as a writer
1247 */
1248 write_lock_bh(conn->recv_lock);
1249 spin_lock(&session->lock);
1250 fail_command(conn, ctask, DID_ABORT << 16);
1251 spin_unlock(&session->lock);
1252 write_unlock_bh(conn->recv_lock);
1253
1254success_rel_mutex:
1255 mutex_unlock(&conn->xmitmutex);
1256 return SUCCESS; 1248 return SUCCESS;
1257 1249
1258failed: 1250failed:
1259 spin_unlock_bh(&session->lock); 1251 spin_unlock_bh(&session->lock);
1260 mutex_unlock(&conn->xmitmutex); 1252failed_unlocked:
1261
1262 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt); 1253 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1263 return FAILED; 1254 return FAILED;
1264} 1255}
@@ -1339,6 +1330,10 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
1339 * iscsi_session_setup - create iscsi cls session and host and session 1330 * iscsi_session_setup - create iscsi cls session and host and session
1340 * @scsit: scsi transport template 1331 * @scsit: scsi transport template
1341 * @iscsit: iscsi transport template 1332 * @iscsit: iscsi transport template
1333 * @cmds_max: max commands the scsi host can queue
1334 * @qdepth: scsi host cmds per lun
1335 * @cmd_task_size: LLD ctask private data size
1336 * @mgmt_task_size: LLD mtask private data size
1342 * @initial_cmdsn: initial CmdSN 1337 * @initial_cmdsn: initial CmdSN
1343 * @hostno: host no allocated 1338 * @hostno: host no allocated
1344 * 1339 *
@@ -1348,6 +1343,7 @@ EXPORT_SYMBOL_GPL(iscsi_pool_free);
1348struct iscsi_cls_session * 1343struct iscsi_cls_session *
1349iscsi_session_setup(struct iscsi_transport *iscsit, 1344iscsi_session_setup(struct iscsi_transport *iscsit,
1350 struct scsi_transport_template *scsit, 1345 struct scsi_transport_template *scsit,
1346 uint16_t cmds_max, uint16_t qdepth,
1351 int cmd_task_size, int mgmt_task_size, 1347 int cmd_task_size, int mgmt_task_size,
1352 uint32_t initial_cmdsn, uint32_t *hostno) 1348 uint32_t initial_cmdsn, uint32_t *hostno)
1353{ 1349{
@@ -1356,11 +1352,32 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1356 struct iscsi_cls_session *cls_session; 1352 struct iscsi_cls_session *cls_session;
1357 int cmd_i; 1353 int cmd_i;
1358 1354
1355 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
1356 if (qdepth != 0)
1357 printk(KERN_ERR "iscsi: invalid queue depth of %d. "
1358 "Queue depth must be between 1 and %d.\n",
1359 qdepth, ISCSI_MAX_CMD_PER_LUN);
1360 qdepth = ISCSI_DEF_CMD_PER_LUN;
1361 }
1362
1363 if (cmds_max < 2 || (cmds_max & (cmds_max - 1)) ||
1364 cmds_max >= ISCSI_MGMT_ITT_OFFSET) {
1365 if (cmds_max != 0)
1366 printk(KERN_ERR "iscsi: invalid can_queue of %d. "
1367 "can_queue must be a power of 2 and between "
1368 "2 and %d - setting to %d.\n", cmds_max,
1369 ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
1370 cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
1371 }
1372
1359 shost = scsi_host_alloc(iscsit->host_template, 1373 shost = scsi_host_alloc(iscsit->host_template,
1360 hostdata_privsize(sizeof(*session))); 1374 hostdata_privsize(sizeof(*session)));
1361 if (!shost) 1375 if (!shost)
1362 return NULL; 1376 return NULL;
1363 1377
1378 /* the iscsi layer keeps one task in reserve */
1379 shost->can_queue = cmds_max - 1;
1380 shost->cmd_per_lun = qdepth;
1364 shost->max_id = 1; 1381 shost->max_id = 1;
1365 shost->max_channel = 0; 1382 shost->max_channel = 0;
1366 shost->max_lun = iscsit->max_lun; 1383 shost->max_lun = iscsit->max_lun;
@@ -1374,7 +1391,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
1374 session->host = shost; 1391 session->host = shost;
1375 session->state = ISCSI_STATE_FREE; 1392 session->state = ISCSI_STATE_FREE;
1376 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX; 1393 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
1377 session->cmds_max = ISCSI_XMIT_CMDS_MAX; 1394 session->cmds_max = cmds_max;
1378 session->cmdsn = initial_cmdsn; 1395 session->cmdsn = initial_cmdsn;
1379 session->exp_cmdsn = initial_cmdsn + 1; 1396 session->exp_cmdsn = initial_cmdsn + 1;
1380 session->max_cmdsn = initial_cmdsn + 1; 1397 session->max_cmdsn = initial_cmdsn + 1;
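The cmds_max validation in the hunk above relies on the usual bit trick: a power of two has a single bit set, so cmds_max & (cmds_max - 1) is zero exactly then. Restated as a sketch:

/* illustrative restatement of the check above: accept only powers of
 * two in [2, ISCSI_MGMT_ITT_OFFSET); e.g. 64 passes (64 & 63 == 0),
 * 96 fails (96 & 95 == 64) */
static inline int example_valid_cmds_max(uint16_t cmds_max)
{
	return cmds_max >= 2 && cmds_max < ISCSI_MGMT_ITT_OFFSET &&
	       (cmds_max & (cmds_max - 1)) == 0;
}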
@@ -1461,7 +1478,14 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1461 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds); 1478 iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
1462 iscsi_pool_free(&session->cmdpool, (void**)session->cmds); 1479 iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
1463 1480
1481 kfree(session->password);
1482 kfree(session->password_in);
1483 kfree(session->username);
1484 kfree(session->username_in);
1464 kfree(session->targetname); 1485 kfree(session->targetname);
1486 kfree(session->netdev);
1487 kfree(session->hwaddress);
1488 kfree(session->initiatorname);
1465 1489
1466 iscsi_destroy_session(cls_session); 1490 iscsi_destroy_session(cls_session);
1467 scsi_host_put(shost); 1491 scsi_host_put(shost);
@@ -1499,11 +1523,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1499 INIT_LIST_HEAD(&conn->xmitqueue); 1523 INIT_LIST_HEAD(&conn->xmitqueue);
1500 1524
1501 /* initialize general immediate & non-immediate PDU commands queue */ 1525 /* initialize general immediate & non-immediate PDU commands queue */
1502 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
1503 GFP_KERNEL, NULL);
1504 if (conn->immqueue == ERR_PTR(-ENOMEM))
1505 goto immqueue_alloc_fail;
1506
1507 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*), 1526 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
1508 GFP_KERNEL, NULL); 1527 GFP_KERNEL, NULL);
1509 if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) 1528 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
@@ -1527,7 +1546,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1527 conn->login_mtask->data = conn->data = data; 1546 conn->login_mtask->data = conn->data = data;
1528 1547
1529 init_timer(&conn->tmabort_timer); 1548 init_timer(&conn->tmabort_timer);
1530 mutex_init(&conn->xmitmutex);
1531 init_waitqueue_head(&conn->ehwait); 1549 init_waitqueue_head(&conn->ehwait);
1532 1550
1533 return cls_conn; 1551 return cls_conn;
@@ -1538,8 +1556,6 @@ login_mtask_data_alloc_fail:
1538login_mtask_alloc_fail: 1556login_mtask_alloc_fail:
1539 kfifo_free(conn->mgmtqueue); 1557 kfifo_free(conn->mgmtqueue);
1540mgmtqueue_alloc_fail: 1558mgmtqueue_alloc_fail:
1541 kfifo_free(conn->immqueue);
1542immqueue_alloc_fail:
1543 iscsi_destroy_conn(cls_conn); 1559 iscsi_destroy_conn(cls_conn);
1544 return NULL; 1560 return NULL;
1545} 1561}
@@ -1558,10 +1574,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1558 struct iscsi_session *session = conn->session; 1574 struct iscsi_session *session = conn->session;
1559 unsigned long flags; 1575 unsigned long flags;
1560 1576
1561 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1562 mutex_lock(&conn->xmitmutex);
1563
1564 spin_lock_bh(&session->lock); 1577 spin_lock_bh(&session->lock);
1578 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1565 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 1579 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
1566 if (session->leadconn == conn) { 1580 if (session->leadconn == conn) {
1567 /* 1581 /*
@@ -1572,8 +1586,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1572 } 1586 }
1573 spin_unlock_bh(&session->lock); 1587 spin_unlock_bh(&session->lock);
1574 1588
1575 mutex_unlock(&conn->xmitmutex);
1576
1577 /* 1589 /*
1578 * Block until all in-progress commands for this connection 1590 * Block until all in-progress commands for this connection
1579 * time out or fail. 1591 * time out or fail.
@@ -1610,7 +1622,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
1610 } 1622 }
1611 spin_unlock_bh(&session->lock); 1623 spin_unlock_bh(&session->lock);
1612 1624
1613 kfifo_free(conn->immqueue);
1614 kfifo_free(conn->mgmtqueue); 1625 kfifo_free(conn->mgmtqueue);
1615 1626
1616 iscsi_destroy_conn(cls_conn); 1627 iscsi_destroy_conn(cls_conn);
@@ -1671,8 +1682,7 @@ flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
1671 struct iscsi_mgmt_task *mtask, *tmp; 1682 struct iscsi_mgmt_task *mtask, *tmp;
1672 1683
1673 /* handle pending */ 1684 /* handle pending */
1674 while (__kfifo_get(conn->immqueue, (void*)&mtask, sizeof(void*)) || 1685 while (__kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
1675 __kfifo_get(conn->mgmtqueue, (void*)&mtask, sizeof(void*))) {
1676 if (mtask == conn->login_mtask) 1686 if (mtask == conn->login_mtask)
1677 continue; 1687 continue;
1678 debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt); 1688 debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
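The surviving queue keeps libiscsi's pointer-FIFO pattern: task pointers
are copied by value into a kfifo created with a NULL internal lock, then
drained with the unlocked __kfifo_get() while the caller holds the session
lock. A minimal sketch of that pattern against the pre-2.6.33 kfifo API:

	#include <linux/err.h>
	#include <linux/kfifo.h>
	#include <linux/spinlock.h>

	struct mgmt_task;	/* stand-in for struct iscsi_mgmt_task */

	static struct kfifo *queue_create(int max)
	{
		/* NULL lock: callers serialize with their own spinlock */
		return kfifo_alloc(max * sizeof(void *), GFP_KERNEL, NULL);
	}

	static void queue_flush(struct kfifo *q, spinlock_t *lock)
	{
		struct mgmt_task *mtask;

		spin_lock_bh(lock);
		while (__kfifo_get(q, (void *) &mtask, sizeof(void *)))
			; /* return mtask to its pool here */
		spin_unlock_bh(lock);
	}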
@@ -1742,12 +1752,12 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1742 conn->c_stage = ISCSI_CONN_STOPPED; 1752 conn->c_stage = ISCSI_CONN_STOPPED;
1743 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 1753 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1744 spin_unlock_bh(&session->lock); 1754 spin_unlock_bh(&session->lock);
1755 scsi_flush_work(session->host);
1745 1756
1746 write_lock_bh(conn->recv_lock); 1757 write_lock_bh(conn->recv_lock);
1747 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 1758 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1748 write_unlock_bh(conn->recv_lock); 1759 write_unlock_bh(conn->recv_lock);
1749 1760
1750 mutex_lock(&conn->xmitmutex);
1751 /* 1761 /*
1752 * for connection level recovery we should not calculate 1762 * for connection level recovery we should not calculate
1753 * header digest. conn->hdr_size used for optimization 1763 * header digest. conn->hdr_size used for optimization
@@ -1771,8 +1781,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
1771 fail_all_commands(conn); 1781 fail_all_commands(conn);
1772 flush_control_queues(session, conn); 1782 flush_control_queues(session, conn);
1773 spin_unlock_bh(&session->lock); 1783 spin_unlock_bh(&session->lock);
1774
1775 mutex_unlock(&conn->xmitmutex);
1776} 1784}
1777 1785
1778void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 1786void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
@@ -1867,6 +1875,30 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
1867 case ISCSI_PARAM_EXP_STATSN: 1875 case ISCSI_PARAM_EXP_STATSN:
1868 sscanf(buf, "%u", &conn->exp_statsn); 1876 sscanf(buf, "%u", &conn->exp_statsn);
1869 break; 1877 break;
1878 case ISCSI_PARAM_USERNAME:
1879 kfree(session->username);
1880 session->username = kstrdup(buf, GFP_KERNEL);
1881 if (!session->username)
1882 return -ENOMEM;
1883 break;
1884 case ISCSI_PARAM_USERNAME_IN:
1885 kfree(session->username_in);
1886 session->username_in = kstrdup(buf, GFP_KERNEL);
1887 if (!session->username_in)
1888 return -ENOMEM;
1889 break;
1890 case ISCSI_PARAM_PASSWORD:
1891 kfree(session->password);
1892 session->password = kstrdup(buf, GFP_KERNEL);
1893 if (!session->password)
1894 return -ENOMEM;
1895 break;
1896 case ISCSI_PARAM_PASSWORD_IN:
1897 kfree(session->password_in);
1898 session->password_in = kstrdup(buf, GFP_KERNEL);
1899 if (!session->password_in)
1900 return -ENOMEM;
1901 break;
1870 case ISCSI_PARAM_TARGET_NAME: 1902 case ISCSI_PARAM_TARGET_NAME:
1871 /* this should not change between logins */ 1903 /* this should not change between logins */
1872 if (session->targetname) 1904 if (session->targetname)
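Each new string parameter follows one ownership rule: a set frees any
previous copy and kstrdup()s the new value, and iscsi_session_teardown()
earlier in this diff kfree()s whatever is left. Since kfree(NULL) is a
no-op, parameters that were never set need no special casing. A condensed
sketch of the rule; set_string_param() is a hypothetical helper, libiscsi
open-codes it per parameter:

	#include <linux/slab.h>
	#include <linux/string.h>

	static int set_string_param(char **param, const char *buf)
	{
		kfree(*param);		/* safe even when *param == NULL */
		*param = kstrdup(buf, GFP_KERNEL);
		return *param ? 0 : -ENOMEM;
	}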
@@ -1940,6 +1972,18 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
1940 case ISCSI_PARAM_TPGT: 1972 case ISCSI_PARAM_TPGT:
1941 len = sprintf(buf, "%d\n", session->tpgt); 1973 len = sprintf(buf, "%d\n", session->tpgt);
1942 break; 1974 break;
1975 case ISCSI_PARAM_USERNAME:
1976 len = sprintf(buf, "%s\n", session->username);
1977 break;
1978 case ISCSI_PARAM_USERNAME_IN:
1979 len = sprintf(buf, "%s\n", session->username_in);
1980 break;
1981 case ISCSI_PARAM_PASSWORD:
1982 len = sprintf(buf, "%s\n", session->password);
1983 break;
1984 case ISCSI_PARAM_PASSWORD_IN:
1985 len = sprintf(buf, "%s\n", session->password_in);
1986 break;
1943 default: 1987 default:
1944 return -ENOSYS; 1988 return -ENOSYS;
1945 } 1989 }
@@ -1990,6 +2034,66 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
1990} 2034}
1991EXPORT_SYMBOL_GPL(iscsi_conn_get_param); 2035EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
1992 2036
2037int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2038 char *buf)
2039{
2040 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2041 int len;
2042
2043 switch (param) {
2044 case ISCSI_HOST_PARAM_NETDEV_NAME:
2045 if (!session->netdev)
2046 len = sprintf(buf, "%s\n", "default");
2047 else
2048 len = sprintf(buf, "%s\n", session->netdev);
2049 break;
2050 case ISCSI_HOST_PARAM_HWADDRESS:
2051 if (!session->hwaddress)
2052 len = sprintf(buf, "%s\n", "default");
2053 else
2054 len = sprintf(buf, "%s\n", session->hwaddress);
2055 break;
2056 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2057 if (!session->initiatorname)
2058 len = sprintf(buf, "%s\n", "unknown");
2059 else
2060 len = sprintf(buf, "%s\n", session->initiatorname);
2061 break;
2062
2063 default:
2064 return -ENOSYS;
2065 }
2066
2067 return len;
2068}
2069EXPORT_SYMBOL_GPL(iscsi_host_get_param);
2070
2071int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2072 char *buf, int buflen)
2073{
2074 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2075
2076 switch (param) {
2077 case ISCSI_HOST_PARAM_NETDEV_NAME:
2078 if (!session->netdev)
2079 session->netdev = kstrdup(buf, GFP_KERNEL);
2080 break;
2081 case ISCSI_HOST_PARAM_HWADDRESS:
2082 if (!session->hwaddress)
2083 session->hwaddress = kstrdup(buf, GFP_KERNEL);
2084 break;
2085 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2086 if (!session->initiatorname)
2087 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2088 break;
2089 default:
2090 return -ENOSYS;
2091 }
2092
2093 return 0;
2094}
2095EXPORT_SYMBOL_GPL(iscsi_host_set_param);
2096
1993MODULE_AUTHOR("Mike Christie"); 2097MODULE_AUTHOR("Mike Christie");
1994MODULE_DESCRIPTION("iSCSI library functions"); 2098MODULE_DESCRIPTION("iSCSI library functions");
1995MODULE_LICENSE("GPL"); 2099MODULE_LICENSE("GPL");
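With the two new exports a transport can serve host-level sysfs parameters
without glue of its own; note that iscsi_host_set_param() is
first-write-wins, an already-set value is silently kept. A sketch of the
wiring, assuming the get_host_param/set_host_param hooks and ISCSI_HOST_*
mask bits of this kernel generation:

	static struct iscsi_transport my_transport = {
		.name		 = "my_transport",
		/* ... */
		.host_param_mask = ISCSI_HOST_HWADDRESS |
				   ISCSI_HOST_NETDEV_NAME |
				   ISCSI_HOST_INITIATOR_NAME,
		.get_host_param	 = iscsi_host_get_param,
		.set_host_param	 = iscsi_host_set_param,
	};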
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b4b52694497c..d70ddfda93fc 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -76,8 +76,8 @@ static void sas_scsi_task_done(struct sas_task *task)
76 hs = DID_NO_CONNECT; 76 hs = DID_NO_CONNECT;
77 break; 77 break;
78 case SAS_DATA_UNDERRUN: 78 case SAS_DATA_UNDERRUN:
79 sc->resid = ts->residual; 79 scsi_set_resid(sc, ts->residual);
80 if (sc->request_bufflen - sc->resid < sc->underflow) 80 if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
81 hs = DID_ERROR; 81 hs = DID_ERROR;
82 break; 82 break;
83 case SAS_DATA_OVERRUN: 83 case SAS_DATA_OVERRUN:
@@ -161,9 +161,9 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
161 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); 161 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
162 memcpy(task->ssp_task.cdb, cmd->cmnd, 16); 162 memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
163 163
164 task->scatter = cmd->request_buffer; 164 task->scatter = scsi_sglist(cmd);
165 task->num_scatter = cmd->use_sg; 165 task->num_scatter = scsi_sg_count(cmd);
166 task->total_xfer_len = cmd->request_bufflen; 166 task->total_xfer_len = scsi_bufflen(cmd);
167 task->data_dir = cmd->sc_data_direction; 167 task->data_dir = cmd->sc_data_direction;
168 168
169 task->task_done = sas_scsi_task_done; 169 task->task_done = sas_scsi_task_done;
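The libsas hunks are the same mechanical conversion applied throughout
this merge: drivers stop reading request_buffer/use_sg/request_bufflen
directly and go through the SCSI data buffer accessors, which also provide
scsi_for_each_sg() for walking the scatterlist. A generic sketch of the
consumer side:

	#include <scsi/scsi_cmnd.h>

	static void map_data_buffer(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		unsigned int total = scsi_bufflen(cmd);
		int i;

		scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
			/* program one DMA element per sg entry */
		}
		scsi_set_resid(cmd, 0);	/* all of 'total' transferred */
		(void) total;
	}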
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index d1be465d5f55..1c286707dd5f 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,7 +1,7 @@
1#/******************************************************************* 1#/*******************************************************************
2# * This file is part of the Emulex Linux Device Driver for * 2# * This file is part of the Emulex Linux Device Driver for *
3# * Fibre Channel Host Bus Adapters. * 3# * Fibre Channel Host Bus Adapters. *
4# * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4# * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5# * EMULEX and SLI are trademarks of Emulex. * 5# * EMULEX and SLI are trademarks of Emulex. *
6# * www.emulex.com * 6# * www.emulex.com *
7# * * 7# * *
@@ -27,4 +27,5 @@ endif
27obj-$(CONFIG_SCSI_LPFC) := lpfc.o 27obj-$(CONFIG_SCSI_LPFC) := lpfc.o
28 28
29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \ 29lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o 30 lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
31 lpfc_vport.o lpfc_debugfs.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 82e8f90c4617..f8f64d6485cd 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -19,8 +19,9 @@
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21 21
22struct lpfc_sli2_slim; 22#include <scsi/scsi_host.h>
23 23
24struct lpfc_sli2_slim;
24 25
25#define LPFC_MAX_TARGET 256 /* max number of targets supported */ 26#define LPFC_MAX_TARGET 256 /* max number of targets supported */
26#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els 27#define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els
@@ -32,6 +33,20 @@ struct lpfc_sli2_slim;
32#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */ 33#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
33#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ 34#define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */
34 35
36/*
 37 * The following time intervals are used for adjusting SCSI device
 38 * queue depths when there is a driver resource error or a firmware
 39 * resource error.
40 */
41#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */
42#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */
43
44/* Number of exchanges reserved for discovery to complete */
45#define LPFC_DISC_IOCB_BUFF_COUNT 20
46
47#define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */
48#define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */
49
35/* Define macros for 64 bit support */ 50/* Define macros for 64 bit support */
36#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr))) 51#define putPaddrLow(addr) ((uint32_t) (0xffffffff & (u64)(addr)))
37#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32))) 52#define putPaddrHigh(addr) ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
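Both ramp intervals are in jiffies (HZ units), so they are meant to be
compared against the timestamp fields this diff adds to struct lpfc_hba
(last_rsrc_error_time, last_ramp_down_time, last_ramp_up_time) with the
overflow-safe jiffies helpers. An illustrative check; the surrounding
function is hypothetical:

	#include <linux/jiffies.h>

	static int ramp_down_due(unsigned long last_rsrc_error_time)
	{
		/* rate-limit queue-depth ramp down to once per interval */
		return time_after(jiffies, last_rsrc_error_time +
					   QUEUE_RAMP_DOWN_INTERVAL);
	}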
@@ -61,6 +76,11 @@ struct lpfc_dma_pool {
61 uint32_t current_count; 76 uint32_t current_count;
62}; 77};
63 78
79struct hbq_dmabuf {
80 struct lpfc_dmabuf dbuf;
81 uint32_t tag;
82};
83
64/* Priority bit. Set value to exceed low water mark in lpfc_mem. */ 84/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
65#define MEM_PRI 0x100 85#define MEM_PRI 0x100
66 86
@@ -90,6 +110,29 @@ typedef struct lpfc_vpd {
90 uint32_t sli2FwRev; 110 uint32_t sli2FwRev;
91 uint8_t sli2FwName[16]; 111 uint8_t sli2FwName[16];
92 } rev; 112 } rev;
113 struct {
114#ifdef __BIG_ENDIAN_BITFIELD
115 uint32_t rsvd2 :24; /* Reserved */
116 uint32_t cmv : 1; /* Configure Max VPIs */
117 uint32_t ccrp : 1; /* Config Command Ring Polling */
118 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
 119	uint32_t chbs	: 1;	/* Configure Host Backing store */
120 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
121 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
122 uint32_t cmx : 1; /* Configure Max XRIs */
123 uint32_t cmr : 1; /* Configure Max RPIs */
124#else /* __LITTLE_ENDIAN */
125 uint32_t cmr : 1; /* Configure Max RPIs */
126 uint32_t cmx : 1; /* Configure Max XRIs */
127 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
128 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
 129	uint32_t chbs	: 1;	/* Configure Host Backing store */
130 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
131 uint32_t ccrp : 1; /* Config Command Ring Polling */
132 uint32_t cmv : 1; /* Configure Max VPIs */
133 uint32_t rsvd2 :24; /* Reserved */
134#endif
135 } sli3Feat;
93} lpfc_vpd_t; 136} lpfc_vpd_t;
94 137
95struct lpfc_scsi_buf; 138struct lpfc_scsi_buf;
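The sli3Feat word uses the standard mailbox-register idiom: one 32-bit
field described twice, with the bit members listed in opposite order under
__BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN so the same names map onto the
same wire bits on either host. A standalone illustration of the pattern:

	#include <asm/byteorder.h>
	#include <linux/types.h>

	struct feature_word {
	#ifdef __BIG_ENDIAN_BITFIELD
		uint32_t rsvd : 30;	/* high-order bits declared first */
		uint32_t b1   : 1;
		uint32_t b0   : 1;
	#else	/* __LITTLE_ENDIAN */
		uint32_t b0   : 1;	/* low-order bits declared first */
		uint32_t b1   : 1;
		uint32_t rsvd : 30;
	#endif
	};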
@@ -122,6 +165,7 @@ struct lpfc_stats {
122 uint32_t elsRcvRPS; 165 uint32_t elsRcvRPS;
123 uint32_t elsRcvRPL; 166 uint32_t elsRcvRPL;
124 uint32_t elsXmitFLOGI; 167 uint32_t elsXmitFLOGI;
168 uint32_t elsXmitFDISC;
125 uint32_t elsXmitPLOGI; 169 uint32_t elsXmitPLOGI;
126 uint32_t elsXmitPRLI; 170 uint32_t elsXmitPRLI;
127 uint32_t elsXmitADISC; 171 uint32_t elsXmitADISC;
@@ -165,50 +209,186 @@ struct lpfc_sysfs_mbox {
165 struct lpfcMboxq * mbox; 209 struct lpfcMboxq * mbox;
166}; 210};
167 211
212struct lpfc_hba;
213
214
215enum discovery_state {
216 LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */
217 LPFC_VPORT_FAILED = 1, /* vport has failed */
218 LPFC_LOCAL_CFG_LINK = 6, /* local NPORT Id configured */
219 LPFC_FLOGI = 7, /* FLOGI sent to Fabric */
220 LPFC_FDISC = 8, /* FDISC sent for vport */
221 LPFC_FABRIC_CFG_LINK = 9, /* Fabric assigned NPORT Id
222 * configured */
223 LPFC_NS_REG = 10, /* Register with NameServer */
224 LPFC_NS_QRY = 11, /* Query NameServer for NPort ID list */
225 LPFC_BUILD_DISC_LIST = 12, /* Build ADISC and PLOGI lists for
226 * device authentication / discovery */
227 LPFC_DISC_AUTH = 13, /* Processing ADISC list */
228 LPFC_VPORT_READY = 32,
229};
230
231enum hba_state {
232 LPFC_LINK_UNKNOWN = 0, /* HBA state is unknown */
233 LPFC_WARM_START = 1, /* HBA state after selective reset */
234 LPFC_INIT_START = 2, /* Initial state after board reset */
235 LPFC_INIT_MBX_CMDS = 3, /* Initialize HBA with mbox commands */
236 LPFC_LINK_DOWN = 4, /* HBA initialized, link is down */
237 LPFC_LINK_UP = 5, /* Link is up - issue READ_LA */
238 LPFC_CLEAR_LA = 6, /* authentication cmplt - issue
239 * CLEAR_LA */
240 LPFC_HBA_READY = 32,
241 LPFC_HBA_ERROR = -1
242};
243
244struct lpfc_vport {
245 struct list_head listentry;
246 struct lpfc_hba *phba;
247 uint8_t port_type;
248#define LPFC_PHYSICAL_PORT 1
249#define LPFC_NPIV_PORT 2
250#define LPFC_FABRIC_PORT 3
251 enum discovery_state port_state;
252
253 uint16_t vpi;
254
255 uint32_t fc_flag; /* FC flags */
256/* Several of these flags are HBA centric and should be moved to
257 * phba->link_flag (e.g. FC_PTP, FC_PUBLIC_LOOP)
258 */
259#define FC_PT2PT 0x1 /* pt2pt with no fabric */
260#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
261#define FC_DISC_TMO 0x4 /* Discovery timer running */
262#define FC_PUBLIC_LOOP 0x8 /* Public loop */
263#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
264#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
265#define FC_NLP_MORE             0x40	 /* More nodes to process in node tbl */
266#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
267#define FC_FABRIC 0x100 /* We are fabric attached */
268#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
269#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
270#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
271#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
272#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
273#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
274#define FC_RFF_NOT_SUPPORTED 0x40000 /* RFF_ID was rejected by switch */
275#define FC_VPORT_NEEDS_REG_VPI 0x80000 /* Needs to have its vpi registered */
276#define FC_RSCN_DEFERRED 0x100000 /* A deferred RSCN being processed */
277
278 struct list_head fc_nodes;
279
280 /* Keep counters for the number of entries in each list. */
281 uint16_t fc_plogi_cnt;
282 uint16_t fc_adisc_cnt;
283 uint16_t fc_reglogin_cnt;
284 uint16_t fc_prli_cnt;
285 uint16_t fc_unmap_cnt;
286 uint16_t fc_map_cnt;
287 uint16_t fc_npr_cnt;
288 uint16_t fc_unused_cnt;
289 struct serv_parm fc_sparam; /* buffer for our service parameters */
290
291 uint32_t fc_myDID; /* fibre channel S_ID */
292 uint32_t fc_prevDID; /* previous fibre channel S_ID */
293
294 int32_t stopped; /* HBA has not been restarted since last ERATT */
295 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
296
297 uint32_t num_disc_nodes; /*in addition to hba_state */
298
299 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
300 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
301 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
302 struct lpfc_name fc_nodename; /* fc nodename */
303 struct lpfc_name fc_portname; /* fc portname */
304
305 struct lpfc_work_evt disc_timeout_evt;
306
307 struct timer_list fc_disctmo; /* Discovery rescue timer */
308 uint8_t fc_ns_retry; /* retries for fabric nameserver */
309 uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
310
311 spinlock_t work_port_lock;
312 uint32_t work_port_events; /* Timeout to be handled */
313#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
314#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
315#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
316
317#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
318#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
319#define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
320#define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */
321#define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */
322
323 struct timer_list fc_fdmitmo;
324 struct timer_list els_tmofunc;
325
326 int unreg_vpi_cmpl;
327
328 uint8_t load_flag;
329#define FC_LOADING 0x1 /* HBA in process of loading drvr */
330#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
331 char *vname; /* Application assigned name */
332 struct fc_vport *fc_vport;
333
334#ifdef CONFIG_LPFC_DEBUG_FS
335 struct dentry *debug_disc_trc;
336 struct dentry *debug_nodelist;
337 struct dentry *vport_debugfs_root;
338 struct lpfc_disc_trc *disc_trc;
339 atomic_t disc_trc_cnt;
340#endif
341};
342
343struct hbq_s {
344 uint16_t entry_count; /* Current number of HBQ slots */
345 uint32_t next_hbqPutIdx; /* Index to next HBQ slot to use */
346 uint32_t hbqPutIdx; /* HBQ slot to use */
347 uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
348};
349
350#define LPFC_MAX_HBQS 16
351/* this matches the position in the lpfc_hbq_defs array */
352#define LPFC_ELS_HBQ 0
353
168struct lpfc_hba { 354struct lpfc_hba {
169 struct lpfc_sli sli; 355 struct lpfc_sli sli;
356 uint32_t sli_rev; /* SLI2 or SLI3 */
357 uint32_t sli3_options; /* Mask of enabled SLI3 options */
358#define LPFC_SLI3_ENABLED 0x01
359#define LPFC_SLI3_HBQ_ENABLED 0x02
360#define LPFC_SLI3_NPIV_ENABLED 0x04
361#define LPFC_SLI3_VPORT_TEARDOWN 0x08
362 uint32_t iocb_cmd_size;
363 uint32_t iocb_rsp_size;
364
365 enum hba_state link_state;
366 uint32_t link_flag; /* link state flags */
367#define LS_LOOPBACK_MODE 0x1 /* NPort is in Loopback mode */
368 /* This flag is set while issuing */
369 /* INIT_LINK mailbox command */
370#define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
371#define LS_IGNORE_ERATT 0x3 /* intr handler should ignore ERATT */
372
170 struct lpfc_sli2_slim *slim2p; 373 struct lpfc_sli2_slim *slim2p;
374 struct lpfc_dmabuf hbqslimp;
375
171 dma_addr_t slim2p_mapping; 376 dma_addr_t slim2p_mapping;
377
172 uint16_t pci_cfg_value; 378 uint16_t pci_cfg_value;
173 379
174 int32_t hba_state; 380 uint8_t work_found;
175 381#define LPFC_MAX_WORKER_ITERATION 4
176#define LPFC_STATE_UNKNOWN 0 /* HBA state is unknown */
177#define LPFC_WARM_START 1 /* HBA state after selective reset */
178#define LPFC_INIT_START 2 /* Initial state after board reset */
179#define LPFC_INIT_MBX_CMDS 3 /* Initialize HBA with mbox commands */
180#define LPFC_LINK_DOWN 4 /* HBA initialized, link is down */
181#define LPFC_LINK_UP 5 /* Link is up - issue READ_LA */
182#define LPFC_LOCAL_CFG_LINK 6 /* local NPORT Id configured */
183#define LPFC_FLOGI 7 /* FLOGI sent to Fabric */
184#define LPFC_FABRIC_CFG_LINK 8 /* Fabric assigned NPORT Id
185 configured */
186#define LPFC_NS_REG 9 /* Register with NameServer */
187#define LPFC_NS_QRY 10 /* Query NameServer for NPort ID list */
188#define LPFC_BUILD_DISC_LIST 11 /* Build ADISC and PLOGI lists for
189 * device authentication / discovery */
190#define LPFC_DISC_AUTH 12 /* Processing ADISC list */
191#define LPFC_CLEAR_LA 13 /* authentication cmplt - issue
192 CLEAR_LA */
193#define LPFC_HBA_READY 32
194#define LPFC_HBA_ERROR -1
195 382
196 int32_t stopped; /* HBA has not been restarted since last ERATT */
197 uint8_t fc_linkspeed; /* Link speed after last READ_LA */ 383 uint8_t fc_linkspeed; /* Link speed after last READ_LA */
198 384
199 uint32_t fc_eventTag; /* event tag for link attention */ 385 uint32_t fc_eventTag; /* event tag for link attention */
200 uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
201 386
202 uint32_t num_disc_nodes; /*in addition to hba_state */
203 387
204 struct timer_list fc_estabtmo; /* link establishment timer */ 388 struct timer_list fc_estabtmo; /* link establishment timer */
205 struct timer_list fc_disctmo; /* Discovery rescue timer */
206 struct timer_list fc_fdmitmo; /* fdmi timer */
207 /* These fields used to be binfo */ 389 /* These fields used to be binfo */
208 struct lpfc_name fc_nodename; /* fc nodename */
209 struct lpfc_name fc_portname; /* fc portname */
210 uint32_t fc_pref_DID; /* preferred D_ID */ 390 uint32_t fc_pref_DID; /* preferred D_ID */
211 uint8_t fc_pref_ALPA; /* preferred AL_PA */ 391 uint8_t fc_pref_ALPA; /* preferred AL_PA */
212 uint32_t fc_edtov; /* E_D_TOV timer value */ 392 uint32_t fc_edtov; /* E_D_TOV timer value */
213 uint32_t fc_arbtov; /* ARB_TOV timer value */ 393 uint32_t fc_arbtov; /* ARB_TOV timer value */
214 uint32_t fc_ratov; /* R_A_TOV timer value */ 394 uint32_t fc_ratov; /* R_A_TOV timer value */
@@ -216,61 +396,21 @@ struct lpfc_hba {
216 uint32_t fc_altov; /* AL_TOV timer value */ 396 uint32_t fc_altov; /* AL_TOV timer value */
217 uint32_t fc_crtov; /* C_R_TOV timer value */ 397 uint32_t fc_crtov; /* C_R_TOV timer value */
218 uint32_t fc_citov; /* C_I_TOV timer value */ 398 uint32_t fc_citov; /* C_I_TOV timer value */
219 uint32_t fc_myDID; /* fibre channel S_ID */
220 uint32_t fc_prevDID; /* previous fibre channel S_ID */
221 399
222 struct serv_parm fc_sparam; /* buffer for our service parameters */
223 struct serv_parm fc_fabparam; /* fabric service parameters buffer */ 400 struct serv_parm fc_fabparam; /* fabric service parameters buffer */
224 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */ 401 uint8_t alpa_map[128]; /* AL_PA map from READ_LA */
225 402
226 uint8_t fc_ns_retry; /* retries for fabric nameserver */
227 uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
228 uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
229 struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
230 uint32_t lmt; 403 uint32_t lmt;
231 uint32_t fc_flag; /* FC flags */
232#define FC_PT2PT 0x1 /* pt2pt with no fabric */
233#define FC_PT2PT_PLOGI 0x2 /* pt2pt initiate PLOGI */
234#define FC_DISC_TMO 0x4 /* Discovery timer running */
235#define FC_PUBLIC_LOOP 0x8 /* Public loop */
236#define FC_LBIT 0x10 /* LOGIN bit in loopinit set */
237#define FC_RSCN_MODE 0x20 /* RSCN cmd rcv'ed */
238#define FC_NLP_MORE 0x40 /* More node to process in node tbl */
239#define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */
240#define FC_FABRIC 0x100 /* We are fabric attached */
241#define FC_ESTABLISH_LINK 0x200 /* Reestablish Link */
242#define FC_RSCN_DISCOVERY 0x400 /* Authenticate all devices after RSCN*/
243#define FC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
244#define FC_LOADING 0x1000 /* HBA in process of loading drvr */
245#define FC_UNLOADING 0x2000 /* HBA in process of unloading drvr */
246#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
247#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
248#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
249#define FC_BYPASSED_MODE 0x20000 /* NPort is in bypassed mode */
250#define FC_LOOPBACK_MODE 0x40000 /* NPort is in Loopback mode */
251 /* This flag is set while issuing */
252 /* INIT_LINK mailbox command */
253#define FC_IGNORE_ERATT 0x80000 /* intr handler should ignore ERATT */
254 404
255 uint32_t fc_topology; /* link topology, from LINK INIT */ 405 uint32_t fc_topology; /* link topology, from LINK INIT */
256 406
257 struct lpfc_stats fc_stat; 407 struct lpfc_stats fc_stat;
258 408
259 struct list_head fc_nodes;
260
261 /* Keep counters for the number of entries in each list. */
262 uint16_t fc_plogi_cnt;
263 uint16_t fc_adisc_cnt;
264 uint16_t fc_reglogin_cnt;
265 uint16_t fc_prli_cnt;
266 uint16_t fc_unmap_cnt;
267 uint16_t fc_map_cnt;
268 uint16_t fc_npr_cnt;
269 uint16_t fc_unused_cnt;
270 struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */ 409 struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
271 uint32_t nport_event_cnt; /* timestamp for nlplist entry */ 410 uint32_t nport_event_cnt; /* timestamp for nlplist entry */
272 411
273 uint32_t wwnn[2]; 412 uint8_t wwnn[8];
413 uint8_t wwpn[8];
274 uint32_t RandomData[7]; 414 uint32_t RandomData[7];
275 415
276 uint32_t cfg_log_verbose; 416 uint32_t cfg_log_verbose;
@@ -278,6 +418,9 @@ struct lpfc_hba {
278 uint32_t cfg_nodev_tmo; 418 uint32_t cfg_nodev_tmo;
279 uint32_t cfg_devloss_tmo; 419 uint32_t cfg_devloss_tmo;
280 uint32_t cfg_hba_queue_depth; 420 uint32_t cfg_hba_queue_depth;
421 uint32_t cfg_peer_port_login;
422 uint32_t cfg_vport_restrict_login;
423 uint32_t cfg_npiv_enable;
281 uint32_t cfg_fcp_class; 424 uint32_t cfg_fcp_class;
282 uint32_t cfg_use_adisc; 425 uint32_t cfg_use_adisc;
283 uint32_t cfg_ack0; 426 uint32_t cfg_ack0;
@@ -304,22 +447,20 @@ struct lpfc_hba {
304 447
305 lpfc_vpd_t vpd; /* vital product data */ 448 lpfc_vpd_t vpd; /* vital product data */
306 449
307 struct Scsi_Host *host;
308 struct pci_dev *pcidev; 450 struct pci_dev *pcidev;
309 struct list_head work_list; 451 struct list_head work_list;
310 uint32_t work_ha; /* Host Attention Bits for WT */ 452 uint32_t work_ha; /* Host Attention Bits for WT */
311 uint32_t work_ha_mask; /* HA Bits owned by WT */ 453 uint32_t work_ha_mask; /* HA Bits owned by WT */
312 uint32_t work_hs; /* HS stored in case of ERRAT */ 454 uint32_t work_hs; /* HS stored in case of ERRAT */
313 uint32_t work_status[2]; /* Extra status from SLIM */ 455 uint32_t work_status[2]; /* Extra status from SLIM */
314 uint32_t work_hba_events; /* Timeout to be handled */
315#define WORKER_DISC_TMO 0x1 /* Discovery timeout */
316#define WORKER_ELS_TMO 0x2 /* ELS timeout */
317#define WORKER_MBOX_TMO 0x4 /* MBOX timeout */
318#define WORKER_FDMI_TMO 0x8 /* FDMI timeout */
319 456
320 wait_queue_head_t *work_wait; 457 wait_queue_head_t *work_wait;
321 struct task_struct *worker_thread; 458 struct task_struct *worker_thread;
322 459
460 struct list_head hbq_buffer_list;
461 uint32_t hbq_count; /* Count of configured HBQs */
 462	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices  */
463
323 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ 464 unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */
324 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ 465 unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */
325 void __iomem *slim_memmap_p; /* Kernel memory mapped address for 466 void __iomem *slim_memmap_p; /* Kernel memory mapped address for
@@ -334,6 +475,10 @@ struct lpfc_hba {
334 reg */ 475 reg */
335 void __iomem *HCregaddr; /* virtual address for host ctl reg */ 476 void __iomem *HCregaddr; /* virtual address for host ctl reg */
336 477
478 struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
479 uint32_t __iomem *hbq_put; /* Address in SLIM to HBQ put ptrs */
480 uint32_t *hbq_get; /* Host mem address of HBQ get ptrs */
481
337 int brd_no; /* FC board number */ 482 int brd_no; /* FC board number */
338 483
339 char SerialNumber[32]; /* adapter Serial Number */ 484 char SerialNumber[32]; /* adapter Serial Number */
@@ -353,7 +498,6 @@ struct lpfc_hba {
353 uint8_t soft_wwn_enable; 498 uint8_t soft_wwn_enable;
354 499
355 struct timer_list fcp_poll_timer; 500 struct timer_list fcp_poll_timer;
356 struct timer_list els_tmofunc;
357 501
358 /* 502 /*
359 * stat counters 503 * stat counters
@@ -370,31 +514,69 @@ struct lpfc_hba {
370 uint32_t total_scsi_bufs; 514 uint32_t total_scsi_bufs;
371 struct list_head lpfc_iocb_list; 515 struct list_head lpfc_iocb_list;
372 uint32_t total_iocbq_bufs; 516 uint32_t total_iocbq_bufs;
517 spinlock_t hbalock;
373 518
374 /* pci_mem_pools */ 519 /* pci_mem_pools */
375 struct pci_pool *lpfc_scsi_dma_buf_pool; 520 struct pci_pool *lpfc_scsi_dma_buf_pool;
376 struct pci_pool *lpfc_mbuf_pool; 521 struct pci_pool *lpfc_mbuf_pool;
522 struct pci_pool *lpfc_hbq_pool;
377 struct lpfc_dma_pool lpfc_mbuf_safety_pool; 523 struct lpfc_dma_pool lpfc_mbuf_safety_pool;
378 524
379 mempool_t *mbox_mem_pool; 525 mempool_t *mbox_mem_pool;
380 mempool_t *nlp_mem_pool; 526 mempool_t *nlp_mem_pool;
381 527
382 struct fc_host_statistics link_stats; 528 struct fc_host_statistics link_stats;
529
530 struct list_head port_list;
531 struct lpfc_vport *pport; /* physical lpfc_vport pointer */
532 uint16_t max_vpi; /* Maximum virtual nports */
533#define LPFC_MAX_VPI 100 /* Max number of VPorts supported */
534 unsigned long *vpi_bmask; /* vpi allocation table */
535
536 /* Data structure used by fabric iocb scheduler */
537 struct list_head fabric_iocb_list;
538 atomic_t fabric_iocb_count;
539 struct timer_list fabric_block_timer;
540 unsigned long bit_flags;
541#define FABRIC_COMANDS_BLOCKED 0
542 atomic_t num_rsrc_err;
543 atomic_t num_cmd_success;
544 unsigned long last_rsrc_error_time;
545 unsigned long last_ramp_down_time;
546 unsigned long last_ramp_up_time;
547#ifdef CONFIG_LPFC_DEBUG_FS
548 struct dentry *hba_debugfs_root;
549 atomic_t debugfs_vport_count;
550#endif
551
552 /* Fields used for heart beat. */
553 unsigned long last_completion_time;
554 struct timer_list hb_tmofunc;
555 uint8_t hb_outstanding;
383}; 556};
384 557
558static inline struct Scsi_Host *
559lpfc_shost_from_vport(struct lpfc_vport *vport)
560{
561 return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
562}
563
385static inline void 564static inline void
386lpfc_set_loopback_flag(struct lpfc_hba *phba) { 565lpfc_set_loopback_flag(struct lpfc_hba *phba)
566{
387 if (phba->cfg_topology == FLAGS_LOCAL_LB) 567 if (phba->cfg_topology == FLAGS_LOCAL_LB)
388 phba->fc_flag |= FC_LOOPBACK_MODE; 568 phba->link_flag |= LS_LOOPBACK_MODE;
389 else 569 else
390 phba->fc_flag &= ~FC_LOOPBACK_MODE; 570 phba->link_flag &= ~LS_LOOPBACK_MODE;
391} 571}
392 572
393struct rnidrsp { 573static inline int
394 void *buf; 574lpfc_is_link_up(struct lpfc_hba *phba)
395 uint32_t uniqueid; 575{
396 struct list_head list; 576 return phba->link_state == LPFC_LINK_UP ||
397 uint32_t data; 577 phba->link_state == LPFC_CLEAR_LA ||
398}; 578 phba->link_state == LPFC_HBA_READY;
579}
399 580
400#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */ 581#define FC_REG_DUMP_EVENT 0x10 /* Register for Dump events */
582
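lpfc_shost_from_vport() is the inverse of the lookup the attribute
handlers below perform: the vport lives in the Scsi_Host's trailing
hostdata[] storage, so a cast of shost->hostdata recovers the vport and
container_of() on hostdata[0] recovers the host. Both directions in one
sketch:

	#include <scsi/scsi_host.h>

	static struct lpfc_vport *vport_of(struct Scsi_Host *shost)
	{
		return (struct lpfc_vport *) shost->hostdata;
	}

	static struct Scsi_Host *shost_of(struct lpfc_vport *vport)
	{
		/* hostdata[] is the flexible tail of struct Scsi_Host */
		return container_of((void *) vport, struct Scsi_Host,
				    hostdata[0]);
	}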
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5dfda9778c80..860a52c090f4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -39,6 +39,7 @@
39#include "lpfc_version.h" 39#include "lpfc_version.h"
40#include "lpfc_compat.h" 40#include "lpfc_compat.h"
41#include "lpfc_crtn.h" 41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
42 43
43#define LPFC_DEF_DEVLOSS_TMO 30 44#define LPFC_DEF_DEVLOSS_TMO 30
44#define LPFC_MIN_DEVLOSS_TMO 1 45#define LPFC_MIN_DEVLOSS_TMO 1
@@ -76,116 +77,156 @@ static ssize_t
76lpfc_info_show(struct class_device *cdev, char *buf) 77lpfc_info_show(struct class_device *cdev, char *buf)
77{ 78{
78 struct Scsi_Host *host = class_to_shost(cdev); 79 struct Scsi_Host *host = class_to_shost(cdev);
80
79 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host)); 81 return snprintf(buf, PAGE_SIZE, "%s\n",lpfc_info(host));
80} 82}
81 83
82static ssize_t 84static ssize_t
83lpfc_serialnum_show(struct class_device *cdev, char *buf) 85lpfc_serialnum_show(struct class_device *cdev, char *buf)
84{ 86{
85 struct Scsi_Host *host = class_to_shost(cdev); 87 struct Scsi_Host *shost = class_to_shost(cdev);
86 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 88 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
89 struct lpfc_hba *phba = vport->phba;
90
87 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber); 91 return snprintf(buf, PAGE_SIZE, "%s\n",phba->SerialNumber);
88} 92}
89 93
90static ssize_t 94static ssize_t
91lpfc_modeldesc_show(struct class_device *cdev, char *buf) 95lpfc_modeldesc_show(struct class_device *cdev, char *buf)
92{ 96{
93 struct Scsi_Host *host = class_to_shost(cdev); 97 struct Scsi_Host *shost = class_to_shost(cdev);
94 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 98 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
99 struct lpfc_hba *phba = vport->phba;
100
95 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc); 101 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelDesc);
96} 102}
97 103
98static ssize_t 104static ssize_t
99lpfc_modelname_show(struct class_device *cdev, char *buf) 105lpfc_modelname_show(struct class_device *cdev, char *buf)
100{ 106{
101 struct Scsi_Host *host = class_to_shost(cdev); 107 struct Scsi_Host *shost = class_to_shost(cdev);
102 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 108 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
109 struct lpfc_hba *phba = vport->phba;
110
103 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName); 111 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ModelName);
104} 112}
105 113
106static ssize_t 114static ssize_t
107lpfc_programtype_show(struct class_device *cdev, char *buf) 115lpfc_programtype_show(struct class_device *cdev, char *buf)
108{ 116{
109 struct Scsi_Host *host = class_to_shost(cdev); 117 struct Scsi_Host *shost = class_to_shost(cdev);
110 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 118 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
119 struct lpfc_hba *phba = vport->phba;
120
111 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType); 121 return snprintf(buf, PAGE_SIZE, "%s\n",phba->ProgramType);
112} 122}
113 123
114static ssize_t 124static ssize_t
115lpfc_portnum_show(struct class_device *cdev, char *buf) 125lpfc_vportnum_show(struct class_device *cdev, char *buf)
116{ 126{
117 struct Scsi_Host *host = class_to_shost(cdev); 127 struct Scsi_Host *shost = class_to_shost(cdev);
118 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 128 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
129 struct lpfc_hba *phba = vport->phba;
130
119 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port); 131 return snprintf(buf, PAGE_SIZE, "%s\n",phba->Port);
120} 132}
121 133
122static ssize_t 134static ssize_t
123lpfc_fwrev_show(struct class_device *cdev, char *buf) 135lpfc_fwrev_show(struct class_device *cdev, char *buf)
124{ 136{
125 struct Scsi_Host *host = class_to_shost(cdev); 137 struct Scsi_Host *shost = class_to_shost(cdev);
126 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 138 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
139 struct lpfc_hba *phba = vport->phba;
127 char fwrev[32]; 140 char fwrev[32];
141
128 lpfc_decode_firmware_rev(phba, fwrev, 1); 142 lpfc_decode_firmware_rev(phba, fwrev, 1);
129 return snprintf(buf, PAGE_SIZE, "%s\n",fwrev); 143 return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
130} 144}
131 145
132static ssize_t 146static ssize_t
133lpfc_hdw_show(struct class_device *cdev, char *buf) 147lpfc_hdw_show(struct class_device *cdev, char *buf)
134{ 148{
135 char hdw[9]; 149 char hdw[9];
136 struct Scsi_Host *host = class_to_shost(cdev); 150 struct Scsi_Host *shost = class_to_shost(cdev);
137 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 151 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
152 struct lpfc_hba *phba = vport->phba;
138 lpfc_vpd_t *vp = &phba->vpd; 153 lpfc_vpd_t *vp = &phba->vpd;
154
139 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); 155 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
140 return snprintf(buf, PAGE_SIZE, "%s\n", hdw); 156 return snprintf(buf, PAGE_SIZE, "%s\n", hdw);
141} 157}
142static ssize_t 158static ssize_t
143lpfc_option_rom_version_show(struct class_device *cdev, char *buf) 159lpfc_option_rom_version_show(struct class_device *cdev, char *buf)
144{ 160{
145 struct Scsi_Host *host = class_to_shost(cdev); 161 struct Scsi_Host *shost = class_to_shost(cdev);
146 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 162 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
163 struct lpfc_hba *phba = vport->phba;
164
147 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion); 165 return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
148} 166}
149static ssize_t 167static ssize_t
150lpfc_state_show(struct class_device *cdev, char *buf) 168lpfc_state_show(struct class_device *cdev, char *buf)
151{ 169{
152 struct Scsi_Host *host = class_to_shost(cdev); 170 struct Scsi_Host *shost = class_to_shost(cdev);
153 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 171 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
154 int len = 0; 172 struct lpfc_hba *phba = vport->phba;
155 switch (phba->hba_state) { 173 int len = 0;
156 case LPFC_STATE_UNKNOWN: 174
175 switch (phba->link_state) {
176 case LPFC_LINK_UNKNOWN:
157 case LPFC_WARM_START: 177 case LPFC_WARM_START:
158 case LPFC_INIT_START: 178 case LPFC_INIT_START:
159 case LPFC_INIT_MBX_CMDS: 179 case LPFC_INIT_MBX_CMDS:
160 case LPFC_LINK_DOWN: 180 case LPFC_LINK_DOWN:
181 case LPFC_HBA_ERROR:
161 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n"); 182 len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
162 break; 183 break;
163 case LPFC_LINK_UP: 184 case LPFC_LINK_UP:
164 case LPFC_LOCAL_CFG_LINK:
165 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up\n");
166 break;
167 case LPFC_FLOGI:
168 case LPFC_FABRIC_CFG_LINK:
169 case LPFC_NS_REG:
170 case LPFC_NS_QRY:
171 case LPFC_BUILD_DISC_LIST:
172 case LPFC_DISC_AUTH:
173 case LPFC_CLEAR_LA: 185 case LPFC_CLEAR_LA:
174 len += snprintf(buf + len, PAGE_SIZE-len,
175 "Link Up - Discovery\n");
176 break;
177 case LPFC_HBA_READY: 186 case LPFC_HBA_READY:
178 len += snprintf(buf + len, PAGE_SIZE-len, 187 len += snprintf(buf + len, PAGE_SIZE-len, "Link Up - \n");
179 "Link Up - Ready:\n"); 188
189 switch (vport->port_state) {
190 len += snprintf(buf + len, PAGE_SIZE-len,
191 "initializing\n");
192 break;
193 case LPFC_LOCAL_CFG_LINK:
194 len += snprintf(buf + len, PAGE_SIZE-len,
195 "Configuring Link\n");
196 break;
197 case LPFC_FDISC:
198 case LPFC_FLOGI:
199 case LPFC_FABRIC_CFG_LINK:
200 case LPFC_NS_REG:
201 case LPFC_NS_QRY:
202 case LPFC_BUILD_DISC_LIST:
203 case LPFC_DISC_AUTH:
204 len += snprintf(buf + len, PAGE_SIZE - len,
205 "Discovery\n");
206 break;
207 case LPFC_VPORT_READY:
208 len += snprintf(buf + len, PAGE_SIZE - len, "Ready\n");
209 break;
210
211 case LPFC_VPORT_FAILED:
212 len += snprintf(buf + len, PAGE_SIZE - len, "Failed\n");
213 break;
214
215 case LPFC_VPORT_UNKNOWN:
216 len += snprintf(buf + len, PAGE_SIZE - len,
217 "Unknown\n");
218 break;
219 }
220
180 if (phba->fc_topology == TOPOLOGY_LOOP) { 221 if (phba->fc_topology == TOPOLOGY_LOOP) {
181 if (phba->fc_flag & FC_PUBLIC_LOOP) 222 if (vport->fc_flag & FC_PUBLIC_LOOP)
182 len += snprintf(buf + len, PAGE_SIZE-len, 223 len += snprintf(buf + len, PAGE_SIZE-len,
183 " Public Loop\n"); 224 " Public Loop\n");
184 else 225 else
185 len += snprintf(buf + len, PAGE_SIZE-len, 226 len += snprintf(buf + len, PAGE_SIZE-len,
186 " Private Loop\n"); 227 " Private Loop\n");
187 } else { 228 } else {
188 if (phba->fc_flag & FC_FABRIC) 229 if (vport->fc_flag & FC_FABRIC)
189 len += snprintf(buf + len, PAGE_SIZE-len, 230 len += snprintf(buf + len, PAGE_SIZE-len,
190 " Fabric\n"); 231 " Fabric\n");
191 else 232 else
@@ -193,29 +234,32 @@ lpfc_state_show(struct class_device *cdev, char *buf)
193 " Point-2-Point\n"); 234 " Point-2-Point\n");
194 } 235 }
195 } 236 }
237
196 return len; 238 return len;
197} 239}
198 240
199static ssize_t 241static ssize_t
200lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf) 242lpfc_num_discovered_ports_show(struct class_device *cdev, char *buf)
201{ 243{
202 struct Scsi_Host *host = class_to_shost(cdev); 244 struct Scsi_Host *shost = class_to_shost(cdev);
203 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 245 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
204 return snprintf(buf, PAGE_SIZE, "%d\n", phba->fc_map_cnt + 246
205 phba->fc_unmap_cnt); 247 return snprintf(buf, PAGE_SIZE, "%d\n",
248 vport->fc_map_cnt + vport->fc_unmap_cnt);
206} 249}
207 250
208 251
209static int 252static int
210lpfc_issue_lip(struct Scsi_Host *host) 253lpfc_issue_lip(struct Scsi_Host *shost)
211{ 254{
212 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; 255 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
256 struct lpfc_hba *phba = vport->phba;
213 LPFC_MBOXQ_t *pmboxq; 257 LPFC_MBOXQ_t *pmboxq;
214 int mbxstatus = MBXERR_ERROR; 258 int mbxstatus = MBXERR_ERROR;
215 259
216 if ((phba->fc_flag & FC_OFFLINE_MODE) || 260 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
217 (phba->fc_flag & FC_BLOCK_MGMT_IO) || 261 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) ||
218 (phba->hba_state != LPFC_HBA_READY)) 262 (vport->port_state != LPFC_VPORT_READY))
219 return -EPERM; 263 return -EPERM;
220 264
221 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL); 265 pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
@@ -238,9 +282,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
238 } 282 }
239 283
240 lpfc_set_loopback_flag(phba); 284 lpfc_set_loopback_flag(phba);
241 if (mbxstatus == MBX_TIMEOUT) 285 if (mbxstatus != MBX_TIMEOUT)
242 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
243 else
244 mempool_free(pmboxq, phba->mbox_mem_pool); 286 mempool_free(pmboxq, phba->mbox_mem_pool);
245 287
246 if (mbxstatus == MBXERR_ERROR) 288 if (mbxstatus == MBXERR_ERROR)
@@ -320,8 +362,10 @@ lpfc_selective_reset(struct lpfc_hba *phba)
320static ssize_t 362static ssize_t
321lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count) 363lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
322{ 364{
323 struct Scsi_Host *host = class_to_shost(cdev); 365 struct Scsi_Host *shost = class_to_shost(cdev);
324 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 366 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
367 struct lpfc_hba *phba = vport->phba;
368
325 int status = -EINVAL; 369 int status = -EINVAL;
326 370
327 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) 371 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
@@ -336,23 +380,26 @@ lpfc_issue_reset(struct class_device *cdev, const char *buf, size_t count)
336static ssize_t 380static ssize_t
337lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf) 381lpfc_nport_evt_cnt_show(struct class_device *cdev, char *buf)
338{ 382{
339 struct Scsi_Host *host = class_to_shost(cdev); 383 struct Scsi_Host *shost = class_to_shost(cdev);
340 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 384 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
385 struct lpfc_hba *phba = vport->phba;
386
341 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt); 387 return snprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
342} 388}
343 389
344static ssize_t 390static ssize_t
345lpfc_board_mode_show(struct class_device *cdev, char *buf) 391lpfc_board_mode_show(struct class_device *cdev, char *buf)
346{ 392{
347 struct Scsi_Host *host = class_to_shost(cdev); 393 struct Scsi_Host *shost = class_to_shost(cdev);
348 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 394 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
395 struct lpfc_hba *phba = vport->phba;
349 char * state; 396 char * state;
350 397
351 if (phba->hba_state == LPFC_HBA_ERROR) 398 if (phba->link_state == LPFC_HBA_ERROR)
352 state = "error"; 399 state = "error";
353 else if (phba->hba_state == LPFC_WARM_START) 400 else if (phba->link_state == LPFC_WARM_START)
354 state = "warm start"; 401 state = "warm start";
355 else if (phba->hba_state == LPFC_INIT_START) 402 else if (phba->link_state == LPFC_INIT_START)
356 state = "offline"; 403 state = "offline";
357 else 404 else
358 state = "online"; 405 state = "online";
@@ -363,8 +410,9 @@ lpfc_board_mode_show(struct class_device *cdev, char *buf)
363static ssize_t 410static ssize_t
364lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count) 411lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
365{ 412{
366 struct Scsi_Host *host = class_to_shost(cdev); 413 struct Scsi_Host *shost = class_to_shost(cdev);
367 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 414 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
415 struct lpfc_hba *phba = vport->phba;
368 struct completion online_compl; 416 struct completion online_compl;
369 int status=0; 417 int status=0;
370 418
@@ -389,11 +437,166 @@ lpfc_board_mode_store(struct class_device *cdev, const char *buf, size_t count)
389 return -EIO; 437 return -EIO;
390} 438}
391 439
440int
441lpfc_get_hba_info(struct lpfc_hba *phba,
442 uint32_t *mxri, uint32_t *axri,
443 uint32_t *mrpi, uint32_t *arpi,
444 uint32_t *mvpi, uint32_t *avpi)
445{
446 struct lpfc_sli *psli = &phba->sli;
447 LPFC_MBOXQ_t *pmboxq;
448 MAILBOX_t *pmb;
449 int rc = 0;
450
451 /*
452 * prevent udev from issuing mailbox commands until the port is
453 * configured.
454 */
455 if (phba->link_state < LPFC_LINK_DOWN ||
456 !phba->mbox_mem_pool ||
457 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
458 return 0;
459
460 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
461 return 0;
462
463 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
464 if (!pmboxq)
465 return 0;
466 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
467
468 pmb = &pmboxq->mb;
469 pmb->mbxCommand = MBX_READ_CONFIG;
470 pmb->mbxOwner = OWN_HOST;
471 pmboxq->context1 = NULL;
472
473 if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
474 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
475 rc = MBX_NOT_FINISHED;
476 else
477 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
478
479 if (rc != MBX_SUCCESS) {
480 if (rc != MBX_TIMEOUT)
481 mempool_free(pmboxq, phba->mbox_mem_pool);
482 return 0;
483 }
484
485 if (mrpi)
486 *mrpi = pmb->un.varRdConfig.max_rpi;
487 if (arpi)
488 *arpi = pmb->un.varRdConfig.avail_rpi;
489 if (mxri)
490 *mxri = pmb->un.varRdConfig.max_xri;
491 if (axri)
492 *axri = pmb->un.varRdConfig.avail_xri;
493 if (mvpi)
494 *mvpi = pmb->un.varRdConfig.max_vpi;
495 if (avpi)
496 *avpi = pmb->un.varRdConfig.avail_vpi;
497
498 mempool_free(pmboxq, phba->mbox_mem_pool);
499 return 1;
500}
501
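lpfc_get_hba_info() repeats an ownership rule that is easy to get wrong:
after lpfc_sli_issue_mbox_wait(), the mailbox may be freed only when the
return code is not MBX_TIMEOUT, because on a timeout the deferred
completion still owns it and frees it later. A hypothetical wrapper
capturing just that rule; lpfc itself open-codes it at each call site:

	static int issue_mbox_and_free(struct lpfc_hba *phba,
				       LPFC_MBOXQ_t *mbox, uint32_t tmo)
	{
		int rc = lpfc_sli_issue_mbox_wait(phba, mbox, tmo);

		if (rc != MBX_TIMEOUT)	/* on timeout, completion owns it */
			mempool_free(mbox, phba->mbox_mem_pool);
		return rc;
	}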
502static ssize_t
503lpfc_max_rpi_show(struct class_device *cdev, char *buf)
504{
505 struct Scsi_Host *shost = class_to_shost(cdev);
506 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
507 struct lpfc_hba *phba = vport->phba;
508 uint32_t cnt;
509
510 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
511 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
512 return snprintf(buf, PAGE_SIZE, "Unknown\n");
513}
514
515static ssize_t
516lpfc_used_rpi_show(struct class_device *cdev, char *buf)
517{
518 struct Scsi_Host *shost = class_to_shost(cdev);
519 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
520 struct lpfc_hba *phba = vport->phba;
521 uint32_t cnt, acnt;
522
523 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
524 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
525 return snprintf(buf, PAGE_SIZE, "Unknown\n");
526}
527
528static ssize_t
529lpfc_max_xri_show(struct class_device *cdev, char *buf)
530{
531 struct Scsi_Host *shost = class_to_shost(cdev);
532 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
533 struct lpfc_hba *phba = vport->phba;
534 uint32_t cnt;
535
536 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
537 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
538 return snprintf(buf, PAGE_SIZE, "Unknown\n");
539}
540
541static ssize_t
542lpfc_used_xri_show(struct class_device *cdev, char *buf)
543{
544 struct Scsi_Host *shost = class_to_shost(cdev);
545 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
546 struct lpfc_hba *phba = vport->phba;
547 uint32_t cnt, acnt;
548
549 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
550 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
551 return snprintf(buf, PAGE_SIZE, "Unknown\n");
552}
553
554static ssize_t
555lpfc_max_vpi_show(struct class_device *cdev, char *buf)
556{
557 struct Scsi_Host *shost = class_to_shost(cdev);
558 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
559 struct lpfc_hba *phba = vport->phba;
560 uint32_t cnt;
561
562 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
563 return snprintf(buf, PAGE_SIZE, "%d\n", cnt);
564 return snprintf(buf, PAGE_SIZE, "Unknown\n");
565}
566
567static ssize_t
568lpfc_used_vpi_show(struct class_device *cdev, char *buf)
569{
570 struct Scsi_Host *shost = class_to_shost(cdev);
571 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
572 struct lpfc_hba *phba = vport->phba;
573 uint32_t cnt, acnt;
574
575 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
576 return snprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
577 return snprintf(buf, PAGE_SIZE, "Unknown\n");
578}
579
580static ssize_t
581lpfc_npiv_info_show(struct class_device *cdev, char *buf)
582{
583 struct Scsi_Host *shost = class_to_shost(cdev);
584 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
585 struct lpfc_hba *phba = vport->phba;
586
587 if (!(phba->max_vpi))
588 return snprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
589 if (vport->port_type == LPFC_PHYSICAL_PORT)
590 return snprintf(buf, PAGE_SIZE, "NPIV Physical\n");
591 return snprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
592}
593
392static ssize_t 594static ssize_t
393lpfc_poll_show(struct class_device *cdev, char *buf) 595lpfc_poll_show(struct class_device *cdev, char *buf)
394{ 596{
395 struct Scsi_Host *host = class_to_shost(cdev); 597 struct Scsi_Host *shost = class_to_shost(cdev);
396 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 598 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
599 struct lpfc_hba *phba = vport->phba;
397 600
398 return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll); 601 return snprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
399} 602}
@@ -402,8 +605,9 @@ static ssize_t
402lpfc_poll_store(struct class_device *cdev, const char *buf, 605lpfc_poll_store(struct class_device *cdev, const char *buf,
403 size_t count) 606 size_t count)
404{ 607{
405 struct Scsi_Host *host = class_to_shost(cdev); 608 struct Scsi_Host *shost = class_to_shost(cdev);
406 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 609 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
610 struct lpfc_hba *phba = vport->phba;
407 uint32_t creg_val; 611 uint32_t creg_val;
408 uint32_t old_val; 612 uint32_t old_val;
409 int val=0; 613 int val=0;
@@ -417,7 +621,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
417 if ((val & 0x3) != val) 621 if ((val & 0x3) != val)
418 return -EINVAL; 622 return -EINVAL;
419 623
420 spin_lock_irq(phba->host->host_lock); 624 spin_lock_irq(&phba->hbalock);
421 625
422 old_val = phba->cfg_poll; 626 old_val = phba->cfg_poll;
423 627
@@ -432,16 +636,16 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
432 lpfc_poll_start_timer(phba); 636 lpfc_poll_start_timer(phba);
433 } 637 }
434 } else if (val != 0x0) { 638 } else if (val != 0x0) {
435 spin_unlock_irq(phba->host->host_lock); 639 spin_unlock_irq(&phba->hbalock);
436 return -EINVAL; 640 return -EINVAL;
437 } 641 }
438 642
439 if (!(val & DISABLE_FCP_RING_INT) && 643 if (!(val & DISABLE_FCP_RING_INT) &&
440 (old_val & DISABLE_FCP_RING_INT)) 644 (old_val & DISABLE_FCP_RING_INT))
441 { 645 {
442 spin_unlock_irq(phba->host->host_lock); 646 spin_unlock_irq(&phba->hbalock);
443 del_timer(&phba->fcp_poll_timer); 647 del_timer(&phba->fcp_poll_timer);
444 spin_lock_irq(phba->host->host_lock); 648 spin_lock_irq(&phba->hbalock);
445 creg_val = readl(phba->HCregaddr); 649 creg_val = readl(phba->HCregaddr);
446 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 650 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
447 writel(creg_val, phba->HCregaddr); 651 writel(creg_val, phba->HCregaddr);
@@ -450,7 +654,7 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
450 654
451 phba->cfg_poll = val; 655 phba->cfg_poll = val;
452 656
453 spin_unlock_irq(phba->host->host_lock); 657 spin_unlock_irq(&phba->hbalock);
454 658
455 return strlen(buf); 659 return strlen(buf);
456} 660}
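lpfc_poll_store() drops hbalock around del_timer() and retakes it
afterwards, presumably because the poll timer's handler takes the same
lock; calling into the timer code with that lock held invites deadlock,
and del_timer_sync() under it would be a guaranteed one while the handler
runs. The shape of the pattern in isolation, under that assumption:

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	static void stop_poll_timer(spinlock_t *lock, struct timer_list *t)
	{
		/* caller holds 'lock'; the handler also takes 'lock' */
		spin_unlock_irq(lock);
		del_timer_sync(t);	/* needs the lock dropped */
		spin_lock_irq(lock);
	}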
@@ -459,8 +663,9 @@ lpfc_poll_store(struct class_device *cdev, const char *buf,
459static ssize_t \ 663static ssize_t \
460lpfc_##attr##_show(struct class_device *cdev, char *buf) \ 664lpfc_##attr##_show(struct class_device *cdev, char *buf) \
461{ \ 665{ \
462 struct Scsi_Host *host = class_to_shost(cdev);\ 666 struct Scsi_Host *shost = class_to_shost(cdev);\
463 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ 667 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
668 struct lpfc_hba *phba = vport->phba;\
464 int val = 0;\ 669 int val = 0;\
465 val = phba->cfg_##attr;\ 670 val = phba->cfg_##attr;\
466 return snprintf(buf, PAGE_SIZE, "%d\n",\ 671 return snprintf(buf, PAGE_SIZE, "%d\n",\
@@ -471,8 +676,9 @@ lpfc_##attr##_show(struct class_device *cdev, char *buf) \
471static ssize_t \ 676static ssize_t \
472lpfc_##attr##_show(struct class_device *cdev, char *buf) \ 677lpfc_##attr##_show(struct class_device *cdev, char *buf) \
473{ \ 678{ \
474 struct Scsi_Host *host = class_to_shost(cdev);\ 679 struct Scsi_Host *shost = class_to_shost(cdev);\
475 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ 680 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
681 struct lpfc_hba *phba = vport->phba;\
476 int val = 0;\ 682 int val = 0;\
477 val = phba->cfg_##attr;\ 683 val = phba->cfg_##attr;\
478 return snprintf(buf, PAGE_SIZE, "%#x\n",\ 684 return snprintf(buf, PAGE_SIZE, "%#x\n",\
@@ -514,8 +720,9 @@ lpfc_##attr##_set(struct lpfc_hba *phba, int val) \
514static ssize_t \ 720static ssize_t \
515lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \ 721lpfc_##attr##_store(struct class_device *cdev, const char *buf, size_t count) \
516{ \ 722{ \
517 struct Scsi_Host *host = class_to_shost(cdev);\ 723 struct Scsi_Host *shost = class_to_shost(cdev);\
518 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata;\ 724 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
725 struct lpfc_hba *phba = vport->phba;\
519 int val=0;\ 726 int val=0;\
520 if (!isdigit(buf[0]))\ 727 if (!isdigit(buf[0]))\
521 return -EINVAL;\ 728 return -EINVAL;\
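
For a hypothetical attribute named foo (the name is an example, not from the source), the rewritten show macro above now expands to roughly the following; the key change is the extra vport hop, since shost->hostdata now holds a struct lpfc_vport * rather than the lpfc_hba itself:

        static ssize_t
        lpfc_foo_show(struct class_device *cdev, char *buf)
        {
                struct Scsi_Host *shost = class_to_shost(cdev);
                /* hostdata is now the vport; the HBA hangs off vport->phba */
                struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
                struct lpfc_hba *phba = vport->phba;

                return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_foo);
        }
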
@@ -576,7 +783,7 @@ static CLASS_DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
576static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL); 783static CLASS_DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
577static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL); 784static CLASS_DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
578static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL); 785static CLASS_DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
579static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_portnum_show, NULL); 786static CLASS_DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
580static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL); 787static CLASS_DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
581static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL); 788static CLASS_DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
582static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL); 789static CLASS_DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
@@ -592,6 +799,13 @@ static CLASS_DEVICE_ATTR(management_version, S_IRUGO, management_version_show,
592static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, 799static CLASS_DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
593 lpfc_board_mode_show, lpfc_board_mode_store); 800 lpfc_board_mode_show, lpfc_board_mode_store);
594static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); 801static CLASS_DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
802static CLASS_DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
803static CLASS_DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
804static CLASS_DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
805static CLASS_DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
806static CLASS_DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
807static CLASS_DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
808static CLASS_DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
595 809
596 810
597static char *lpfc_soft_wwn_key = "C99G71SL8032A"; 811static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -600,8 +814,9 @@ static ssize_t
600lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf, 814lpfc_soft_wwn_enable_store(struct class_device *cdev, const char *buf,
601 size_t count) 815 size_t count)
602{ 816{
603 struct Scsi_Host *host = class_to_shost(cdev); 817 struct Scsi_Host *shost = class_to_shost(cdev);
604 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 818 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
819 struct lpfc_hba *phba = vport->phba;
605 unsigned int cnt = count; 820 unsigned int cnt = count;
606 821
607 /* 822 /*
@@ -634,8 +849,10 @@ static CLASS_DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
634static ssize_t 849static ssize_t
635lpfc_soft_wwpn_show(struct class_device *cdev, char *buf) 850lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
636{ 851{
637 struct Scsi_Host *host = class_to_shost(cdev); 852 struct Scsi_Host *shost = class_to_shost(cdev);
638 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 853 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
854 struct lpfc_hba *phba = vport->phba;
855
639 return snprintf(buf, PAGE_SIZE, "0x%llx\n", 856 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
640 (unsigned long long)phba->cfg_soft_wwpn); 857 (unsigned long long)phba->cfg_soft_wwpn);
641} 858}
@@ -644,8 +861,9 @@ lpfc_soft_wwpn_show(struct class_device *cdev, char *buf)
644static ssize_t 861static ssize_t
645lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count) 862lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
646{ 863{
647 struct Scsi_Host *host = class_to_shost(cdev); 864 struct Scsi_Host *shost = class_to_shost(cdev);
648 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 865 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
866 struct lpfc_hba *phba = vport->phba;
649 struct completion online_compl; 867 struct completion online_compl;
650 int stat1=0, stat2=0; 868 int stat1=0, stat2=0;
651 unsigned int i, j, cnt=count; 869 unsigned int i, j, cnt=count;
@@ -680,9 +898,9 @@ lpfc_soft_wwpn_store(struct class_device *cdev, const char *buf, size_t count)
680 } 898 }
681 } 899 }
682 phba->cfg_soft_wwpn = wwn_to_u64(wwpn); 900 phba->cfg_soft_wwpn = wwn_to_u64(wwpn);
683 fc_host_port_name(host) = phba->cfg_soft_wwpn; 901 fc_host_port_name(shost) = phba->cfg_soft_wwpn;
684 if (phba->cfg_soft_wwnn) 902 if (phba->cfg_soft_wwnn)
685 fc_host_node_name(host) = phba->cfg_soft_wwnn; 903 fc_host_node_name(shost) = phba->cfg_soft_wwnn;
686 904
687 dev_printk(KERN_NOTICE, &phba->pcidev->dev, 905 dev_printk(KERN_NOTICE, &phba->pcidev->dev,
688 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no); 906 "lpfc%d: Reinitializing to use soft_wwpn\n", phba->brd_no);
@@ -777,6 +995,15 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
777static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, 995static CLASS_DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
778 lpfc_poll_show, lpfc_poll_store); 996 lpfc_poll_show, lpfc_poll_store);
779 997
998int lpfc_sli_mode = 0;
999module_param(lpfc_sli_mode, int, 0);
1000MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
1001 " 0 - auto (SLI-3 if supported),"
1002 " 2 - select SLI-2 even on SLI-3 capable HBAs,"
1003 " 3 - select SLI-3");
1004
1005LPFC_ATTR_R(npiv_enable, 0, 0, 1, "Enable NPIV functionality");
1006
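
Unlike the LPFC_ATTR_* parameters, lpfc_sli_mode is a bare module_param, so nothing range-checks it automatically. A hypothetical probe-time check (the helper name is invented for illustration; only the documented values 0, 2 and 3 are accepted) might look like:

        /* Hypothetical validation helper for the lpfc_sli_mode parameter
         * declared above. */
        static int lpfc_validate_sli_mode(void)
        {
                switch (lpfc_sli_mode) {
                case 0: /* auto: use SLI-3 when the HBA supports it */
                case 2: /* force SLI-2 even on SLI-3 capable HBAs */
                case 3: /* force SLI-3 */
                        return 0;
                default:
                        printk(KERN_ERR "lpfc: bad lpfc_sli_mode %d\n",
                               lpfc_sli_mode);
                        return -EINVAL;
                }
        }
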
780/* 1007/*
781# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear 1008# lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
782# until the timer expires. Value range is [0,255]. Default value is 30. 1009# until the timer expires. Value range is [0,255]. Default value is 30.
@@ -790,8 +1017,9 @@ MODULE_PARM_DESC(lpfc_nodev_tmo,
790static ssize_t 1017static ssize_t
791lpfc_nodev_tmo_show(struct class_device *cdev, char *buf) 1018lpfc_nodev_tmo_show(struct class_device *cdev, char *buf)
792{ 1019{
793 struct Scsi_Host *host = class_to_shost(cdev); 1020 struct Scsi_Host *shost = class_to_shost(cdev);
794 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1021 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1022 struct lpfc_hba *phba = vport->phba;
795 int val = 0; 1023 int val = 0;
796 val = phba->cfg_devloss_tmo; 1024 val = phba->cfg_devloss_tmo;
797 return snprintf(buf, PAGE_SIZE, "%d\n", 1025 return snprintf(buf, PAGE_SIZE, "%d\n",
@@ -832,13 +1060,19 @@ lpfc_nodev_tmo_init(struct lpfc_hba *phba, int val)
832static void 1060static void
833lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba) 1061lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
834{ 1062{
1063 struct lpfc_vport *vport;
1064 struct Scsi_Host *shost;
835 struct lpfc_nodelist *ndlp; 1065 struct lpfc_nodelist *ndlp;
836 1066
837 spin_lock_irq(phba->host->host_lock); 1067 list_for_each_entry(vport, &phba->port_list, listentry) {
838 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) 1068 shost = lpfc_shost_from_vport(vport);
839 if (ndlp->rport) 1069 spin_lock_irq(shost->host_lock);
840 ndlp->rport->dev_loss_tmo = phba->cfg_devloss_tmo; 1070 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
841 spin_unlock_irq(phba->host->host_lock); 1071 if (ndlp->rport)
1072 ndlp->rport->dev_loss_tmo =
1073 phba->cfg_devloss_tmo;
1074 spin_unlock_irq(shost->host_lock);
1075 }
842} 1076}
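
Pieced together from the hunk above, the new lpfc_update_rport_devloss_tmo walks every vport hanging off the HBA and takes each vport's own Scsi_Host lock, rather than a single host_lock:

        static void
        lpfc_update_rport_devloss_tmo(struct lpfc_hba *phba)
        {
                struct lpfc_vport *vport;
                struct Scsi_Host *shost;
                struct lpfc_nodelist *ndlp;

                list_for_each_entry(vport, &phba->port_list, listentry) {
                        shost = lpfc_shost_from_vport(vport);
                        spin_lock_irq(shost->host_lock);
                        /* node lists are now per-vport, not per-HBA */
                        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
                                if (ndlp->rport)
                                        ndlp->rport->dev_loss_tmo =
                                                phba->cfg_devloss_tmo;
                        spin_unlock_irq(shost->host_lock);
                }
        }
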
843 1077
844static int 1078static int
@@ -946,6 +1180,33 @@ LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
946 "Max number of FCP commands we can queue to a lpfc HBA"); 1180 "Max number of FCP commands we can queue to a lpfc HBA");
947 1181
948/* 1182/*
1183# peer_port_login: This parameter allows/prevents logins
1184# between peer ports hosted on the same physical port.
 1185# When this parameter is set to 0, peer ports on the same physical
 1186# port are not allowed to log in to each other.
 1187# When this parameter is set to 1, peer ports on the same physical
 1188# port are allowed to log in to each other.
 1189# The default value of this parameter is 0.
1190*/
1191LPFC_ATTR_R(peer_port_login, 0, 0, 1,
1192 "Allow peer ports on the same physical port to login to each "
1193 "other.");
1194
1195/*
1196# vport_restrict_login: This parameter allows/prevents logins
1197# between Virtual Ports and remote initiators.
 1198# When this parameter is not set (0), Virtual Ports accept PLOGIs from
 1199# other initiators and attempt to PLOGI all remote ports.
 1200# When this parameter is set (1), Virtual Ports reject PLOGIs from
 1201# remote ports and do not attempt to PLOGI other initiators.
 1202# This parameter does not apply to the physical port.
 1203# Nor does it restrict logins to Fabric-resident remote ports.
 1204# The default value of this parameter is 1.
1205*/
1206LPFC_ATTR_RW(vport_restrict_login, 1, 0, 1,
1207 "Restrict virtual ports login to remote initiators.");
1208
1209/*
949# Some disk devices have a "select ID" or "select Target" capability. 1210# Some disk devices have a "select ID" or "select Target" capability.
950# From a protocol standpoint "select ID" usually means select the 1211# From a protocol standpoint "select ID" usually means select the
951# Fibre channel "ALPA". In the FC-AL Profile there is an "informative 1212# Fibre channel "ALPA". In the FC-AL Profile there is an "informative
@@ -1088,7 +1349,8 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
1088LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible"); 1349LPFC_ATTR_R(use_msi, 0, 0, 1, "Use Message Signaled Interrupts, if possible");
1089 1350
1090 1351
1091struct class_device_attribute *lpfc_host_attrs[] = { 1352
1353struct class_device_attribute *lpfc_hba_attrs[] = {
1092 &class_device_attr_info, 1354 &class_device_attr_info,
1093 &class_device_attr_serialnum, 1355 &class_device_attr_serialnum,
1094 &class_device_attr_modeldesc, 1356 &class_device_attr_modeldesc,
@@ -1104,6 +1366,8 @@ struct class_device_attribute *lpfc_host_attrs[] = {
1104 &class_device_attr_lpfc_log_verbose, 1366 &class_device_attr_lpfc_log_verbose,
1105 &class_device_attr_lpfc_lun_queue_depth, 1367 &class_device_attr_lpfc_lun_queue_depth,
1106 &class_device_attr_lpfc_hba_queue_depth, 1368 &class_device_attr_lpfc_hba_queue_depth,
1369 &class_device_attr_lpfc_peer_port_login,
1370 &class_device_attr_lpfc_vport_restrict_login,
1107 &class_device_attr_lpfc_nodev_tmo, 1371 &class_device_attr_lpfc_nodev_tmo,
1108 &class_device_attr_lpfc_devloss_tmo, 1372 &class_device_attr_lpfc_devloss_tmo,
1109 &class_device_attr_lpfc_fcp_class, 1373 &class_device_attr_lpfc_fcp_class,
@@ -1119,9 +1383,17 @@ struct class_device_attribute *lpfc_host_attrs[] = {
1119 &class_device_attr_lpfc_multi_ring_type, 1383 &class_device_attr_lpfc_multi_ring_type,
1120 &class_device_attr_lpfc_fdmi_on, 1384 &class_device_attr_lpfc_fdmi_on,
1121 &class_device_attr_lpfc_max_luns, 1385 &class_device_attr_lpfc_max_luns,
1386 &class_device_attr_lpfc_npiv_enable,
1122 &class_device_attr_nport_evt_cnt, 1387 &class_device_attr_nport_evt_cnt,
1123 &class_device_attr_management_version, 1388 &class_device_attr_management_version,
1124 &class_device_attr_board_mode, 1389 &class_device_attr_board_mode,
1390 &class_device_attr_max_vpi,
1391 &class_device_attr_used_vpi,
1392 &class_device_attr_max_rpi,
1393 &class_device_attr_used_rpi,
1394 &class_device_attr_max_xri,
1395 &class_device_attr_used_xri,
1396 &class_device_attr_npiv_info,
1125 &class_device_attr_issue_reset, 1397 &class_device_attr_issue_reset,
1126 &class_device_attr_lpfc_poll, 1398 &class_device_attr_lpfc_poll,
1127 &class_device_attr_lpfc_poll_tmo, 1399 &class_device_attr_lpfc_poll_tmo,
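
The renamed lpfc_hba_attrs array is consumed through the SCSI midlayer's host template. A minimal sketch of that hookup, assuming the array is NULL-terminated past the hunk context and that lpfc_template_sketch stands in for the driver's real scsi_host_template:

        /* Sketch: scsi_mod walks the NULL-terminated shost_attrs array and
         * creates one sysfs file per class_device_attribute when the host
         * is added. */
        static struct scsi_host_template lpfc_template_sketch = {
                .module      = THIS_MODULE,
                .name        = "lpfc-sketch",
                .shost_attrs = lpfc_hba_attrs,
        };
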
@@ -1137,9 +1409,11 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1137 char *buf, loff_t off, size_t count) 1409 char *buf, loff_t off, size_t count)
1138{ 1410{
1139 size_t buf_off; 1411 size_t buf_off;
1140 struct Scsi_Host *host = class_to_shost(container_of(kobj, 1412 struct class_device *cdev = container_of(kobj, struct class_device,
1141 struct class_device, kobj)); 1413 kobj);
1142 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1414 struct Scsi_Host *shost = class_to_shost(cdev);
1415 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1416 struct lpfc_hba *phba = vport->phba;
1143 1417
1144 if ((off + count) > FF_REG_AREA_SIZE) 1418 if ((off + count) > FF_REG_AREA_SIZE)
1145 return -ERANGE; 1419 return -ERANGE;
@@ -1149,18 +1423,16 @@ sysfs_ctlreg_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1149 if (off % 4 || count % 4 || (unsigned long)buf % 4) 1423 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1150 return -EINVAL; 1424 return -EINVAL;
1151 1425
1152 spin_lock_irq(phba->host->host_lock); 1426 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
1153
1154 if (!(phba->fc_flag & FC_OFFLINE_MODE)) {
1155 spin_unlock_irq(phba->host->host_lock);
1156 return -EPERM; 1427 return -EPERM;
1157 } 1428 }
1158 1429
1430 spin_lock_irq(&phba->hbalock);
1159 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) 1431 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t))
1160 writel(*((uint32_t *)(buf + buf_off)), 1432 writel(*((uint32_t *)(buf + buf_off)),
1161 phba->ctrl_regs_memmap_p + off + buf_off); 1433 phba->ctrl_regs_memmap_p + off + buf_off);
1162 1434
1163 spin_unlock_irq(phba->host->host_lock); 1435 spin_unlock_irq(&phba->hbalock);
1164 1436
1165 return count; 1437 return count;
1166} 1438}
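
Condensing the validation order after this hunk: range check, 32-bit alignment check, then the offline check, which now happens before hbalock is taken (the old code grabbed host_lock first and had to drop it on the error path). A sketch using only names from the hunk, with an invented helper name:

        /* Sketch of the write-side checks; FF_REG_AREA_SIZE bounds the
         * memory-mapped control-register window. */
        static int lpfc_ctlreg_write_ok(struct lpfc_vport *vport,
                                        const char *buf, loff_t off, size_t count)
        {
                if ((off + count) > FF_REG_AREA_SIZE)
                        return -ERANGE;         /* past the register window */
                if (off % 4 || count % 4 || (unsigned long)buf % 4)
                        return -EINVAL;         /* registers are 32 bits wide */
                if (!(vport->fc_flag & FC_OFFLINE_MODE))
                        return -EPERM;          /* board must be offline */
                return 0;                       /* safe to take hbalock and writel() */
        }
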
@@ -1171,9 +1443,11 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1171{ 1443{
1172 size_t buf_off; 1444 size_t buf_off;
1173 uint32_t * tmp_ptr; 1445 uint32_t * tmp_ptr;
1174 struct Scsi_Host *host = class_to_shost(container_of(kobj, 1446 struct class_device *cdev = container_of(kobj, struct class_device,
1175 struct class_device, kobj)); 1447 kobj);
1176 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1448 struct Scsi_Host *shost = class_to_shost(cdev);
1449 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1450 struct lpfc_hba *phba = vport->phba;
1177 1451
1178 if (off > FF_REG_AREA_SIZE) 1452 if (off > FF_REG_AREA_SIZE)
1179 return -ERANGE; 1453 return -ERANGE;
@@ -1186,14 +1460,14 @@ sysfs_ctlreg_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1186 if (off % 4 || count % 4 || (unsigned long)buf % 4) 1460 if (off % 4 || count % 4 || (unsigned long)buf % 4)
1187 return -EINVAL; 1461 return -EINVAL;
1188 1462
1189 spin_lock_irq(phba->host->host_lock); 1463 spin_lock_irq(&phba->hbalock);
1190 1464
1191 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) { 1465 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
1192 tmp_ptr = (uint32_t *)(buf + buf_off); 1466 tmp_ptr = (uint32_t *)(buf + buf_off);
1193 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off); 1467 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
1194 } 1468 }
1195 1469
1196 spin_unlock_irq(phba->host->host_lock); 1470 spin_unlock_irq(&phba->hbalock);
1197 1471
1198 return count; 1472 return count;
1199} 1473}
@@ -1210,7 +1484,7 @@ static struct bin_attribute sysfs_ctlreg_attr = {
1210 1484
1211 1485
1212static void 1486static void
1213sysfs_mbox_idle (struct lpfc_hba * phba) 1487sysfs_mbox_idle(struct lpfc_hba *phba)
1214{ 1488{
1215 phba->sysfs_mbox.state = SMBOX_IDLE; 1489 phba->sysfs_mbox.state = SMBOX_IDLE;
1216 phba->sysfs_mbox.offset = 0; 1490 phba->sysfs_mbox.offset = 0;
@@ -1226,10 +1500,12 @@ static ssize_t
1226sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, 1500sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1227 char *buf, loff_t off, size_t count) 1501 char *buf, loff_t off, size_t count)
1228{ 1502{
1229 struct Scsi_Host * host = 1503 struct class_device *cdev = container_of(kobj, struct class_device,
1230 class_to_shost(container_of(kobj, struct class_device, kobj)); 1504 kobj);
1231 struct lpfc_hba * phba = (struct lpfc_hba*)host->hostdata; 1505 struct Scsi_Host *shost = class_to_shost(cdev);
1232 struct lpfcMboxq * mbox = NULL; 1506 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1507 struct lpfc_hba *phba = vport->phba;
1508 struct lpfcMboxq *mbox = NULL;
1233 1509
1234 if ((count + off) > MAILBOX_CMD_SIZE) 1510 if ((count + off) > MAILBOX_CMD_SIZE)
1235 return -ERANGE; 1511 return -ERANGE;
@@ -1247,7 +1523,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1247 memset(mbox, 0, sizeof (LPFC_MBOXQ_t)); 1523 memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
1248 } 1524 }
1249 1525
1250 spin_lock_irq(host->host_lock); 1526 spin_lock_irq(&phba->hbalock);
1251 1527
1252 if (off == 0) { 1528 if (off == 0) {
1253 if (phba->sysfs_mbox.mbox) 1529 if (phba->sysfs_mbox.mbox)
@@ -1258,9 +1534,9 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1258 } else { 1534 } else {
1259 if (phba->sysfs_mbox.state != SMBOX_WRITING || 1535 if (phba->sysfs_mbox.state != SMBOX_WRITING ||
1260 phba->sysfs_mbox.offset != off || 1536 phba->sysfs_mbox.offset != off ||
1261 phba->sysfs_mbox.mbox == NULL ) { 1537 phba->sysfs_mbox.mbox == NULL) {
1262 sysfs_mbox_idle(phba); 1538 sysfs_mbox_idle(phba);
1263 spin_unlock_irq(host->host_lock); 1539 spin_unlock_irq(&phba->hbalock);
1264 return -EAGAIN; 1540 return -EAGAIN;
1265 } 1541 }
1266 } 1542 }
@@ -1270,7 +1546,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
1270 1546
1271 phba->sysfs_mbox.offset = off + count; 1547 phba->sysfs_mbox.offset = off + count;
1272 1548
1273 spin_unlock_irq(host->host_lock); 1549 spin_unlock_irq(&phba->hbalock);
1274 1550
1275 return count; 1551 return count;
1276} 1552}
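
A note on the state handling that the write path above and the read path below both rely on. The SMBOX_* values and sysfs_mbox_idle() come from the hunks themselves; the transition summary is an editorial sketch:

        /*
         * The sysfs mailbox file behaves as a three-state machine:
         *
         *   SMBOX_IDLE    --write at off==0-->   SMBOX_WRITING
         *   SMBOX_WRITING --sequential writes--> SMBOX_WRITING (offset advances)
         *   SMBOX_WRITING --read at off==0-->    command issued, SMBOX_READING
         *   SMBOX_READING --sequential reads-->  SMBOX_READING (offset advances)
         *   any state     --error / final byte-> sysfs_mbox_idle(): SMBOX_IDLE
         *
         * Out-of-sequence offsets return -EAGAIN and reset the state, which
         * is why both paths compare sysfs_mbox.offset against the requested
         * off under the lock.
         */
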
@@ -1279,10 +1555,11 @@ static ssize_t
1279sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, 1555sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1280 char *buf, loff_t off, size_t count) 1556 char *buf, loff_t off, size_t count)
1281{ 1557{
1282 struct Scsi_Host *host = 1558 struct class_device *cdev = container_of(kobj, struct class_device,
1283 class_to_shost(container_of(kobj, struct class_device, 1559 kobj);
1284 kobj)); 1560 struct Scsi_Host *shost = class_to_shost(cdev);
1285 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1561 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1562 struct lpfc_hba *phba = vport->phba;
1286 int rc; 1563 int rc;
1287 1564
1288 if (off > MAILBOX_CMD_SIZE) 1565 if (off > MAILBOX_CMD_SIZE)
@@ -1297,7 +1574,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1297 if (off && count == 0) 1574 if (off && count == 0)
1298 return 0; 1575 return 0;
1299 1576
1300 spin_lock_irq(phba->host->host_lock); 1577 spin_lock_irq(&phba->hbalock);
1301 1578
1302 if (off == 0 && 1579 if (off == 0 &&
1303 phba->sysfs_mbox.state == SMBOX_WRITING && 1580 phba->sysfs_mbox.state == SMBOX_WRITING &&
@@ -1320,12 +1597,12 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1320 case MBX_SET_MASK: 1597 case MBX_SET_MASK:
1321 case MBX_SET_SLIM: 1598 case MBX_SET_SLIM:
1322 case MBX_SET_DEBUG: 1599 case MBX_SET_DEBUG:
1323 if (!(phba->fc_flag & FC_OFFLINE_MODE)) { 1600 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
1324 printk(KERN_WARNING "mbox_read: Command 0x%x " 1601 printk(KERN_WARNING "mbox_read: Command 0x%x "
1325 "is illegal in on-line state\n", 1602 "is illegal in on-line state\n",
1326 phba->sysfs_mbox.mbox->mb.mbxCommand); 1603 phba->sysfs_mbox.mbox->mb.mbxCommand);
1327 sysfs_mbox_idle(phba); 1604 sysfs_mbox_idle(phba);
1328 spin_unlock_irq(phba->host->host_lock); 1605 spin_unlock_irq(&phba->hbalock);
1329 return -EPERM; 1606 return -EPERM;
1330 } 1607 }
1331 case MBX_LOAD_SM: 1608 case MBX_LOAD_SM:
@@ -1355,48 +1632,48 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1355 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", 1632 printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
1356 phba->sysfs_mbox.mbox->mb.mbxCommand); 1633 phba->sysfs_mbox.mbox->mb.mbxCommand);
1357 sysfs_mbox_idle(phba); 1634 sysfs_mbox_idle(phba);
1358 spin_unlock_irq(phba->host->host_lock); 1635 spin_unlock_irq(&phba->hbalock);
1359 return -EPERM; 1636 return -EPERM;
1360 default: 1637 default:
1361 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", 1638 printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
1362 phba->sysfs_mbox.mbox->mb.mbxCommand); 1639 phba->sysfs_mbox.mbox->mb.mbxCommand);
1363 sysfs_mbox_idle(phba); 1640 sysfs_mbox_idle(phba);
1364 spin_unlock_irq(phba->host->host_lock); 1641 spin_unlock_irq(&phba->hbalock);
1365 return -EPERM; 1642 return -EPERM;
1366 } 1643 }
1367 1644
1368 if (phba->fc_flag & FC_BLOCK_MGMT_IO) { 1645 phba->sysfs_mbox.mbox->vport = vport;
1646
1647 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
1369 sysfs_mbox_idle(phba); 1648 sysfs_mbox_idle(phba);
1370 spin_unlock_irq(host->host_lock); 1649 spin_unlock_irq(&phba->hbalock);
1371 return -EAGAIN; 1650 return -EAGAIN;
1372 } 1651 }
1373 1652
1374 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1653 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1375 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ 1654 (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
1376 1655
1377 spin_unlock_irq(phba->host->host_lock); 1656 spin_unlock_irq(&phba->hbalock);
1378 rc = lpfc_sli_issue_mbox (phba, 1657 rc = lpfc_sli_issue_mbox (phba,
1379 phba->sysfs_mbox.mbox, 1658 phba->sysfs_mbox.mbox,
1380 MBX_POLL); 1659 MBX_POLL);
1381 spin_lock_irq(phba->host->host_lock); 1660 spin_lock_irq(&phba->hbalock);
1382 1661
1383 } else { 1662 } else {
1384 spin_unlock_irq(phba->host->host_lock); 1663 spin_unlock_irq(&phba->hbalock);
1385 rc = lpfc_sli_issue_mbox_wait (phba, 1664 rc = lpfc_sli_issue_mbox_wait (phba,
1386 phba->sysfs_mbox.mbox, 1665 phba->sysfs_mbox.mbox,
1387 lpfc_mbox_tmo_val(phba, 1666 lpfc_mbox_tmo_val(phba,
1388 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); 1667 phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
1389 spin_lock_irq(phba->host->host_lock); 1668 spin_lock_irq(&phba->hbalock);
1390 } 1669 }
1391 1670
1392 if (rc != MBX_SUCCESS) { 1671 if (rc != MBX_SUCCESS) {
1393 if (rc == MBX_TIMEOUT) { 1672 if (rc == MBX_TIMEOUT) {
1394 phba->sysfs_mbox.mbox->mbox_cmpl =
1395 lpfc_sli_def_mbox_cmpl;
1396 phba->sysfs_mbox.mbox = NULL; 1673 phba->sysfs_mbox.mbox = NULL;
1397 } 1674 }
1398 sysfs_mbox_idle(phba); 1675 sysfs_mbox_idle(phba);
1399 spin_unlock_irq(host->host_lock); 1676 spin_unlock_irq(&phba->hbalock);
1400 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; 1677 return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
1401 } 1678 }
1402 phba->sysfs_mbox.state = SMBOX_READING; 1679 phba->sysfs_mbox.state = SMBOX_READING;
@@ -1405,7 +1682,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1405 phba->sysfs_mbox.state != SMBOX_READING) { 1682 phba->sysfs_mbox.state != SMBOX_READING) {
1406 printk(KERN_WARNING "mbox_read: Bad State\n"); 1683 printk(KERN_WARNING "mbox_read: Bad State\n");
1407 sysfs_mbox_idle(phba); 1684 sysfs_mbox_idle(phba);
1408 spin_unlock_irq(host->host_lock); 1685 spin_unlock_irq(&phba->hbalock);
1409 return -EAGAIN; 1686 return -EAGAIN;
1410 } 1687 }
1411 1688
@@ -1416,7 +1693,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
1416 if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE) 1693 if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
1417 sysfs_mbox_idle(phba); 1694 sysfs_mbox_idle(phba);
1418 1695
1419 spin_unlock_irq(phba->host->host_lock); 1696 spin_unlock_irq(&phba->hbalock);
1420 1697
1421 return count; 1698 return count;
1422} 1699}
@@ -1432,35 +1709,35 @@ static struct bin_attribute sysfs_mbox_attr = {
1432}; 1709};
1433 1710
1434int 1711int
1435lpfc_alloc_sysfs_attr(struct lpfc_hba *phba) 1712lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
1436{ 1713{
1437 struct Scsi_Host *host = phba->host; 1714 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1438 int error; 1715 int error;
1439 1716
1440 error = sysfs_create_bin_file(&host->shost_classdev.kobj, 1717 error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
1441 &sysfs_ctlreg_attr); 1718 &sysfs_ctlreg_attr);
1442 if (error) 1719 if (error)
1443 goto out; 1720 goto out;
1444 1721
1445 error = sysfs_create_bin_file(&host->shost_classdev.kobj, 1722 error = sysfs_create_bin_file(&shost->shost_classdev.kobj,
1446 &sysfs_mbox_attr); 1723 &sysfs_mbox_attr);
1447 if (error) 1724 if (error)
1448 goto out_remove_ctlreg_attr; 1725 goto out_remove_ctlreg_attr;
1449 1726
1450 return 0; 1727 return 0;
1451out_remove_ctlreg_attr: 1728out_remove_ctlreg_attr:
1452 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr); 1729 sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
1453out: 1730out:
1454 return error; 1731 return error;
1455} 1732}
1456 1733
1457void 1734void
1458lpfc_free_sysfs_attr(struct lpfc_hba *phba) 1735lpfc_free_sysfs_attr(struct lpfc_vport *vport)
1459{ 1736{
1460 struct Scsi_Host *host = phba->host; 1737 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1461 1738
1462 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_mbox_attr); 1739 sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_mbox_attr);
1463 sysfs_remove_bin_file(&host->shost_classdev.kobj, &sysfs_ctlreg_attr); 1740 sysfs_remove_bin_file(&shost->shost_classdev.kobj, &sysfs_ctlreg_attr);
1464} 1741}
1465 1742
1466 1743
@@ -1471,26 +1748,30 @@ lpfc_free_sysfs_attr(struct lpfc_hba *phba)
1471static void 1748static void
1472lpfc_get_host_port_id(struct Scsi_Host *shost) 1749lpfc_get_host_port_id(struct Scsi_Host *shost)
1473{ 1750{
1474 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1751 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1752
1475 /* note: fc_myDID already in cpu endianness */ 1753 /* note: fc_myDID already in cpu endianness */
1476 fc_host_port_id(shost) = phba->fc_myDID; 1754 fc_host_port_id(shost) = vport->fc_myDID;
1477} 1755}
1478 1756
1479static void 1757static void
1480lpfc_get_host_port_type(struct Scsi_Host *shost) 1758lpfc_get_host_port_type(struct Scsi_Host *shost)
1481{ 1759{
1482 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1760 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1761 struct lpfc_hba *phba = vport->phba;
1483 1762
1484 spin_lock_irq(shost->host_lock); 1763 spin_lock_irq(shost->host_lock);
1485 1764
1486 if (phba->hba_state == LPFC_HBA_READY) { 1765 if (vport->port_type == LPFC_NPIV_PORT) {
1766 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1767 } else if (lpfc_is_link_up(phba)) {
1487 if (phba->fc_topology == TOPOLOGY_LOOP) { 1768 if (phba->fc_topology == TOPOLOGY_LOOP) {
1488 if (phba->fc_flag & FC_PUBLIC_LOOP) 1769 if (vport->fc_flag & FC_PUBLIC_LOOP)
1489 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 1770 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
1490 else 1771 else
1491 fc_host_port_type(shost) = FC_PORTTYPE_LPORT; 1772 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
1492 } else { 1773 } else {
1493 if (phba->fc_flag & FC_FABRIC) 1774 if (vport->fc_flag & FC_FABRIC)
1494 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 1775 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
1495 else 1776 else
1496 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 1777 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
@@ -1504,29 +1785,20 @@ lpfc_get_host_port_type(struct Scsi_Host *shost)
1504static void 1785static void
1505lpfc_get_host_port_state(struct Scsi_Host *shost) 1786lpfc_get_host_port_state(struct Scsi_Host *shost)
1506{ 1787{
1507 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1788 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1789 struct lpfc_hba *phba = vport->phba;
1508 1790
1509 spin_lock_irq(shost->host_lock); 1791 spin_lock_irq(shost->host_lock);
1510 1792
1511 if (phba->fc_flag & FC_OFFLINE_MODE) 1793 if (vport->fc_flag & FC_OFFLINE_MODE)
1512 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 1794 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1513 else { 1795 else {
1514 switch (phba->hba_state) { 1796 switch (phba->link_state) {
1515 case LPFC_STATE_UNKNOWN: 1797 case LPFC_LINK_UNKNOWN:
1516 case LPFC_WARM_START:
1517 case LPFC_INIT_START:
1518 case LPFC_INIT_MBX_CMDS:
1519 case LPFC_LINK_DOWN: 1798 case LPFC_LINK_DOWN:
1520 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 1799 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1521 break; 1800 break;
1522 case LPFC_LINK_UP: 1801 case LPFC_LINK_UP:
1523 case LPFC_LOCAL_CFG_LINK:
1524 case LPFC_FLOGI:
1525 case LPFC_FABRIC_CFG_LINK:
1526 case LPFC_NS_REG:
1527 case LPFC_NS_QRY:
1528 case LPFC_BUILD_DISC_LIST:
1529 case LPFC_DISC_AUTH:
1530 case LPFC_CLEAR_LA: 1802 case LPFC_CLEAR_LA:
1531 case LPFC_HBA_READY: 1803 case LPFC_HBA_READY:
1532 /* Links up, beyond this port_type reports state */ 1804 /* Links up, beyond this port_type reports state */
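
The old multi-stage hba_state enum collapses into link_state here. A hedged sketch of the resulting mapping, as a hypothetical helper; the FC_PORTSTATE_ONLINE fall-through is assumed, since the hunk truncates before the assignment:

        static int lpfc_link_state_to_fc_sketch(struct lpfc_hba *phba,
                                                struct lpfc_vport *vport)
        {
                if (vport->fc_flag & FC_OFFLINE_MODE)
                        return FC_PORTSTATE_OFFLINE;

                switch (phba->link_state) {
                case LPFC_LINK_UNKNOWN:
                case LPFC_LINK_DOWN:
                        return FC_PORTSTATE_LINKDOWN;
                case LPFC_LINK_UP:
                case LPFC_CLEAR_LA:
                case LPFC_HBA_READY:
                        /* assumed: link up reports ONLINE, then port_type
                         * refines what the transport class shows */
                        return FC_PORTSTATE_ONLINE;
                default:
                        return FC_PORTSTATE_UNKNOWN;
                }
        }
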
@@ -1547,11 +1819,12 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
1547static void 1819static void
1548lpfc_get_host_speed(struct Scsi_Host *shost) 1820lpfc_get_host_speed(struct Scsi_Host *shost)
1549{ 1821{
1550 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1822 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1823 struct lpfc_hba *phba = vport->phba;
1551 1824
1552 spin_lock_irq(shost->host_lock); 1825 spin_lock_irq(shost->host_lock);
1553 1826
1554 if (phba->hba_state == LPFC_HBA_READY) { 1827 if (lpfc_is_link_up(phba)) {
1555 switch(phba->fc_linkspeed) { 1828 switch(phba->fc_linkspeed) {
1556 case LA_1GHZ_LINK: 1829 case LA_1GHZ_LINK:
1557 fc_host_speed(shost) = FC_PORTSPEED_1GBIT; 1830 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
@@ -1577,39 +1850,31 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
1577static void 1850static void
1578lpfc_get_host_fabric_name (struct Scsi_Host *shost) 1851lpfc_get_host_fabric_name (struct Scsi_Host *shost)
1579{ 1852{
1580 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata; 1853 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1854 struct lpfc_hba *phba = vport->phba;
1581 u64 node_name; 1855 u64 node_name;
1582 1856
1583 spin_lock_irq(shost->host_lock); 1857 spin_lock_irq(shost->host_lock);
1584 1858
1585 if ((phba->fc_flag & FC_FABRIC) || 1859 if ((vport->fc_flag & FC_FABRIC) ||
1586 ((phba->fc_topology == TOPOLOGY_LOOP) && 1860 ((phba->fc_topology == TOPOLOGY_LOOP) &&
1587 (phba->fc_flag & FC_PUBLIC_LOOP))) 1861 (vport->fc_flag & FC_PUBLIC_LOOP)))
1588 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 1862 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
1589 else 1863 else
1590 /* fabric is local port if there is no F/FL_Port */ 1864 /* fabric is local port if there is no F/FL_Port */
1591 node_name = wwn_to_u64(phba->fc_nodename.u.wwn); 1865 node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1592 1866
1593 spin_unlock_irq(shost->host_lock); 1867 spin_unlock_irq(shost->host_lock);
1594 1868
1595 fc_host_fabric_name(shost) = node_name; 1869 fc_host_fabric_name(shost) = node_name;
1596} 1870}
1597 1871
1598static void
1599lpfc_get_host_symbolic_name (struct Scsi_Host *shost)
1600{
1601 struct lpfc_hba *phba = (struct lpfc_hba*)shost->hostdata;
1602
1603 spin_lock_irq(shost->host_lock);
1604 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost));
1605 spin_unlock_irq(shost->host_lock);
1606}
1607
1608static struct fc_host_statistics * 1872static struct fc_host_statistics *
1609lpfc_get_stats(struct Scsi_Host *shost) 1873lpfc_get_stats(struct Scsi_Host *shost)
1610{ 1874{
1611 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1875 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1612 struct lpfc_sli *psli = &phba->sli; 1876 struct lpfc_hba *phba = vport->phba;
1877 struct lpfc_sli *psli = &phba->sli;
1613 struct fc_host_statistics *hs = &phba->link_stats; 1878 struct fc_host_statistics *hs = &phba->link_stats;
1614 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 1879 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
1615 LPFC_MBOXQ_t *pmboxq; 1880 LPFC_MBOXQ_t *pmboxq;
@@ -1617,7 +1882,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
1617 unsigned long seconds; 1882 unsigned long seconds;
1618 int rc = 0; 1883 int rc = 0;
1619 1884
1620 if (phba->fc_flag & FC_BLOCK_MGMT_IO) 1885 /*
1886 * prevent udev from issuing mailbox commands until the port is
1887 * configured.
1888 */
1889 if (phba->link_state < LPFC_LINK_DOWN ||
1890 !phba->mbox_mem_pool ||
1891 (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
1892 return NULL;
1893
1894 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1621 return NULL; 1895 return NULL;
1622 1896
1623 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1897 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1629,17 +1903,16 @@ lpfc_get_stats(struct Scsi_Host *shost)
1629 pmb->mbxCommand = MBX_READ_STATUS; 1903 pmb->mbxCommand = MBX_READ_STATUS;
1630 pmb->mbxOwner = OWN_HOST; 1904 pmb->mbxOwner = OWN_HOST;
1631 pmboxq->context1 = NULL; 1905 pmboxq->context1 = NULL;
1906 pmboxq->vport = vport;
1632 1907
1633 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1908 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1634 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1909 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1635 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 1910 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1636 else 1911 else
1637 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 1912 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1638 1913
1639 if (rc != MBX_SUCCESS) { 1914 if (rc != MBX_SUCCESS) {
1640 if (rc == MBX_TIMEOUT) 1915 if (rc != MBX_TIMEOUT)
1641 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1642 else
1643 mempool_free(pmboxq, phba->mbox_mem_pool); 1916 mempool_free(pmboxq, phba->mbox_mem_pool);
1644 return NULL; 1917 return NULL;
1645 } 1918 }
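
The guards added at the top of lpfc_get_stats collapse to a single readiness test: no mailbox traffic until the link state, the mailbox pool and SLI2 mode are all initialized, and management I/O is not blocked. A sketch mirroring the hunk, with an invented helper name:

        static int lpfc_stats_ready_sketch(struct lpfc_hba *phba)
        {
                if (phba->link_state < LPFC_LINK_DOWN)         /* still coming up */
                        return 0;
                if (!phba->mbox_mem_pool)                      /* no mailbox pool yet */
                        return 0;
                if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))  /* SLI not active */
                        return 0;
                if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)   /* mgmt I/O blocked */
                        return 0;
                return 1;                                      /* safe to issue mbox */
        }
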
@@ -1655,18 +1928,17 @@ lpfc_get_stats(struct Scsi_Host *shost)
1655 pmb->mbxCommand = MBX_READ_LNK_STAT; 1928 pmb->mbxCommand = MBX_READ_LNK_STAT;
1656 pmb->mbxOwner = OWN_HOST; 1929 pmb->mbxOwner = OWN_HOST;
1657 pmboxq->context1 = NULL; 1930 pmboxq->context1 = NULL;
1931 pmboxq->vport = vport;
1658 1932
1659 if ((phba->fc_flag & FC_OFFLINE_MODE) || 1933 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1660 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 1934 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1661 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 1935 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1662 else 1936 else
1663 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 1937 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1664 1938
1665 if (rc != MBX_SUCCESS) { 1939 if (rc != MBX_SUCCESS) {
1666 if (rc == MBX_TIMEOUT) 1940 if (rc != MBX_TIMEOUT)
1667 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1941 mempool_free(pmboxq, phba->mbox_mem_pool);
1668 else
1669 mempool_free( pmboxq, phba->mbox_mem_pool);
1670 return NULL; 1942 return NULL;
1671 } 1943 }
1672 1944
@@ -1713,14 +1985,15 @@ lpfc_get_stats(struct Scsi_Host *shost)
1713static void 1985static void
1714lpfc_reset_stats(struct Scsi_Host *shost) 1986lpfc_reset_stats(struct Scsi_Host *shost)
1715{ 1987{
1716 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1988 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1717 struct lpfc_sli *psli = &phba->sli; 1989 struct lpfc_hba *phba = vport->phba;
1718 struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; 1990 struct lpfc_sli *psli = &phba->sli;
1991 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
1719 LPFC_MBOXQ_t *pmboxq; 1992 LPFC_MBOXQ_t *pmboxq;
1720 MAILBOX_t *pmb; 1993 MAILBOX_t *pmb;
1721 int rc = 0; 1994 int rc = 0;
1722 1995
1723 if (phba->fc_flag & FC_BLOCK_MGMT_IO) 1996 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
1724 return; 1997 return;
1725 1998
1726 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1999 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1733,17 +2006,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1733 pmb->mbxOwner = OWN_HOST; 2006 pmb->mbxOwner = OWN_HOST;
1734 pmb->un.varWords[0] = 0x1; /* reset request */ 2007 pmb->un.varWords[0] = 0x1; /* reset request */
1735 pmboxq->context1 = NULL; 2008 pmboxq->context1 = NULL;
2009 pmboxq->vport = vport;
1736 2010
1737 if ((phba->fc_flag & FC_OFFLINE_MODE) || 2011 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1738 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 2012 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1739 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2013 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1740 else 2014 else
1741 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 2015 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1742 2016
1743 if (rc != MBX_SUCCESS) { 2017 if (rc != MBX_SUCCESS) {
1744 if (rc == MBX_TIMEOUT) 2018 if (rc != MBX_TIMEOUT)
1745 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1746 else
1747 mempool_free(pmboxq, phba->mbox_mem_pool); 2019 mempool_free(pmboxq, phba->mbox_mem_pool);
1748 return; 2020 return;
1749 } 2021 }
@@ -1752,17 +2024,16 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1752 pmb->mbxCommand = MBX_READ_LNK_STAT; 2024 pmb->mbxCommand = MBX_READ_LNK_STAT;
1753 pmb->mbxOwner = OWN_HOST; 2025 pmb->mbxOwner = OWN_HOST;
1754 pmboxq->context1 = NULL; 2026 pmboxq->context1 = NULL;
2027 pmboxq->vport = vport;
1755 2028
1756 if ((phba->fc_flag & FC_OFFLINE_MODE) || 2029 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1757 (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) 2030 (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
1758 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); 2031 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
1759 else 2032 else
1760 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); 2033 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
1761 2034
1762 if (rc != MBX_SUCCESS) { 2035 if (rc != MBX_SUCCESS) {
1763 if (rc == MBX_TIMEOUT) 2036 if (rc != MBX_TIMEOUT)
1764 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1765 else
1766 mempool_free( pmboxq, phba->mbox_mem_pool); 2037 mempool_free( pmboxq, phba->mbox_mem_pool);
1767 return; 2038 return;
1768 } 2039 }
@@ -1791,13 +2062,13 @@ lpfc_reset_stats(struct Scsi_Host *shost)
1791static struct lpfc_nodelist * 2062static struct lpfc_nodelist *
1792lpfc_get_node_by_target(struct scsi_target *starget) 2063lpfc_get_node_by_target(struct scsi_target *starget)
1793{ 2064{
1794 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 2065 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1795 struct lpfc_hba *phba = (struct lpfc_hba *) shost->hostdata; 2066 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1796 struct lpfc_nodelist *ndlp; 2067 struct lpfc_nodelist *ndlp;
1797 2068
1798 spin_lock_irq(shost->host_lock); 2069 spin_lock_irq(shost->host_lock);
1799 /* Search for this, mapped, target ID */ 2070 /* Search for this, mapped, target ID */
1800 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2071 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1801 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 2072 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1802 starget->id == ndlp->nlp_sid) { 2073 starget->id == ndlp->nlp_sid) {
1803 spin_unlock_irq(shost->host_lock); 2074 spin_unlock_irq(shost->host_lock);
@@ -1887,8 +2158,66 @@ struct fc_function_template lpfc_transport_functions = {
1887 .get_host_fabric_name = lpfc_get_host_fabric_name, 2158 .get_host_fabric_name = lpfc_get_host_fabric_name,
1888 .show_host_fabric_name = 1, 2159 .show_host_fabric_name = 1,
1889 2160
1890 .get_host_symbolic_name = lpfc_get_host_symbolic_name, 2161 /*
1891 .show_host_symbolic_name = 1, 2162 * The LPFC driver treats linkdown handling as target loss events
2163 * so there are no sysfs handlers for link_down_tmo.
2164 */
2165
2166 .get_fc_host_stats = lpfc_get_stats,
2167 .reset_fc_host_stats = lpfc_reset_stats,
2168
2169 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
2170 .show_rport_maxframe_size = 1,
2171 .show_rport_supported_classes = 1,
2172
2173 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
2174 .show_rport_dev_loss_tmo = 1,
2175
2176 .get_starget_port_id = lpfc_get_starget_port_id,
2177 .show_starget_port_id = 1,
2178
2179 .get_starget_node_name = lpfc_get_starget_node_name,
2180 .show_starget_node_name = 1,
2181
2182 .get_starget_port_name = lpfc_get_starget_port_name,
2183 .show_starget_port_name = 1,
2184
2185 .issue_fc_host_lip = lpfc_issue_lip,
2186 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
2187 .terminate_rport_io = lpfc_terminate_rport_io,
2188
2189 .vport_create = lpfc_vport_create,
2190 .vport_delete = lpfc_vport_delete,
2191 .dd_fcvport_size = sizeof(struct lpfc_vport *),
2192};
2193
2194struct fc_function_template lpfc_vport_transport_functions = {
2195 /* fixed attributes the driver supports */
2196 .show_host_node_name = 1,
2197 .show_host_port_name = 1,
2198 .show_host_supported_classes = 1,
2199 .show_host_supported_fc4s = 1,
2200 .show_host_supported_speeds = 1,
2201 .show_host_maxframe_size = 1,
2202
2203 /* dynamic attributes the driver supports */
2204 .get_host_port_id = lpfc_get_host_port_id,
2205 .show_host_port_id = 1,
2206
2207 .get_host_port_type = lpfc_get_host_port_type,
2208 .show_host_port_type = 1,
2209
2210 .get_host_port_state = lpfc_get_host_port_state,
2211 .show_host_port_state = 1,
2212
2213 /* active_fc4s is shown but doesn't change (thus no get function) */
2214 .show_host_active_fc4s = 1,
2215
2216 .get_host_speed = lpfc_get_host_speed,
2217 .show_host_speed = 1,
2218
2219 .get_host_fabric_name = lpfc_get_host_fabric_name,
2220 .show_host_fabric_name = 1,
1892 2221
1893 /* 2222 /*
1894 * The LPFC driver treats linkdown handling as target loss events 2223 * The LPFC driver treats linkdown handling as target loss events
@@ -1917,6 +2246,8 @@ struct fc_function_template lpfc_transport_functions = {
1917 .issue_fc_host_lip = lpfc_issue_lip, 2246 .issue_fc_host_lip = lpfc_issue_lip,
1918 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk, 2247 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
1919 .terminate_rport_io = lpfc_terminate_rport_io, 2248 .terminate_rport_io = lpfc_terminate_rport_io,
2249
2250 .vport_disable = lpfc_vport_disable,
1920}; 2251};
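
The driver now carries two transport templates: the physical-port template gains vport_create/vport_delete, while the per-vport template above ends with vport_disable only, so NPIV vports cannot spawn further vports. A sketch of how both would be attached; the *_template variable names are assumptions, not taken from this diff:

        static struct scsi_transport_template *lpfc_transport_template;
        static struct scsi_transport_template *lpfc_vport_transport_template;

        static int lpfc_attach_templates_sketch(void)
        {
                /* physical ports: full template, can create vports */
                lpfc_transport_template =
                        fc_attach_transport(&lpfc_transport_functions);
                /* NPIV vports: reduced template, disable only */
                lpfc_vport_transport_template =
                        fc_attach_transport(&lpfc_vport_transport_functions);
                if (!lpfc_transport_template || !lpfc_vport_transport_template)
                        return -ENOMEM;
                return 0;
        }
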
1921 2252
1922void 2253void
@@ -1939,6 +2270,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
1939 lpfc_discovery_threads_init(phba, lpfc_discovery_threads); 2270 lpfc_discovery_threads_init(phba, lpfc_discovery_threads);
1940 lpfc_max_luns_init(phba, lpfc_max_luns); 2271 lpfc_max_luns_init(phba, lpfc_max_luns);
1941 lpfc_poll_tmo_init(phba, lpfc_poll_tmo); 2272 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
2273 lpfc_peer_port_login_init(phba, lpfc_peer_port_login);
2274 lpfc_npiv_enable_init(phba, lpfc_npiv_enable);
2275 lpfc_vport_restrict_login_init(phba, lpfc_vport_restrict_login);
1942 lpfc_use_msi_init(phba, lpfc_use_msi); 2276 lpfc_use_msi_init(phba, lpfc_use_msi);
1943 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo); 2277 lpfc_devloss_tmo_init(phba, lpfc_devloss_tmo);
1944 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo); 2278 lpfc_nodev_tmo_init(phba, lpfc_nodev_tmo);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index b8c2a8862d8c..e19d1a746586 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -23,92 +23,114 @@ typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);
23struct fc_rport; 23struct fc_rport;
24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); 24void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); 25void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
26void lpfc_heart_beat(struct lpfc_hba *, LPFC_MBOXQ_t *);
26int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 27int lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
27 struct lpfc_dmabuf *mp); 28 struct lpfc_dmabuf *mp);
28void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 29void lpfc_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
30void lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport);
29void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); 31void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
30int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); 32int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
31void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); 33void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
32void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); 34void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
33int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, 35int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
34 uint32_t); 36 LPFC_MBOXQ_t *, uint32_t);
35void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 37void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
36void lpfc_unreg_did(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); 38void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
39void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
40void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
37void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); 41void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
38 42
39 43void lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove);
40int lpfc_linkdown(struct lpfc_hba *); 44int lpfc_linkdown(struct lpfc_hba *);
41void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 45void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
42 46
43void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *); 47void lpfc_mbx_cmpl_clear_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
44void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 48void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
49void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
45void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 50void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
46void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 51void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
47void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); 52void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
48void lpfc_dequeue_node(struct lpfc_hba *, struct lpfc_nodelist *); 53void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
49void lpfc_nlp_set_state(struct lpfc_hba *, struct lpfc_nodelist *, int); 54void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int);
50void lpfc_drop_node(struct lpfc_hba *, struct lpfc_nodelist *); 55void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *);
51void lpfc_set_disctmo(struct lpfc_hba *); 56void lpfc_set_disctmo(struct lpfc_vport *);
52int lpfc_can_disctmo(struct lpfc_hba *); 57int lpfc_can_disctmo(struct lpfc_vport *);
53int lpfc_unreg_rpi(struct lpfc_hba *, struct lpfc_nodelist *); 58int lpfc_unreg_rpi(struct lpfc_vport *, struct lpfc_nodelist *);
59void lpfc_unreg_all_rpis(struct lpfc_vport *);
60void lpfc_unreg_default_rpis(struct lpfc_vport *);
61void lpfc_issue_reg_vpi(struct lpfc_hba *, struct lpfc_vport *);
62
54int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *, 63int lpfc_check_sli_ndlp(struct lpfc_hba *, struct lpfc_sli_ring *,
55 struct lpfc_iocbq *, struct lpfc_nodelist *); 64 struct lpfc_iocbq *, struct lpfc_nodelist *);
56void lpfc_nlp_init(struct lpfc_hba *, struct lpfc_nodelist *, uint32_t); 65void lpfc_nlp_init(struct lpfc_vport *, struct lpfc_nodelist *, uint32_t);
57struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *); 66struct lpfc_nodelist *lpfc_nlp_get(struct lpfc_nodelist *);
58int lpfc_nlp_put(struct lpfc_nodelist *); 67int lpfc_nlp_put(struct lpfc_nodelist *);
59struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_hba *, uint32_t); 68struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t);
60void lpfc_disc_list_loopmap(struct lpfc_hba *); 69void lpfc_disc_list_loopmap(struct lpfc_vport *);
61void lpfc_disc_start(struct lpfc_hba *); 70void lpfc_disc_start(struct lpfc_vport *);
62void lpfc_disc_flush_list(struct lpfc_hba *); 71void lpfc_disc_flush_list(struct lpfc_vport *);
72void lpfc_cleanup_discovery_resources(struct lpfc_vport *);
63void lpfc_disc_timeout(unsigned long); 73void lpfc_disc_timeout(unsigned long);
64 74
65struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); 75struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
66struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi); 76struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
67 77
78void lpfc_worker_wake_up(struct lpfc_hba *);
68int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t); 79int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
69int lpfc_do_work(void *); 80int lpfc_do_work(void *);
70int lpfc_disc_state_machine(struct lpfc_hba *, struct lpfc_nodelist *, void *, 81int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
71 uint32_t); 82 uint32_t);
72 83
73int lpfc_check_sparm(struct lpfc_hba *, struct lpfc_nodelist *, 84void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
85 struct lpfc_nodelist *);
86void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
87int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
74 struct serv_parm *, uint32_t); 88 struct serv_parm *, uint32_t);
75int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist * ndlp); 89int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
90int lpfc_els_chk_latt(struct lpfc_vport *);
76int lpfc_els_abort_flogi(struct lpfc_hba *); 91int lpfc_els_abort_flogi(struct lpfc_hba *);
77int lpfc_initial_flogi(struct lpfc_hba *); 92int lpfc_initial_flogi(struct lpfc_vport *);
78int lpfc_issue_els_plogi(struct lpfc_hba *, uint32_t, uint8_t); 93int lpfc_initial_fdisc(struct lpfc_vport *);
79int lpfc_issue_els_prli(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); 94int lpfc_issue_els_fdisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
80int lpfc_issue_els_adisc(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); 95int lpfc_issue_els_plogi(struct lpfc_vport *, uint32_t, uint8_t);
81int lpfc_issue_els_logo(struct lpfc_hba *, struct lpfc_nodelist *, uint8_t); 96int lpfc_issue_els_prli(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
82int lpfc_issue_els_scr(struct lpfc_hba *, uint32_t, uint8_t); 97int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
98int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
99int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
100int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
83int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); 101int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
84int lpfc_els_rsp_acc(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, 102int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
103int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
85 struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t); 104 struct lpfc_nodelist *, LPFC_MBOXQ_t *, uint8_t);
86int lpfc_els_rsp_reject(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, 105int lpfc_els_rsp_reject(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
87 struct lpfc_nodelist *); 106 struct lpfc_nodelist *, LPFC_MBOXQ_t *);
88int lpfc_els_rsp_adisc_acc(struct lpfc_hba *, struct lpfc_iocbq *, 107int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *,
89 struct lpfc_nodelist *); 108 struct lpfc_nodelist *);
90int lpfc_els_rsp_prli_acc(struct lpfc_hba *, struct lpfc_iocbq *, 109int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *,
91 struct lpfc_nodelist *); 110 struct lpfc_nodelist *);
92void lpfc_cancel_retry_delay_tmo(struct lpfc_hba *, struct lpfc_nodelist *); 111void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *);
93void lpfc_els_retry_delay(unsigned long); 112void lpfc_els_retry_delay(unsigned long);
94void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); 113void lpfc_els_retry_delay_handler(struct lpfc_nodelist *);
114void lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *);
95void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 115void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
96 struct lpfc_iocbq *); 116 struct lpfc_iocbq *);
97int lpfc_els_handle_rscn(struct lpfc_hba *); 117int lpfc_els_handle_rscn(struct lpfc_vport *);
98int lpfc_els_flush_rscn(struct lpfc_hba *); 118void lpfc_els_flush_rscn(struct lpfc_vport *);
99int lpfc_rscn_payload_check(struct lpfc_hba *, uint32_t); 119int lpfc_rscn_payload_check(struct lpfc_vport *, uint32_t);
100void lpfc_els_flush_cmd(struct lpfc_hba *); 120void lpfc_els_flush_cmd(struct lpfc_vport *);
101int lpfc_els_disc_adisc(struct lpfc_hba *); 121int lpfc_els_disc_adisc(struct lpfc_vport *);
102int lpfc_els_disc_plogi(struct lpfc_hba *); 122int lpfc_els_disc_plogi(struct lpfc_vport *);
103void lpfc_els_timeout(unsigned long); 123void lpfc_els_timeout(unsigned long);
104void lpfc_els_timeout_handler(struct lpfc_hba *); 124void lpfc_els_timeout_handler(struct lpfc_vport *);
125void lpfc_hb_timeout(unsigned long);
126void lpfc_hb_timeout_handler(struct lpfc_hba *);
105 127
106void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, 128void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
107 struct lpfc_iocbq *); 129 struct lpfc_iocbq *);
108int lpfc_ns_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int); 130int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
109int lpfc_fdmi_cmd(struct lpfc_hba *, struct lpfc_nodelist *, int); 131int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
110void lpfc_fdmi_tmo(unsigned long); 132void lpfc_fdmi_tmo(unsigned long);
111void lpfc_fdmi_tmo_handler(struct lpfc_hba *); 133void lpfc_fdmi_timeout_handler(struct lpfc_vport *vport);
112 134
113int lpfc_config_port_prep(struct lpfc_hba *); 135int lpfc_config_port_prep(struct lpfc_hba *);
114int lpfc_config_port_post(struct lpfc_hba *); 136int lpfc_config_port_post(struct lpfc_hba *);
@@ -136,16 +158,23 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
136void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); 158void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
137void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 159void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
138LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); 160LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
161void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
139int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 162int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
140 163
164void lpfc_config_hbq(struct lpfc_hba *, struct lpfc_hbq_init *, uint32_t ,
165 LPFC_MBOXQ_t *);
166struct lpfc_hbq_entry * lpfc_sli_next_hbq_slot(struct lpfc_hba *, uint32_t);
167
141int lpfc_mem_alloc(struct lpfc_hba *); 168int lpfc_mem_alloc(struct lpfc_hba *);
142void lpfc_mem_free(struct lpfc_hba *); 169void lpfc_mem_free(struct lpfc_hba *);
170void lpfc_stop_vport_timers(struct lpfc_vport *);
143 171
144void lpfc_poll_timeout(unsigned long ptr); 172void lpfc_poll_timeout(unsigned long ptr);
145void lpfc_poll_start_timer(struct lpfc_hba * phba); 173void lpfc_poll_start_timer(struct lpfc_hba * phba);
146void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba); 174void lpfc_sli_poll_fcp_ring(struct lpfc_hba * hba);
147struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *); 175struct lpfc_iocbq * lpfc_sli_get_iocbq(struct lpfc_hba *);
148void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 176void lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
177void __lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
149uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb); 178uint16_t lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocb);
150 179
151void lpfc_reset_barrier(struct lpfc_hba * phba); 180void lpfc_reset_barrier(struct lpfc_hba * phba);
@@ -154,6 +183,7 @@ int lpfc_sli_brdkill(struct lpfc_hba *);
154int lpfc_sli_brdreset(struct lpfc_hba *); 183int lpfc_sli_brdreset(struct lpfc_hba *);
155int lpfc_sli_brdrestart(struct lpfc_hba *); 184int lpfc_sli_brdrestart(struct lpfc_hba *);
156int lpfc_sli_hba_setup(struct lpfc_hba *); 185int lpfc_sli_hba_setup(struct lpfc_hba *);
186int lpfc_sli_host_down(struct lpfc_vport *);
157int lpfc_sli_hba_down(struct lpfc_hba *); 187int lpfc_sli_hba_down(struct lpfc_hba *);
158int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 188int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
159int lpfc_sli_handle_mb_event(struct lpfc_hba *); 189int lpfc_sli_handle_mb_event(struct lpfc_hba *);
@@ -164,27 +194,36 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
164int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, 194int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
165 struct lpfc_iocbq *, uint32_t); 195 struct lpfc_iocbq *, uint32_t);
166void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); 196void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
167int lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); 197void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
168int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, 198int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
169 struct lpfc_dmabuf *); 199 struct lpfc_dmabuf *);
170struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, 200struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
171 struct lpfc_sli_ring *, 201 struct lpfc_sli_ring *,
172 dma_addr_t); 202 dma_addr_t);
203int lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *, uint32_t);
204int lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *, uint32_t);
205void lpfc_sli_hbqbuf_free_all(struct lpfc_hba *);
206struct hbq_dmabuf *lpfc_sli_hbqbuf_find(struct lpfc_hba *, uint32_t);
207int lpfc_sli_hbq_size(void);
173int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, 208int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,
174 struct lpfc_iocbq *); 209 struct lpfc_iocbq *);
175int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, 210int lpfc_sli_sum_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
176 uint64_t, lpfc_ctx_cmd); 211 uint64_t, lpfc_ctx_cmd);
177int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t, 212int lpfc_sli_abort_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, uint16_t,
178 uint64_t, uint32_t, lpfc_ctx_cmd); 213 uint64_t, uint32_t, lpfc_ctx_cmd);
179 214
180void lpfc_mbox_timeout(unsigned long); 215void lpfc_mbox_timeout(unsigned long);
181void lpfc_mbox_timeout_handler(struct lpfc_hba *); 216void lpfc_mbox_timeout_handler(struct lpfc_hba *);
182 217
183struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_hba *, uint32_t); 218struct lpfc_nodelist *__lpfc_find_node(struct lpfc_vport *, node_filter,
184struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_hba *, struct lpfc_name *); 219 void *);
220struct lpfc_nodelist *lpfc_find_node(struct lpfc_vport *, node_filter, void *);
221struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
222struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
223 struct lpfc_name *);
185 224
186int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 225int lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
187 uint32_t timeout); 226 uint32_t timeout);
188 227
189int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 228int lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
190 struct lpfc_sli_ring * pring, 229 struct lpfc_sli_ring * pring,
@@ -195,25 +234,56 @@ void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba,
195 struct lpfc_iocbq * cmdiocb, 234 struct lpfc_iocbq * cmdiocb,
196 struct lpfc_iocbq * rspiocb); 235 struct lpfc_iocbq * rspiocb);
197 236
237void *lpfc_hbq_alloc(struct lpfc_hba *, int, dma_addr_t *);
238void lpfc_hbq_free(struct lpfc_hba *, void *, dma_addr_t);
239void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
240
198void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *); 241void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
242void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
199void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t); 243void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
200 244
245void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
201/* Function prototypes. */ 246/* Function prototypes. */
202const char* lpfc_info(struct Scsi_Host *); 247const char* lpfc_info(struct Scsi_Host *);
203void lpfc_scan_start(struct Scsi_Host *);
204int lpfc_scan_finished(struct Scsi_Host *, unsigned long); 248int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
205 249
206void lpfc_get_cfgparam(struct lpfc_hba *); 250void lpfc_get_cfgparam(struct lpfc_hba *);
207int lpfc_alloc_sysfs_attr(struct lpfc_hba *); 251int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
208void lpfc_free_sysfs_attr(struct lpfc_hba *); 252void lpfc_free_sysfs_attr(struct lpfc_vport *);
209extern struct class_device_attribute *lpfc_host_attrs[]; 253extern struct class_device_attribute *lpfc_hba_attrs[];
210extern struct scsi_host_template lpfc_template; 254extern struct scsi_host_template lpfc_template;
211extern struct fc_function_template lpfc_transport_functions; 255extern struct fc_function_template lpfc_transport_functions;
256extern struct fc_function_template lpfc_vport_transport_functions;
257extern int lpfc_sli_mode;
212 258
213void lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp); 259int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
214void lpfc_terminate_rport_io(struct fc_rport *); 260void lpfc_terminate_rport_io(struct fc_rport *);
215void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport); 261void lpfc_dev_loss_tmo_callbk(struct fc_rport *rport);
216 262
263struct lpfc_vport *lpfc_create_port(struct lpfc_hba *, int, struct fc_vport *);
264int lpfc_vport_disable(struct fc_vport *fc_vport, bool disable);
265void lpfc_mbx_unreg_vpi(struct lpfc_vport *);
266void destroy_port(struct lpfc_vport *);
267int lpfc_get_instance(void);
268void lpfc_host_attrib_init(struct Scsi_Host *);
269
270extern void lpfc_debugfs_initialize(struct lpfc_vport *);
271extern void lpfc_debugfs_terminate(struct lpfc_vport *);
272extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
273 uint32_t, uint32_t);
274
275/* Interface exported by fabric iocb scheduler */
276int lpfc_issue_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
277void lpfc_fabric_abort_vport(struct lpfc_vport *);
278void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
279void lpfc_fabric_abort_hba(struct lpfc_hba *);
280void lpfc_fabric_abort_flogi(struct lpfc_hba *);
281void lpfc_fabric_block_timeout(unsigned long);
282void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);
283void lpfc_adjust_queue_depth(struct lpfc_hba *);
284void lpfc_ramp_down_queue_handler(struct lpfc_hba *);
285void lpfc_ramp_up_queue_handler(struct lpfc_hba *);
286
217#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) 287#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
218#define HBA_EVENT_RSCN 5 288#define HBA_EVENT_RSCN 5
219#define HBA_EVENT_LINK_UP 2 289#define HBA_EVENT_LINK_UP 2
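The lpfc_crtn.h hunks above are the signature-level view of this patch's central change for NPIV: per-port discovery state moves out of struct lpfc_hba into the new struct lpfc_vport, so routines that used to take the HBA now take the vport and reach the physical adapter through vport->phba, while completion handlers recover the vport from the iocb itself. The stand-alone sketch below models that calling convention with stub types; the struct layouts are placeholders, not the driver's real definitions in lpfc.h.

    #include <stdio.h>

    /* Stub types; the real definitions live in drivers/scsi/lpfc/lpfc.h. */
    struct lpfc_hba   { int brd_no; };
    struct lpfc_vport { struct lpfc_hba *phba; int vpi; };
    struct lpfc_iocbq { struct lpfc_vport *vport; };

    /* New-style completion handler: the iocb carries its issuing vport,
     * and the physical HBA remains reachable as vport->phba. */
    static void cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
    {
        struct lpfc_vport *vport = cmdiocb->vport;

        printf("board %d, vpi %d\n", phba->brd_no, vport->vpi);
    }

    int main(void)
    {
        struct lpfc_hba hba = { .brd_no = 0 };
        struct lpfc_vport vport = { .phba = &hba, .vpi = 1 };
        struct lpfc_iocbq iocb = { .vport = &vport };

        cmpl(vport.phba, &iocb);
        return 0;
    }

This is why nearly every prototype in the hunk swaps struct lpfc_hba * for struct lpfc_vport *.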
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 34a9e3bb2614..ae9d6f385a6c 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -40,6 +40,8 @@
40#include "lpfc_logmsg.h" 40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h" 41#include "lpfc_crtn.h"
42#include "lpfc_version.h" 42#include "lpfc_version.h"
43#include "lpfc_vport.h"
44#include "lpfc_debugfs.h"
43 45
44#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver 46#define HBA_PORTSPEED_UNKNOWN 0 /* Unknown - transceiver
45 * incapable of reporting */ 47 * incapable of reporting */
@@ -58,25 +60,69 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION;
58/* 60/*
59 * lpfc_ct_unsol_event 61 * lpfc_ct_unsol_event
60 */ 62 */
63static void
64lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
65 struct lpfc_dmabuf *mp, uint32_t size)
66{
67 if (!mp) {
68 printk(KERN_ERR "%s (%d): Unsolicited CT, no buffer, "
69 "piocbq = %p, status = x%x, mp = %p, size = %d\n",
70 __FUNCTION__, __LINE__,
71 piocbq, piocbq->iocb.ulpStatus, mp, size);
72 }
73
74 printk(KERN_ERR "%s (%d): Ignoring unsolicited CT piocbq = %p, "
75 "buffer = %p, size = %d, status = x%x\n",
76 __FUNCTION__, __LINE__,
77 piocbq, mp, size,
78 piocbq->iocb.ulpStatus);
79
80}
81
82static void
83lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
84 struct lpfc_dmabuf *mp, uint32_t size)
85{
86 if (!mp) {
87 printk(KERN_ERR "%s (%d): Unsolicited CT, no "
88 "HBQ buffer, piocbq = %p, status = x%x\n",
89 __FUNCTION__, __LINE__,
90 piocbq, piocbq->iocb.ulpStatus);
91 } else {
92 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
93 printk(KERN_ERR "%s (%d): Ignoring unsolicited CT "
94 "piocbq = %p, buffer = %p, size = %d, "
95 "status = x%x\n",
96 __FUNCTION__, __LINE__,
97 piocbq, mp, size, piocbq->iocb.ulpStatus);
98 }
99}
100
61void 101void
62lpfc_ct_unsol_event(struct lpfc_hba * phba, 102lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
63 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq) 103 struct lpfc_iocbq *piocbq)
64{ 104{
65 105
66 struct lpfc_iocbq *next_piocbq; 106 struct lpfc_dmabuf *mp = NULL;
67 struct lpfc_dmabuf *pmbuf = NULL;
68 struct lpfc_dmabuf *matp, *next_matp;
69 uint32_t ctx = 0, size = 0, cnt = 0;
70 IOCB_t *icmd = &piocbq->iocb; 107 IOCB_t *icmd = &piocbq->iocb;
71 IOCB_t *save_icmd = icmd; 108 int i;
72 int i, go_exit = 0; 109 struct lpfc_iocbq *iocbq;
73 struct list_head head; 110 dma_addr_t paddr;
111 uint32_t size;
112 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
113 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
114
115 piocbq->context2 = NULL;
116 piocbq->context3 = NULL;
74 117
75 if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) && 118 if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
119 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
120 } else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
76 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) { 121 ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
77 /* Not enough posted buffers; Try posting more buffers */ 122 /* Not enough posted buffers; Try posting more buffers */
78 phba->fc_stat.NoRcvBuf++; 123 phba->fc_stat.NoRcvBuf++;
79 lpfc_post_buffer(phba, pring, 0, 1); 124 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
125 lpfc_post_buffer(phba, pring, 0, 1);
80 return; 126 return;
81 } 127 }
82 128
@@ -86,66 +132,56 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
86 if (icmd->ulpBdeCount == 0) 132 if (icmd->ulpBdeCount == 0)
87 return; 133 return;
88 134
89 INIT_LIST_HEAD(&head); 135 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
90 list_add_tail(&head, &piocbq->list); 136 list_for_each_entry(iocbq, &piocbq->list, list) {
91 137 icmd = &iocbq->iocb;
92 list_for_each_entry_safe(piocbq, next_piocbq, &head, list) { 138 if (icmd->ulpBdeCount == 0) {
93 icmd = &piocbq->iocb; 139 printk(KERN_ERR "%s (%d): Unsolited CT, no "
94 if (ctx == 0) 140 "BDE, iocbq = %p, status = x%x\n",
95 ctx = (uint32_t) (icmd->ulpContext); 139 printk(KERN_ERR "%s (%d): Unsolicited CT, no "
96 if (icmd->ulpBdeCount == 0) 142 iocbq, iocbq->iocb.ulpStatus);
97 continue; 143 continue;
98
99 for (i = 0; i < icmd->ulpBdeCount; i++) {
100 matp = lpfc_sli_ringpostbuf_get(phba, pring,
101 getPaddr(icmd->un.
102 cont64[i].
103 addrHigh,
104 icmd->un.
105 cont64[i].
106 addrLow));
107 if (!matp) {
108 /* Insert lpfc log message here */
109 lpfc_post_buffer(phba, pring, cnt, 1);
110 go_exit = 1;
111 goto ct_unsol_event_exit_piocbq;
112 } 144 }
113 145
114 /* Typically for Unsolicited CT requests */ 146 size = icmd->un.cont64[0].tus.f.bdeSize;
115 if (!pmbuf) { 147 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf1, size);
116 pmbuf = matp; 148 lpfc_in_buf_free(phba, bdeBuf1);
117 INIT_LIST_HEAD(&pmbuf->list); 149 if (icmd->ulpBdeCount == 2) {
118 } else 150 lpfc_ct_ignore_hbq_buffer(phba, piocbq, bdeBuf2,
119 list_add_tail(&matp->list, &pmbuf->list); 151 size);
120 152 lpfc_in_buf_free(phba, bdeBuf2);
121 size += icmd->un.cont64[i].tus.f.bdeSize; 153 }
122 cnt++;
123 } 154 }
155 } else {
156 struct lpfc_iocbq *next;
157
158 list_for_each_entry_safe(iocbq, next, &piocbq->list, list) {
159 icmd = &iocbq->iocb;
160 if (icmd->ulpBdeCount == 0) {
161 printk(KERN_ERR "%s (%d): Unsolicited CT, no "
162 "BDE, iocbq = %p, status = x%x\n",
163 __FUNCTION__, __LINE__,
164 iocbq, iocbq->iocb.ulpStatus);
165 continue;
166 }
124 167
125 icmd->ulpBdeCount = 0; 168 for (i = 0; i < icmd->ulpBdeCount; i++) {
126 } 169 paddr = getPaddr(icmd->un.cont64[i].addrHigh,
127 170 icmd->un.cont64[i].addrLow);
128 lpfc_post_buffer(phba, pring, cnt, 1); 171 mp = lpfc_sli_ringpostbuf_get(phba, pring,
129 if (save_icmd->ulpStatus) { 172 paddr);
130 go_exit = 1; 173 size = icmd->un.cont64[i].tus.f.bdeSize;
131 } 174 lpfc_ct_unsol_buffer(phba, piocbq, mp, size);
132 175 lpfc_in_buf_free(phba, mp);
133ct_unsol_event_exit_piocbq: 176 }
134 list_del(&head); 177 list_del(&iocbq->list);
135 if (pmbuf) { 178 lpfc_sli_release_iocbq(phba, iocbq);
136 list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
137 lpfc_mbuf_free(phba, matp->virt, matp->phys);
138 list_del(&matp->list);
139 kfree(matp);
140 } 179 }
141 lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
142 kfree(pmbuf);
143 } 180 }
144 return;
145} 181}
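The rewritten lpfc_ct_unsol_event above splits unsolicited-CT receive handling by buffer model: when LPFC_SLI3_HBQ_ENABLED is set the buffers arrive already attached to the iocb (context2/context3) and are simply returned through lpfc_in_buf_free(), while the legacy path still resolves each BDE's DMA address back to a posted buffer via lpfc_sli_ringpostbuf_get(). A minimal stand-alone model of the dispatch follows; the flag value is illustrative, not the driver's.

    #include <stdio.h>

    #define LPFC_SLI3_HBQ_ENABLED 0x02  /* illustrative value only */

    static const char *rcv_path(unsigned int sli3_options)
    {
        if (sli3_options & LPFC_SLI3_HBQ_ENABLED)
            return "HBQ: buffers handed over on the iocb, freed in place";
        return "legacy: each BDE looked up by DMA address in the ring";
    }

    int main(void)
    {
        printf("%s\n", rcv_path(LPFC_SLI3_HBQ_ENABLED));
        printf("%s\n", rcv_path(0));
        return 0;
    }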
146 182
147static void 183static void
148lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist) 184lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
149{ 185{
150 struct lpfc_dmabuf *mlast, *next_mlast; 186 struct lpfc_dmabuf *mlast, *next_mlast;
151 187
@@ -160,7 +196,7 @@ lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
160} 196}
161 197
162static struct lpfc_dmabuf * 198static struct lpfc_dmabuf *
163lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl, 199lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl,
164 uint32_t size, int *entries) 200 uint32_t size, int *entries)
165{ 201{
166 struct lpfc_dmabuf *mlist = NULL; 202 struct lpfc_dmabuf *mlist = NULL;
@@ -181,7 +217,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
181 217
182 INIT_LIST_HEAD(&mp->list); 218 INIT_LIST_HEAD(&mp->list);
183 219
184 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT)) 220 if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) ||
221 cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID))
185 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 222 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
186 else 223 else
187 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); 224 mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
@@ -201,8 +238,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
201 238
202 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 239 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
203 /* build buffer ptr list for IOCB */ 240 /* build buffer ptr list for IOCB */
204 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 241 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
205 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 242 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
206 bpl->tus.f.bdeSize = (uint16_t) cnt; 243 bpl->tus.f.bdeSize = (uint16_t) cnt;
207 bpl->tus.w = le32_to_cpu(bpl->tus.w); 244 bpl->tus.w = le32_to_cpu(bpl->tus.w);
208 bpl++; 245 bpl++;
@@ -215,24 +252,49 @@ lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
215 return mlist; 252 return mlist;
216} 253}
217 254
255int
256lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
257{
258 struct lpfc_dmabuf *buf_ptr;
259
260 if (ctiocb->context1) {
261 buf_ptr = (struct lpfc_dmabuf *) ctiocb->context1;
262 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
263 kfree(buf_ptr);
264 ctiocb->context1 = NULL;
265 }
266 if (ctiocb->context2) {
267 lpfc_free_ct_rsp(phba, (struct lpfc_dmabuf *) ctiocb->context2);
268 ctiocb->context2 = NULL;
269 }
270
271 if (ctiocb->context3) {
272 buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;
273 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
274 kfree(buf_ptr);
275 ctiocb->context3 = NULL;
276 }
277 lpfc_sli_release_iocbq(phba, ctiocb);
278 return 0;
279}
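lpfc_ct_free_iocb() above centralizes the teardown that every CT completion routine in this file used to open-code: context1 holds the request mbuf, context2 the response chain (released through lpfc_free_ct_rsp()), context3 the buffer pointer list, and the iocbq itself is finally returned to the pool. A toy model of that single-owner convention, with plain malloc/free standing in for the driver's DMA pools:

    #include <stdio.h>
    #include <stdlib.h>

    struct ctiocb {
        void *context1;  /* request buffer */
        void *context2;  /* response chain */
        void *context3;  /* buffer pointer list */
    };

    /* One helper owns freeing every context and releasing the command. */
    static void ct_free_iocb(struct ctiocb *cmd)
    {
        free(cmd->context1);
        cmd->context1 = NULL;
        free(cmd->context2);
        cmd->context2 = NULL;
        free(cmd->context3);
        cmd->context3 = NULL;
        free(cmd);
        puts("iocb and all contexts released");
    }

    int main(void)
    {
        struct ctiocb *cmd = calloc(1, sizeof(*cmd));

        if (!cmd)
            return 1;
        cmd->context1 = malloc(16);
        cmd->context2 = malloc(16);
        cmd->context3 = malloc(16);
        ct_free_iocb(cmd);
        return 0;
    }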
280
218static int 281static int
219lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp, 282lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
220 struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, 283 struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
221 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 284 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
222 struct lpfc_iocbq *), 285 struct lpfc_iocbq *),
223 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry, 286 struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
224 uint32_t tmo) 287 uint32_t tmo, uint8_t retry)
225{ 288{
226 289 struct lpfc_hba *phba = vport->phba;
227 struct lpfc_sli *psli = &phba->sli; 290 struct lpfc_sli *psli = &phba->sli;
228 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; 291 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
229 IOCB_t *icmd; 292 IOCB_t *icmd;
230 struct lpfc_iocbq *geniocb; 293 struct lpfc_iocbq *geniocb;
294 int rc;
231 295
232 /* Allocate buffer for command iocb */ 296 /* Allocate buffer for command iocb */
233 spin_lock_irq(phba->host->host_lock);
234 geniocb = lpfc_sli_get_iocbq(phba); 297 geniocb = lpfc_sli_get_iocbq(phba);
235 spin_unlock_irq(phba->host->host_lock);
236 298
237 if (geniocb == NULL) 299 if (geniocb == NULL)
238 return 1; 300 return 1;
@@ -272,31 +334,40 @@ lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
272 icmd->ulpClass = CLASS3; 334 icmd->ulpClass = CLASS3;
273 icmd->ulpContext = ndlp->nlp_rpi; 335 icmd->ulpContext = ndlp->nlp_rpi;
274 336
337 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
338 /* For GEN_REQUEST64_CR, use the RPI */
339 icmd->ulpCt_h = 0;
340 icmd->ulpCt_l = 0;
341 }
342
275 /* Issue GEN REQ IOCB for NPORT <did> */ 343 /* Issue GEN REQ IOCB for NPORT <did> */
276 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 344 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
277 "%d:0119 Issue GEN REQ IOCB for NPORT x%x " 345 "%d (%d):0119 Issue GEN REQ IOCB to NPORT x%x "
278 "Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5], 346 "Data: x%x x%x\n", phba->brd_no, vport->vpi,
279 icmd->ulpIoTag, phba->hba_state); 347 ndlp->nlp_DID, icmd->ulpIoTag,
348 vport->port_state);
280 geniocb->iocb_cmpl = cmpl; 349 geniocb->iocb_cmpl = cmpl;
281 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; 350 geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
282 spin_lock_irq(phba->host->host_lock); 351 geniocb->vport = vport;
283 if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) { 352 geniocb->retry = retry;
353 rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
354
355 if (rc == IOCB_ERROR) {
284 lpfc_sli_release_iocbq(phba, geniocb); 356 lpfc_sli_release_iocbq(phba, geniocb);
285 spin_unlock_irq(phba->host->host_lock);
286 return 1; 357 return 1;
287 } 358 }
288 spin_unlock_irq(phba->host->host_lock);
289 359
290 return 0; 360 return 0;
291} 361}
292 362
293static int 363static int
294lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp, 364lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
295 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, 365 struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
296 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 366 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
297 struct lpfc_iocbq *), 367 struct lpfc_iocbq *),
298 uint32_t rsp_size) 368 uint32_t rsp_size, uint8_t retry)
299{ 369{
370 struct lpfc_hba *phba = vport->phba;
300 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; 371 struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
301 struct lpfc_dmabuf *outmp; 372 struct lpfc_dmabuf *outmp;
302 int cnt = 0, status; 373 int cnt = 0, status;
@@ -310,8 +381,8 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
310 if (!outmp) 381 if (!outmp)
311 return -ENOMEM; 382 return -ENOMEM;
312 383
313 status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0, 384 status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0,
314 cnt+1, 0); 385 cnt+1, 0, retry);
315 if (status) { 386 if (status) {
316 lpfc_free_ct_rsp(phba, outmp); 387 lpfc_free_ct_rsp(phba, outmp);
317 return -ENOMEM; 388 return -ENOMEM;
@@ -319,20 +390,35 @@ lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
319 return 0; 390 return 0;
320} 391}
321 392
393static struct lpfc_vport *
394lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did)
395{
396 struct lpfc_vport *vport_curr;
397
398 list_for_each_entry(vport_curr, &phba->port_list, listentry) {
399 if ((vport_curr->fc_myDID) &&
400 (vport_curr->fc_myDID == did))
401 return vport_curr;
402 }
403
404 return NULL;
405}
406
322static int 407static int
323lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size) 408lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
324{ 409{
410 struct lpfc_hba *phba = vport->phba;
325 struct lpfc_sli_ct_request *Response = 411 struct lpfc_sli_ct_request *Response =
326 (struct lpfc_sli_ct_request *) mp->virt; 412 (struct lpfc_sli_ct_request *) mp->virt;
327 struct lpfc_nodelist *ndlp = NULL; 413 struct lpfc_nodelist *ndlp = NULL;
328 struct lpfc_dmabuf *mlast, *next_mp; 414 struct lpfc_dmabuf *mlast, *next_mp;
329 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; 415 uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
330 uint32_t Did; 416 uint32_t Did, CTentry;
331 uint32_t CTentry;
332 int Cnt; 417 int Cnt;
333 struct list_head head; 418 struct list_head head;
334 419
335 lpfc_set_disctmo(phba); 420 lpfc_set_disctmo(vport);
421 vport->num_disc_nodes = 0;
336 422
337 423
338 list_add_tail(&head, &mp->list); 424 list_add_tail(&head, &mp->list);
@@ -350,39 +436,96 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
350 436
351 /* Loop through entire NameServer list of DIDs */ 437 /* Loop through entire NameServer list of DIDs */
352 while (Cnt >= sizeof (uint32_t)) { 438 while (Cnt >= sizeof (uint32_t)) {
353
354 /* Get next DID from NameServer List */ 439 /* Get next DID from NameServer List */
355 CTentry = *ctptr++; 440 CTentry = *ctptr++;
356 Did = ((be32_to_cpu(CTentry)) & Mask_DID); 441 Did = ((be32_to_cpu(CTentry)) & Mask_DID);
357 442
358 ndlp = NULL; 443 ndlp = NULL;
359 if (Did != phba->fc_myDID) { 444
360 /* Check for rscn processing or not */ 445 /*
361 ndlp = lpfc_setup_disc_node(phba, Did); 446 * Check for rscn processing or not
362 } 447 * To conserve rpi's, filter out addresses for other
363 /* Mark all node table entries that are in the 448 * vports on the same physical HBAs.
364 Nameserver */ 449 */
365 if (ndlp) { 450 if ((Did != vport->fc_myDID) &&
366 /* NameServer Rsp */ 451 ((lpfc_find_vport_by_did(phba, Did) == NULL) ||
367 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 452 phba->cfg_peer_port_login)) {
368 "%d:0238 Process x%x NameServer" 453 if ((vport->port_type != LPFC_NPIV_PORT) ||
369 " Rsp Data: x%x x%x x%x\n", 454 (vport->fc_flag & FC_RFF_NOT_SUPPORTED) ||
370 phba->brd_no, 455 (!phba->cfg_vport_restrict_login)) {
456 ndlp = lpfc_setup_disc_node(vport, Did);
457 if (ndlp) {
458 lpfc_debugfs_disc_trc(vport,
459 LPFC_DISC_TRC_CT,
460 "Parse GID_FTrsp: "
461 "did:x%x flg:x%x x%x",
371 Did, ndlp->nlp_flag, 462 Did, ndlp->nlp_flag,
372 phba->fc_flag, 463 vport->fc_flag);
373 phba->fc_rscn_id_cnt); 464
374 } else { 465 lpfc_printf_log(phba, KERN_INFO,
375 /* NameServer Rsp */ 466 LOG_DISCOVERY,
376 lpfc_printf_log(phba, 467 "%d (%d):0238 Process "
377 KERN_INFO, 468 "x%x NameServer Rsp"
378 LOG_DISCOVERY, 469 "Data: x%x x%x x%x\n",
379 "%d:0239 Skip x%x NameServer " 470 phba->brd_no,
380 "Rsp Data: x%x x%x x%x\n", 471 vport->vpi, Did,
381 phba->brd_no, 472 ndlp->nlp_flag,
382 Did, Size, phba->fc_flag, 473 vport->fc_flag,
383 phba->fc_rscn_id_cnt); 474 vport->fc_rscn_id_cnt);
475 } else {
476 lpfc_debugfs_disc_trc(vport,
477 LPFC_DISC_TRC_CT,
478 "Skip1 GID_FTrsp: "
479 "did:x%x flg:x%x cnt:%d",
480 Did, vport->fc_flag,
481 vport->fc_rscn_id_cnt);
482
483 lpfc_printf_log(phba, KERN_INFO,
484 LOG_DISCOVERY,
485 "%d (%d):0239 Skip x%x "
486 "NameServer Rsp Data: "
487 "x%x x%x\n",
488 phba->brd_no,
489 vport->vpi, Did,
490 vport->fc_flag,
491 vport->fc_rscn_id_cnt);
492 }
493
494 } else {
495 if (!(vport->fc_flag & FC_RSCN_MODE) ||
496 (lpfc_rscn_payload_check(vport, Did))) {
497 lpfc_debugfs_disc_trc(vport,
498 LPFC_DISC_TRC_CT,
499 "Query GID_FTrsp: "
500 "did:x%x flg:x%x cnt:%d",
501 Did, vport->fc_flag,
502 vport->fc_rscn_id_cnt);
503
504 if (lpfc_ns_cmd(vport,
505 SLI_CTNS_GFF_ID,
506 0, Did) == 0)
507 vport->num_disc_nodes++;
508 }
509 else {
510 lpfc_debugfs_disc_trc(vport,
511 LPFC_DISC_TRC_CT,
512 "Skip2 GID_FTrsp: "
513 "did:x%x flg:x%x cnt:%d",
514 Did, vport->fc_flag,
515 vport->fc_rscn_id_cnt);
516
517 lpfc_printf_log(phba, KERN_INFO,
518 LOG_DISCOVERY,
519 "%d (%d):0245 Skip x%x "
520 "NameServer Rsp Data: "
521 "x%x x%x\n",
522 phba->brd_no,
523 vport->vpi, Did,
524 vport->fc_flag,
525 vport->fc_rscn_id_cnt);
526 }
527 }
384 } 528 }
385
386 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY))) 529 if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
387 goto nsout1; 530 goto nsout1;
388 Cnt -= sizeof (uint32_t); 531 Cnt -= sizeof (uint32_t);
@@ -393,190 +536,369 @@ lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
393 536
394nsout1: 537nsout1:
395 list_del(&head); 538 list_del(&head);
396
397 /*
398 * The driver has cycled through all Nports in the RSCN payload.
399 * Complete the handling by cleaning up and marking the
400 * current driver state.
401 */
402 if (phba->hba_state == LPFC_HBA_READY) {
403 lpfc_els_flush_rscn(phba);
404 spin_lock_irq(phba->host->host_lock);
405 phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
406 spin_unlock_irq(phba->host->host_lock);
407 }
408 return 0; 539 return 0;
409} 540}
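The GID_FT walk in lpfc_ns_rsp() now filters each returned DID before spending an RPI on it: the port's own DID is skipped, DIDs owned by sibling vports on the same HBA are skipped unless cfg_peer_port_login allows them, and restricted-login NPIV ports issue a GFF_ID query instead of creating the node outright. The stand-alone model below simplifies the real predicate (it folds the FC_RFF_NOT_SUPPORTED escape and the RSCN-mode test into single flags):

    #include <stdbool.h>
    #include <stdio.h>

    static const char *classify(unsigned int did, unsigned int my_did,
                                bool sibling_vport, bool peer_port_login,
                                bool restrict_login, bool rscn_match)
    {
        if (did == my_did)
            return "skip: own DID";
        if (sibling_vport && !peer_port_login)
            return "skip: another vport on this HBA";
        if (!restrict_login)
            return "setup disc node (PLOGI path)";
        /* rscn_match stands for (!FC_RSCN_MODE || payload check passed) */
        return rscn_match ? "query GFF_ID first" : "skip: outside RSCN payload";
    }

    int main(void)
    {
        printf("%s\n", classify(0x010200, 0x010200, false, false, false, true));
        printf("%s\n", classify(0x010300, 0x010200, true,  false, false, true));
        printf("%s\n", classify(0x010400, 0x010200, false, false, true,  true));
        return 0;
    }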
410 541
411
412
413
414static void 542static void
415lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 543lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
416 struct lpfc_iocbq * rspiocb) 544 struct lpfc_iocbq *rspiocb)
417{ 545{
546 struct lpfc_vport *vport = cmdiocb->vport;
547 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
418 IOCB_t *irsp; 548 IOCB_t *irsp;
419 struct lpfc_sli *psli;
420 struct lpfc_dmabuf *bmp; 549 struct lpfc_dmabuf *bmp;
421 struct lpfc_dmabuf *inp;
422 struct lpfc_dmabuf *outp; 550 struct lpfc_dmabuf *outp;
423 struct lpfc_nodelist *ndlp;
424 struct lpfc_sli_ct_request *CTrsp; 551 struct lpfc_sli_ct_request *CTrsp;
552 int rc;
425 553
426 psli = &phba->sli;
427 /* we pass cmdiocb to state machine which needs rspiocb as well */ 554 /* we pass cmdiocb to state machine which needs rspiocb as well */
428 cmdiocb->context_un.rsp_iocb = rspiocb; 555 cmdiocb->context_un.rsp_iocb = rspiocb;
429 556
430 inp = (struct lpfc_dmabuf *) cmdiocb->context1;
431 outp = (struct lpfc_dmabuf *) cmdiocb->context2; 557 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
432 bmp = (struct lpfc_dmabuf *) cmdiocb->context3; 558 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
433
434 irsp = &rspiocb->iocb; 559 irsp = &rspiocb->iocb;
435 if (irsp->ulpStatus) {
436 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
437 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
438 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED))) {
439 goto out;
440 }
441 560
561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
562 "GID_FT cmpl: status:x%x/x%x rtry:%d",
563 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry);
564
565 /* Don't bother processing response if vport is being torn down. */
566 if (vport->load_flag & FC_UNLOADING)
567 goto out;
568
569
570 if (lpfc_els_chk_latt(vport) || lpfc_error_lost_link(irsp)) {
571 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
572 "%d (%d):0216 Link event during NS query\n",
573 phba->brd_no, vport->vpi);
574 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
575 goto out;
576 }
577
578 if (irsp->ulpStatus) {
442 /* Check for retry */ 579 /* Check for retry */
443 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { 580 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
444 phba->fc_ns_retry++; 581 if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
582 (irsp->un.ulpWord[4] != IOERR_NO_RESOURCES))
583 vport->fc_ns_retry++;
445 /* CT command is being retried */ 584 /* CT command is being retried */
446 ndlp = lpfc_findnode_did(phba, NameServer_DID); 585 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
447 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 586 vport->fc_ns_retry, 0);
448 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 587 if (rc == 0)
449 0) { 588 goto out;
450 goto out;
451 }
452 }
453 } 589 }
590 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
591 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
592 "%d (%d):0257 GID_FT Query error: 0x%x 0x%x\n",
593 phba->brd_no, vport->vpi, irsp->ulpStatus,
594 vport->fc_ns_retry);
454 } else { 595 } else {
455 /* Good status, continue checking */ 596 /* Good status, continue checking */
456 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 597 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
457 if (CTrsp->CommandResponse.bits.CmdRsp == 598 if (CTrsp->CommandResponse.bits.CmdRsp ==
458 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { 599 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
459 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 600 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
460 "%d:0208 NameServer Rsp " 601 "%d (%d):0208 NameServer Rsp "
461 "Data: x%x\n", 602 "Data: x%x\n",
462 phba->brd_no, 603 phba->brd_no, vport->vpi,
463 phba->fc_flag); 604 vport->fc_flag);
464 lpfc_ns_rsp(phba, outp, 605 lpfc_ns_rsp(vport, outp,
465 (uint32_t) (irsp->un.genreq64.bdl.bdeSize)); 606 (uint32_t) (irsp->un.genreq64.bdl.bdeSize));
466 } else if (CTrsp->CommandResponse.bits.CmdRsp == 607 } else if (CTrsp->CommandResponse.bits.CmdRsp ==
467 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 608 be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
468 /* NameServer Rsp Error */ 609 /* NameServer Rsp Error */
469 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 610 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
470 "%d:0240 NameServer Rsp Error " 611 "%d (%d):0240 NameServer Rsp Error "
471 "Data: x%x x%x x%x x%x\n", 612 "Data: x%x x%x x%x x%x\n",
472 phba->brd_no, 613 phba->brd_no, vport->vpi,
473 CTrsp->CommandResponse.bits.CmdRsp, 614 CTrsp->CommandResponse.bits.CmdRsp,
474 (uint32_t) CTrsp->ReasonCode, 615 (uint32_t) CTrsp->ReasonCode,
475 (uint32_t) CTrsp->Explanation, 616 (uint32_t) CTrsp->Explanation,
476 phba->fc_flag); 617 vport->fc_flag);
618
619 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
620 "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x",
621 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
622 (uint32_t) CTrsp->ReasonCode,
623 (uint32_t) CTrsp->Explanation);
624
477 } else { 625 } else {
478 /* NameServer Rsp Error */ 626 /* NameServer Rsp Error */
479 lpfc_printf_log(phba, 627 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
480 KERN_INFO, 628 "%d (%d):0241 NameServer Rsp Error "
481 LOG_DISCOVERY,
482 "%d:0241 NameServer Rsp Error "
483 "Data: x%x x%x x%x x%x\n", 629 "Data: x%x x%x x%x x%x\n",
484 phba->brd_no, 630 phba->brd_no, vport->vpi,
485 CTrsp->CommandResponse.bits.CmdRsp, 631 CTrsp->CommandResponse.bits.CmdRsp,
486 (uint32_t) CTrsp->ReasonCode, 632 (uint32_t) CTrsp->ReasonCode,
487 (uint32_t) CTrsp->Explanation, 633 (uint32_t) CTrsp->Explanation,
488 phba->fc_flag); 634 vport->fc_flag);
635
636 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
637 "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x",
638 (uint32_t)CTrsp->CommandResponse.bits.CmdRsp,
639 (uint32_t) CTrsp->ReasonCode,
640 (uint32_t) CTrsp->Explanation);
489 } 641 }
490 } 642 }
491 /* Link up / RSCN discovery */ 643 /* Link up / RSCN discovery */
492 lpfc_disc_start(phba); 644 if (vport->num_disc_nodes == 0) {
645 /*
646 * The driver has cycled through all Nports in the RSCN payload.
647 * Complete the handling by cleaning up and marking the
648 * current driver state.
649 */
650 if (vport->port_state >= LPFC_DISC_AUTH) {
651 if (vport->fc_flag & FC_RSCN_MODE) {
652 lpfc_els_flush_rscn(vport);
653 spin_lock_irq(shost->host_lock);
654 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
655 spin_unlock_irq(shost->host_lock);
656 }
657 else
658 lpfc_els_flush_rscn(vport);
659 }
660
661 lpfc_disc_start(vport);
662 }
493out: 663out:
494 lpfc_free_ct_rsp(phba, outp); 664 lpfc_ct_free_iocb(phba, cmdiocb);
495 lpfc_mbuf_free(phba, inp->virt, inp->phys);
496 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
497 kfree(inp);
498 kfree(bmp);
499 spin_lock_irq(phba->host->host_lock);
500 lpfc_sli_release_iocbq(phba, cmdiocb);
501 spin_unlock_irq(phba->host->host_lock);
502 return; 665 return;
503} 666}
504 667
668void
669lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
670 struct lpfc_iocbq *rspiocb)
671{
672 struct lpfc_vport *vport = cmdiocb->vport;
673 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
674 IOCB_t *irsp = &rspiocb->iocb;
675 struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *) cmdiocb->context1;
676 struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *) cmdiocb->context2;
677 struct lpfc_sli_ct_request *CTrsp;
678 int did;
679 uint8_t fbits;
680 struct lpfc_nodelist *ndlp;
681
682 did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
683 did = be32_to_cpu(did);
684
685 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
686 "GFF_ID cmpl: status:x%x/x%x did:x%x",
687 irsp->ulpStatus, irsp->un.ulpWord[4], did);
688
689 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
690 /* Good status, continue checking */
691 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
692 fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
693
694 if (CTrsp->CommandResponse.bits.CmdRsp ==
695 be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
696 if ((fbits & FC4_FEATURE_INIT) &&
697 !(fbits & FC4_FEATURE_TARGET)) {
698 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
699 "%d (%d):0245 Skip x%x GFF "
700 "NameServer Rsp Data: (init) "
701 "x%x x%x\n", phba->brd_no,
702 vport->vpi, did, fbits,
703 vport->fc_rscn_id_cnt);
704 goto out;
705 }
706 }
707 }
708 else {
709 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
710 "%d (%d):0267 NameServer GFF Rsp"
711 " x%x Error (%d %d) Data: x%x x%x\n",
712 phba->brd_no, vport->vpi, did,
713 irsp->ulpStatus, irsp->un.ulpWord[4],
714 vport->fc_flag, vport->fc_rscn_id_cnt);
715 }
716
717 /* This is a target port, unregistered port, or the GFF_ID failed */
718 ndlp = lpfc_setup_disc_node(vport, did);
719 if (ndlp) {
720 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
721 "%d (%d):0242 Process x%x GFF "
722 "NameServer Rsp Data: x%x x%x x%x\n",
723 phba->brd_no, vport->vpi,
724 did, ndlp->nlp_flag, vport->fc_flag,
725 vport->fc_rscn_id_cnt);
726 } else {
727 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
728 "%d (%d):0243 Skip x%x GFF "
729 "NameServer Rsp Data: x%x x%x\n",
730 phba->brd_no, vport->vpi, did,
731 vport->fc_flag, vport->fc_rscn_id_cnt);
732 }
733out:
734 /* Link up / RSCN discovery */
735 if (vport->num_disc_nodes)
736 vport->num_disc_nodes--;
737 if (vport->num_disc_nodes == 0) {
738 /*
739 * The driver has cycled through all Nports in the RSCN payload.
740 * Complete the handling by cleaning up and marking the
741 * current driver state.
742 */
743 if (vport->port_state >= LPFC_DISC_AUTH) {
744 if (vport->fc_flag & FC_RSCN_MODE) {
745 lpfc_els_flush_rscn(vport);
746 spin_lock_irq(shost->host_lock);
747 vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
748 spin_unlock_irq(shost->host_lock);
749 }
750 else
751 lpfc_els_flush_rscn(vport);
752 }
753 lpfc_disc_start(vport);
754 }
755 lpfc_ct_free_iocb(phba, cmdiocb);
756 return;
757}
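The GFF_ID completion above reads the FC-4 feature bits for the FCP type and drops ports that advertise only the initiator feature, since they can never be SCSI targets; a failed or unsupported GFF_ID falls through to normal node setup. A minimal model of the test (the bit values mirror FC4_FEATURE_INIT/FC4_FEATURE_TARGET but should be treated as assumptions here):

    #include <stdint.h>
    #include <stdio.h>

    #define FC4_FEATURE_TARGET 0x1  /* assumed encodings */
    #define FC4_FEATURE_INIT   0x2

    static int skip_port(uint8_t fbits)
    {
        /* Initiator-only ports are not interesting as SCSI targets. */
        return (fbits & FC4_FEATURE_INIT) && !(fbits & FC4_FEATURE_TARGET);
    }

    int main(void)
    {
        printf("initiator-only: skip=%d\n", skip_port(FC4_FEATURE_INIT));
        printf("target:         skip=%d\n", skip_port(FC4_FEATURE_TARGET));
        printf("both:           skip=%d\n",
               skip_port(FC4_FEATURE_INIT | FC4_FEATURE_TARGET));
        return 0;
    }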
758
759
505static void 760static void
506lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 761lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
507 struct lpfc_iocbq * rspiocb) 762 struct lpfc_iocbq *rspiocb)
508{ 763{
509 struct lpfc_sli *psli; 764 struct lpfc_vport *vport = cmdiocb->vport;
510 struct lpfc_dmabuf *bmp;
511 struct lpfc_dmabuf *inp; 765 struct lpfc_dmabuf *inp;
512 struct lpfc_dmabuf *outp; 766 struct lpfc_dmabuf *outp;
513 IOCB_t *irsp; 767 IOCB_t *irsp;
514 struct lpfc_sli_ct_request *CTrsp; 768 struct lpfc_sli_ct_request *CTrsp;
769 int cmdcode, rc;
770 uint8_t retry;
771 uint32_t latt;
515 772
516 psli = &phba->sli;
517 /* we pass cmdiocb to state machine which needs rspiocb as well */ 773 /* we pass cmdiocb to state machine which needs rspiocb as well */
518 cmdiocb->context_un.rsp_iocb = rspiocb; 774 cmdiocb->context_un.rsp_iocb = rspiocb;
519 775
520 inp = (struct lpfc_dmabuf *) cmdiocb->context1; 776 inp = (struct lpfc_dmabuf *) cmdiocb->context1;
521 outp = (struct lpfc_dmabuf *) cmdiocb->context2; 777 outp = (struct lpfc_dmabuf *) cmdiocb->context2;
522 bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
523 irsp = &rspiocb->iocb; 778 irsp = &rspiocb->iocb;
524 779
780 cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
781 CommandResponse.bits.CmdRsp);
525 CTrsp = (struct lpfc_sli_ct_request *) outp->virt; 782 CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
526 783
784 latt = lpfc_els_chk_latt(vport);
785
527 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */ 786 /* RFT request completes status <ulpStatus> CmdRsp <CmdRsp> */
528 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 787 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
529 "%d:0209 RFT request completes ulpStatus x%x " 788 "%d (%d):0209 RFT request completes, latt %d, "
530 "CmdRsp x%x\n", phba->brd_no, irsp->ulpStatus, 789 "ulpStatus x%x CmdRsp x%x, Context x%x, Tag x%x\n",
531 CTrsp->CommandResponse.bits.CmdRsp); 790 phba->brd_no, vport->vpi, latt, irsp->ulpStatus,
791 CTrsp->CommandResponse.bits.CmdRsp,
792 cmdiocb->iocb.ulpContext, cmdiocb->iocb.ulpIoTag);
532 793
533 lpfc_free_ct_rsp(phba, outp); 794 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
534 lpfc_mbuf_free(phba, inp->virt, inp->phys); 795 "CT cmd cmpl: status:x%x/x%x cmd:x%x",
535 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 796 irsp->ulpStatus, irsp->un.ulpWord[4], cmdcode);
536 kfree(inp); 797
537 kfree(bmp); 798 if (irsp->ulpStatus) {
538 spin_lock_irq(phba->host->host_lock); 799 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
539 lpfc_sli_release_iocbq(phba, cmdiocb); 800 "%d (%d):0268 NS cmd %x Error (%d %d)\n",
540 spin_unlock_irq(phba->host->host_lock); 801 phba->brd_no, vport->vpi, cmdcode,
802 irsp->ulpStatus, irsp->un.ulpWord[4]);
803
804 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
805 ((irsp->un.ulpWord[4] == IOERR_SLI_DOWN) ||
806 (irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)))
807 goto out;
808
809 retry = cmdiocb->retry;
810 if (retry >= LPFC_MAX_NS_RETRY)
811 goto out;
812
813 retry++;
814 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
815 "%d (%d):0216 Retrying NS cmd %x\n",
816 phba->brd_no, vport->vpi, cmdcode);
817 rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
818 if (rc == 0)
819 goto out;
820 }
821
822out:
823 lpfc_ct_free_iocb(phba, cmdiocb);
541 return; 824 return;
542} 825}
543 826
544static void 827static void
545lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 828lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
546 struct lpfc_iocbq * rspiocb) 829 struct lpfc_iocbq *rspiocb)
547{ 830{
548 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 831 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
549 return; 832 return;
550} 833}
551 834
552static void 835static void
553lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 836lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
554 struct lpfc_iocbq * rspiocb) 837 struct lpfc_iocbq *rspiocb)
555{ 838{
556 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 839 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
557 return; 840 return;
558} 841}
559 842
560static void 843static void
561lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 844lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
562 struct lpfc_iocbq * rspiocb) 845 struct lpfc_iocbq *rspiocb)
563{ 846{
564 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb); 847 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
565 return; 848 return;
566} 849}
567 850
568void 851static void
569lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp) 852lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
853 struct lpfc_iocbq *rspiocb)
570{ 854{
571 char fwrev[16]; 855 IOCB_t *irsp = &rspiocb->iocb;
856 struct lpfc_vport *vport = cmdiocb->vport;
572 857
573 lpfc_decode_firmware_rev(phba, fwrev, 0); 858 if (irsp->ulpStatus != IOSTAT_SUCCESS)
859 vport->fc_flag |= FC_RFF_NOT_SUPPORTED;
574 860
575 sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName, 861 lpfc_cmpl_ct_cmd_rft_id(phba, cmdiocb, rspiocb);
576 fwrev, lpfc_release_version);
577 return; 862 return;
578} 863}
579 864
865int
866lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
867 size_t size)
868{
869 int n;
870 uint8_t *wwn = vport->phba->wwpn;
871
872 n = snprintf(symbol, size,
873 "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
874 wwn[0], wwn[1], wwn[2], wwn[3],
875 wwn[4], wwn[5], wwn[6], wwn[7]);
876
877 if (vport->port_type == LPFC_PHYSICAL_PORT)
878 return n;
879
880 if (n < size)
881 n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
882
883 if (n < size && vport->vname)
884 n += snprintf(symbol + n, size - n, " VName-%s", vport->vname);
885 return n;
886}
887
888int
889lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
890 size_t size)
891{
892 char fwrev[16];
893 int n;
894
895 lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
896
897 n = snprintf(symbol, size, "Emulex %s FV%s DV%s",
898 vport->phba->ModelName, fwrev, lpfc_release_version);
899 return n;
900}
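These two helpers replace lpfc_get_hba_sym_node_name(); besides the vport-qualified names, they return the byte count so callers can fill un.rspn.len/un.rsnn.len directly instead of running strlen() afterwards. A runnable approximation of the port-name variant, using a made-up WWPN:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t wwn[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x3c, 0x12, 0x34 };
        char symbol[64];
        int n;

        n = snprintf(symbol, sizeof(symbol),
                     "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
                     wwn[0], wwn[1], wwn[2], wwn[3],
                     wwn[4], wwn[5], wwn[6], wwn[7]);
        if (n >= 0 && (size_t)n < sizeof(symbol))  /* virtual ports append a tag */
            n += snprintf(symbol + n, sizeof(symbol) - n, " VPort-%d", 1);
        printf("%s (len %d)\n", symbol, n);
        return 0;
    }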
901
580/* 902/*
581 * lpfc_ns_cmd 903 * lpfc_ns_cmd
582 * Description: 904 * Description:
@@ -585,55 +907,76 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
585 * SLI_CTNS_RFT_ID 907 * SLI_CTNS_RFT_ID
586 */ 908 */
587int 909int
588lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) 910lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
911 uint8_t retry, uint32_t context)
589{ 912{
913 struct lpfc_nodelist * ndlp;
914 struct lpfc_hba *phba = vport->phba;
590 struct lpfc_dmabuf *mp, *bmp; 915 struct lpfc_dmabuf *mp, *bmp;
591 struct lpfc_sli_ct_request *CtReq; 916 struct lpfc_sli_ct_request *CtReq;
592 struct ulp_bde64 *bpl; 917 struct ulp_bde64 *bpl;
593 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 918 void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
594 struct lpfc_iocbq *) = NULL; 919 struct lpfc_iocbq *) = NULL;
595 uint32_t rsp_size = 1024; 920 uint32_t rsp_size = 1024;
921 size_t size;
922 int rc = 0;
923
924 ndlp = lpfc_findnode_did(vport, NameServer_DID);
925 if (ndlp == NULL || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
926 rc = 1;
927 goto ns_cmd_exit;
928 }
596 929
597 /* fill in BDEs for command */ 930 /* fill in BDEs for command */
598 /* Allocate buffer for command payload */ 931 /* Allocate buffer for command payload */
599 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 932 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
600 if (!mp) 933 if (!mp) {
934 rc = 2;
601 goto ns_cmd_exit; 935 goto ns_cmd_exit;
936 }
602 937
603 INIT_LIST_HEAD(&mp->list); 938 INIT_LIST_HEAD(&mp->list);
604 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); 939 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
605 if (!mp->virt) 940 if (!mp->virt) {
941 rc = 3;
606 goto ns_cmd_free_mp; 942 goto ns_cmd_free_mp;
943 }
607 944
608 /* Allocate buffer for Buffer ptr list */ 945 /* Allocate buffer for Buffer ptr list */
609 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 946 bmp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
610 if (!bmp) 947 if (!bmp) {
948 rc = 4;
611 goto ns_cmd_free_mpvirt; 949 goto ns_cmd_free_mpvirt;
950 }
612 951
613 INIT_LIST_HEAD(&bmp->list); 952 INIT_LIST_HEAD(&bmp->list);
614 bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys)); 953 bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
615 if (!bmp->virt) 954 if (!bmp->virt) {
955 rc = 5;
616 goto ns_cmd_free_bmp; 956 goto ns_cmd_free_bmp;
957 }
617 958
618 /* NameServer Req */ 959 /* NameServer Req */
619 lpfc_printf_log(phba, 960 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
620 KERN_INFO, 961 "%d (%d):0236 NameServer Req Data: x%x x%x x%x\n",
621 LOG_DISCOVERY, 962 phba->brd_no, vport->vpi, cmdcode, vport->fc_flag,
622 "%d:0236 NameServer Req Data: x%x x%x x%x\n", 963 vport->fc_rscn_id_cnt);
623 phba->brd_no, cmdcode, phba->fc_flag,
624 phba->fc_rscn_id_cnt);
625 964
626 bpl = (struct ulp_bde64 *) bmp->virt; 965 bpl = (struct ulp_bde64 *) bmp->virt;
627 memset(bpl, 0, sizeof(struct ulp_bde64)); 966 memset(bpl, 0, sizeof(struct ulp_bde64));
628 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 967 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
629 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 968 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
630 bpl->tus.f.bdeFlags = 0; 969 bpl->tus.f.bdeFlags = 0;
631 if (cmdcode == SLI_CTNS_GID_FT) 970 if (cmdcode == SLI_CTNS_GID_FT)
632 bpl->tus.f.bdeSize = GID_REQUEST_SZ; 971 bpl->tus.f.bdeSize = GID_REQUEST_SZ;
972 else if (cmdcode == SLI_CTNS_GFF_ID)
973 bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
633 else if (cmdcode == SLI_CTNS_RFT_ID) 974 else if (cmdcode == SLI_CTNS_RFT_ID)
634 bpl->tus.f.bdeSize = RFT_REQUEST_SZ; 975 bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
635 else if (cmdcode == SLI_CTNS_RNN_ID) 976 else if (cmdcode == SLI_CTNS_RNN_ID)
636 bpl->tus.f.bdeSize = RNN_REQUEST_SZ; 977 bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
978 else if (cmdcode == SLI_CTNS_RSPN_ID)
979 bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
637 else if (cmdcode == SLI_CTNS_RSNN_NN) 980 else if (cmdcode == SLI_CTNS_RSNN_NN)
638 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; 981 bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
639 else if (cmdcode == SLI_CTNS_RFF_ID) 982 else if (cmdcode == SLI_CTNS_RFF_ID)
@@ -654,56 +997,78 @@ lpfc_ns_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
654 CtReq->CommandResponse.bits.CmdRsp = 997 CtReq->CommandResponse.bits.CmdRsp =
655 be16_to_cpu(SLI_CTNS_GID_FT); 998 be16_to_cpu(SLI_CTNS_GID_FT);
656 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP; 999 CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
657 if (phba->hba_state < LPFC_HBA_READY) 1000 if (vport->port_state < LPFC_NS_QRY)
658 phba->hba_state = LPFC_NS_QRY; 1001 vport->port_state = LPFC_NS_QRY;
659 lpfc_set_disctmo(phba); 1002 lpfc_set_disctmo(vport);
660 cmpl = lpfc_cmpl_ct_cmd_gid_ft; 1003 cmpl = lpfc_cmpl_ct_cmd_gid_ft;
661 rsp_size = FC_MAX_NS_RSP; 1004 rsp_size = FC_MAX_NS_RSP;
662 break; 1005 break;
663 1006
1007 case SLI_CTNS_GFF_ID:
1008 CtReq->CommandResponse.bits.CmdRsp =
1009 be16_to_cpu(SLI_CTNS_GFF_ID);
1010 CtReq->un.gff.PortId = be32_to_cpu(context);
1011 cmpl = lpfc_cmpl_ct_cmd_gff_id;
1012 break;
1013
664 case SLI_CTNS_RFT_ID: 1014 case SLI_CTNS_RFT_ID:
665 CtReq->CommandResponse.bits.CmdRsp = 1015 CtReq->CommandResponse.bits.CmdRsp =
666 be16_to_cpu(SLI_CTNS_RFT_ID); 1016 be16_to_cpu(SLI_CTNS_RFT_ID);
667 CtReq->un.rft.PortId = be32_to_cpu(phba->fc_myDID); 1017 CtReq->un.rft.PortId = be32_to_cpu(vport->fc_myDID);
668 CtReq->un.rft.fcpReg = 1; 1018 CtReq->un.rft.fcpReg = 1;
669 cmpl = lpfc_cmpl_ct_cmd_rft_id; 1019 cmpl = lpfc_cmpl_ct_cmd_rft_id;
670 break; 1020 break;
671 1021
672 case SLI_CTNS_RFF_ID:
673 CtReq->CommandResponse.bits.CmdRsp =
674 be16_to_cpu(SLI_CTNS_RFF_ID);
675 CtReq->un.rff.PortId = be32_to_cpu(phba->fc_myDID);
676 CtReq->un.rff.feature_res = 0;
677 CtReq->un.rff.feature_tgt = 0;
678 CtReq->un.rff.type_code = FC_FCP_DATA;
679 CtReq->un.rff.feature_init = 1;
680 cmpl = lpfc_cmpl_ct_cmd_rff_id;
681 break;
682
683 case SLI_CTNS_RNN_ID: 1022 case SLI_CTNS_RNN_ID:
684 CtReq->CommandResponse.bits.CmdRsp = 1023 CtReq->CommandResponse.bits.CmdRsp =
685 be16_to_cpu(SLI_CTNS_RNN_ID); 1024 be16_to_cpu(SLI_CTNS_RNN_ID);
686 CtReq->un.rnn.PortId = be32_to_cpu(phba->fc_myDID); 1025 CtReq->un.rnn.PortId = be32_to_cpu(vport->fc_myDID);
687 memcpy(CtReq->un.rnn.wwnn, &phba->fc_nodename, 1026 memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename,
688 sizeof (struct lpfc_name)); 1027 sizeof (struct lpfc_name));
689 cmpl = lpfc_cmpl_ct_cmd_rnn_id; 1028 cmpl = lpfc_cmpl_ct_cmd_rnn_id;
690 break; 1029 break;
691 1030
1031 case SLI_CTNS_RSPN_ID:
1032 CtReq->CommandResponse.bits.CmdRsp =
1033 be16_to_cpu(SLI_CTNS_RSPN_ID);
1034 CtReq->un.rspn.PortId = be32_to_cpu(vport->fc_myDID);
1035 size = sizeof(CtReq->un.rspn.symbname);
1036 CtReq->un.rspn.len =
1037 lpfc_vport_symbolic_port_name(vport,
1038 CtReq->un.rspn.symbname, size);
1039 cmpl = lpfc_cmpl_ct_cmd_rspn_id;
1040 break;
692 case SLI_CTNS_RSNN_NN: 1041 case SLI_CTNS_RSNN_NN:
693 CtReq->CommandResponse.bits.CmdRsp = 1042 CtReq->CommandResponse.bits.CmdRsp =
694 be16_to_cpu(SLI_CTNS_RSNN_NN); 1043 be16_to_cpu(SLI_CTNS_RSNN_NN);
695 memcpy(CtReq->un.rsnn.wwnn, &phba->fc_nodename, 1044 memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
696 sizeof (struct lpfc_name)); 1045 sizeof (struct lpfc_name));
697 lpfc_get_hba_sym_node_name(phba, CtReq->un.rsnn.symbname); 1046 size = sizeof(CtReq->un.rsnn.symbname);
698 CtReq->un.rsnn.len = strlen(CtReq->un.rsnn.symbname); 1047 CtReq->un.rsnn.len =
1048 lpfc_vport_symbolic_node_name(vport,
1049 CtReq->un.rsnn.symbname, size);
699 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; 1050 cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
700 break; 1051 break;
1052 case SLI_CTNS_RFF_ID:
1053 vport->fc_flag &= ~FC_RFF_NOT_SUPPORTED;
1054 CtReq->CommandResponse.bits.CmdRsp =
1055 be16_to_cpu(SLI_CTNS_RFF_ID);
1056 CtReq->un.rff.PortId = be32_to_cpu(vport->fc_myDID);
1057 CtReq->un.rff.fbits = FC4_FEATURE_INIT;
1058 CtReq->un.rff.type_code = FC_FCP_DATA;
1059 cmpl = lpfc_cmpl_ct_cmd_rff_id;
1060 break;
701 } 1061 }
702 1062
703 if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, rsp_size)) 1063 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
704 /* On success, The cmpl function will free the buffers */ 1064 /* On success, The cmpl function will free the buffers */
1065 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1066 "Issue CT cmd: cmd:x%x did:x%x",
1067 cmdcode, ndlp->nlp_DID, 0);
705 return 0; 1068 return 0;
1069 }
706 1070
1071 rc = 6;
707 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1072 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
708ns_cmd_free_bmp: 1073ns_cmd_free_bmp:
709 kfree(bmp); 1074 kfree(bmp);
@@ -712,14 +1077,17 @@ ns_cmd_free_mpvirt:
712ns_cmd_free_mp: 1077ns_cmd_free_mp:
713 kfree(mp); 1078 kfree(mp);
714ns_cmd_exit: 1079ns_cmd_exit:
1080 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
1081 "%d (%d):0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
1082 phba->brd_no, vport->vpi, cmdcode, rc, vport->fc_flag,
1083 vport->fc_rscn_id_cnt);
715 return 1; 1084 return 1;
716} 1085}
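With the widened lpfc_ns_cmd() signature, the command code alone no longer determines the request: retry seeds the retry counter carried on the iocb, and context supplies the subject DID for GFF_ID while registrations pass 0. Both call forms already appear in the hunks above and are repeated here only for quick reference:

    lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, 0); /* retry count */
    lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, 0, Did);                /* context = DID */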
717 1086
718static void 1087static void
719lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba, 1088lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
720 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb) 1089 struct lpfc_iocbq * rspiocb)
721{ 1090{
722 struct lpfc_dmabuf *bmp = cmdiocb->context3;
723 struct lpfc_dmabuf *inp = cmdiocb->context1; 1091 struct lpfc_dmabuf *inp = cmdiocb->context1;
724 struct lpfc_dmabuf *outp = cmdiocb->context2; 1092 struct lpfc_dmabuf *outp = cmdiocb->context2;
725 struct lpfc_sli_ct_request *CTrsp = outp->virt; 1093 struct lpfc_sli_ct_request *CTrsp = outp->virt;
@@ -727,48 +1095,60 @@ lpfc_cmpl_ct_cmd_fdmi(struct lpfc_hba * phba,
727 struct lpfc_nodelist *ndlp; 1095 struct lpfc_nodelist *ndlp;
728 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; 1096 uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
729 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; 1097 uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
1098 struct lpfc_vport *vport = cmdiocb->vport;
1099 IOCB_t *irsp = &rspiocb->iocb;
1100 uint32_t latt;
1101
1102 latt = lpfc_els_chk_latt(vport);
1103
1104 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1105 "FDMI cmpl: status:x%x/x%x latt:%d",
1106 irsp->ulpStatus, irsp->un.ulpWord[4], latt);
1107
1108 if (latt || irsp->ulpStatus) {
1109 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1110 "%d (%d):0229 FDMI cmd %04x failed, latt = %d "
1111 "ulpStatus: x%x, rid x%x\n",
1112 phba->brd_no, vport->vpi,
1113 be16_to_cpu(fdmi_cmd), latt, irsp->ulpStatus,
1114 irsp->un.ulpWord[4]);
1115 lpfc_ct_free_iocb(phba, cmdiocb);
1116 return;
1117 }
730 1118
731 ndlp = lpfc_findnode_did(phba, FDMI_DID); 1119 ndlp = lpfc_findnode_did(vport, FDMI_DID);
732 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { 1120 if (fdmi_rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
733 /* FDMI rsp failed */ 1121 /* FDMI rsp failed */
734 lpfc_printf_log(phba, 1122 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
735 KERN_INFO, 1123 "%d (%d):0220 FDMI rsp failed Data: x%x\n",
736 LOG_DISCOVERY, 1124 phba->brd_no, vport->vpi,
737 "%d:0220 FDMI rsp failed Data: x%x\n", 1125 be16_to_cpu(fdmi_cmd));
738 phba->brd_no,
739 be16_to_cpu(fdmi_cmd));
740 } 1126 }
741 1127
742 switch (be16_to_cpu(fdmi_cmd)) { 1128 switch (be16_to_cpu(fdmi_cmd)) {
743 case SLI_MGMT_RHBA: 1129 case SLI_MGMT_RHBA:
744 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RPA); 1130 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA);
745 break; 1131 break;
746 1132
747 case SLI_MGMT_RPA: 1133 case SLI_MGMT_RPA:
748 break; 1134 break;
749 1135
750 case SLI_MGMT_DHBA: 1136 case SLI_MGMT_DHBA:
751 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DPRT); 1137 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT);
752 break; 1138 break;
753 1139
754 case SLI_MGMT_DPRT: 1140 case SLI_MGMT_DPRT:
755 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_RHBA); 1141 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA);
756 break; 1142 break;
757 } 1143 }
758 1144 lpfc_ct_free_iocb(phba, cmdiocb);
759 lpfc_free_ct_rsp(phba, outp);
760 lpfc_mbuf_free(phba, inp->virt, inp->phys);
761 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
762 kfree(inp);
763 kfree(bmp);
764 spin_lock_irq(phba->host->host_lock);
765 lpfc_sli_release_iocbq(phba, cmdiocb);
766 spin_unlock_irq(phba->host->host_lock);
767 return; 1145 return;
768} 1146}
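The switch in lpfc_cmpl_ct_cmd_fdmi() chains the FDMI registrations: completing DHBA issues DPRT, DPRT issues RHBA, and RHBA issues RPA, so one kick walks the whole deregister-then-reregister sequence. A toy walk of that chain (enum names stand in for the SLI_MGMT_* codes):

    #include <stdio.h>

    enum fdmi_cmd { DHBA, DPRT, RHBA, RPA, DONE };

    /* Mirror of the completion switch: which command follows the one
     * that just finished. RPA ends the sequence. */
    static enum fdmi_cmd next(enum fdmi_cmd done)
    {
        switch (done) {
        case DHBA: return DPRT;
        case DPRT: return RHBA;
        case RHBA: return RPA;
        default:   return DONE;
        }
    }

    int main(void)
    {
        static const char *name[] = { "DHBA", "DPRT", "RHBA", "RPA", "done" };
        enum fdmi_cmd c = DHBA;

        while (c != DONE) {
            printf("%s -> ", name[c]);
            c = next(c);
        }
        puts(name[DONE]);
        return 0;
    }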
1147
769int 1148int
770lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode) 1149lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
771{ 1150{
1151 struct lpfc_hba *phba = vport->phba;
772 struct lpfc_dmabuf *mp, *bmp; 1152 struct lpfc_dmabuf *mp, *bmp;
773 struct lpfc_sli_ct_request *CtReq; 1153 struct lpfc_sli_ct_request *CtReq;
774 struct ulp_bde64 *bpl; 1154 struct ulp_bde64 *bpl;
@@ -805,12 +1185,10 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
805 INIT_LIST_HEAD(&bmp->list); 1185 INIT_LIST_HEAD(&bmp->list);
806 1186
807 /* FDMI request */ 1187 /* FDMI request */
808 lpfc_printf_log(phba, 1188 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
809 KERN_INFO, 1189 "%d (%d):0218 FDMI Request Data: x%x x%x x%x\n",
810 LOG_DISCOVERY, 1190 phba->brd_no, vport->vpi, vport->fc_flag,
811 "%d:0218 FDMI Request Data: x%x x%x x%x\n", 1191 vport->port_state, cmdcode);
812 phba->brd_no,
813 phba->fc_flag, phba->hba_state, cmdcode);
814 1192
815 CtReq = (struct lpfc_sli_ct_request *) mp->virt; 1193 CtReq = (struct lpfc_sli_ct_request *) mp->virt;
816 1194
@@ -833,11 +1211,11 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
833 be16_to_cpu(SLI_MGMT_RHBA); 1211 be16_to_cpu(SLI_MGMT_RHBA);
834 CtReq->CommandResponse.bits.Size = 0; 1212 CtReq->CommandResponse.bits.Size = 0;
835 rh = (REG_HBA *) & CtReq->un.PortID; 1213 rh = (REG_HBA *) & CtReq->un.PortID;
836 memcpy(&rh->hi.PortName, &phba->fc_sparam.portName, 1214 memcpy(&rh->hi.PortName, &vport->fc_sparam.portName,
837 sizeof (struct lpfc_name)); 1215 sizeof (struct lpfc_name));
838 /* One entry (port) per adapter */ 1216 /* One entry (port) per adapter */
839 rh->rpl.EntryCnt = be32_to_cpu(1); 1217 rh->rpl.EntryCnt = be32_to_cpu(1);
840 memcpy(&rh->rpl.pe, &phba->fc_sparam.portName, 1218 memcpy(&rh->rpl.pe, &vport->fc_sparam.portName,
841 sizeof (struct lpfc_name)); 1219 sizeof (struct lpfc_name));
842 1220
843 /* point to the HBA attribute block */ 1221 /* point to the HBA attribute block */
@@ -853,7 +1231,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
853 ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME); 1231 ae->ad.bits.AttrType = be16_to_cpu(NODE_NAME);
854 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES 1232 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES
855 + sizeof (struct lpfc_name)); 1233 + sizeof (struct lpfc_name));
856 memcpy(&ae->un.NodeName, &phba->fc_sparam.nodeName, 1234 memcpy(&ae->un.NodeName, &vport->fc_sparam.nodeName,
857 sizeof (struct lpfc_name)); 1235 sizeof (struct lpfc_name));
858 ab->EntryCnt++; 1236 ab->EntryCnt++;
859 size += FOURBYTES + sizeof (struct lpfc_name); 1237 size += FOURBYTES + sizeof (struct lpfc_name);
@@ -991,7 +1369,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
991 pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID; 1369 pab = (REG_PORT_ATTRIBUTE *) & CtReq->un.PortID;
992 size = sizeof (struct lpfc_name) + FOURBYTES; 1370 size = sizeof (struct lpfc_name) + FOURBYTES;
993 memcpy((uint8_t *) & pab->PortName, 1371 memcpy((uint8_t *) & pab->PortName,
994 (uint8_t *) & phba->fc_sparam.portName, 1372 (uint8_t *) & vport->fc_sparam.portName,
995 sizeof (struct lpfc_name)); 1373 sizeof (struct lpfc_name));
996 pab->ab.EntryCnt = 0; 1374 pab->ab.EntryCnt = 0;
997 1375
@@ -1053,7 +1431,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1053 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size); 1431 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) pab + size);
1054 ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE); 1432 ae->ad.bits.AttrType = be16_to_cpu(MAX_FRAME_SIZE);
1055 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4); 1433 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + 4);
1056 hsp = (struct serv_parm *) & phba->fc_sparam; 1434 hsp = (struct serv_parm *) & vport->fc_sparam;
1057 ae->un.MaxFrameSize = 1435 ae->un.MaxFrameSize =
1058 (((uint32_t) hsp->cmn. 1436 (((uint32_t) hsp->cmn.
1059 bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn. 1437 bbRcvSizeMsb) << 8) | (uint32_t) hsp->cmn.
@@ -1097,7 +1475,7 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1097 CtReq->CommandResponse.bits.Size = 0; 1475 CtReq->CommandResponse.bits.Size = 0;
1098 pe = (PORT_ENTRY *) & CtReq->un.PortID; 1476 pe = (PORT_ENTRY *) & CtReq->un.PortID;
1099 memcpy((uint8_t *) & pe->PortName, 1477 memcpy((uint8_t *) & pe->PortName,
1100 (uint8_t *) & phba->fc_sparam.portName, 1478 (uint8_t *) & vport->fc_sparam.portName,
1101 sizeof (struct lpfc_name)); 1479 sizeof (struct lpfc_name));
1102 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); 1480 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
1103 break; 1481 break;
@@ -1107,22 +1485,22 @@ lpfc_fdmi_cmd(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, int cmdcode)
1107 CtReq->CommandResponse.bits.Size = 0; 1485 CtReq->CommandResponse.bits.Size = 0;
1108 pe = (PORT_ENTRY *) & CtReq->un.PortID; 1486 pe = (PORT_ENTRY *) & CtReq->un.PortID;
1109 memcpy((uint8_t *) & pe->PortName, 1487 memcpy((uint8_t *) & pe->PortName,
1110 (uint8_t *) & phba->fc_sparam.portName, 1488 (uint8_t *) & vport->fc_sparam.portName,
1111 sizeof (struct lpfc_name)); 1489 sizeof (struct lpfc_name));
1112 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name); 1490 size = GID_REQUEST_SZ - 4 + sizeof (struct lpfc_name);
1113 break; 1491 break;
1114 } 1492 }
1115 1493
1116 bpl = (struct ulp_bde64 *) bmp->virt; 1494 bpl = (struct ulp_bde64 *) bmp->virt;
1117 bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) ); 1495 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
1118 bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) ); 1496 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
1119 bpl->tus.f.bdeFlags = 0; 1497 bpl->tus.f.bdeFlags = 0;
1120 bpl->tus.f.bdeSize = size; 1498 bpl->tus.f.bdeSize = size;
1121 bpl->tus.w = le32_to_cpu(bpl->tus.w); 1499 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1122 1500
1123 cmpl = lpfc_cmpl_ct_cmd_fdmi; 1501 cmpl = lpfc_cmpl_ct_cmd_fdmi;
1124 1502
1125 if (!lpfc_ct_cmd(phba, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP)) 1503 if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, FC_MAX_NS_RSP, 0))
1126 return 0; 1504 return 0;
1127 1505
1128 lpfc_mbuf_free(phba, bmp->virt, bmp->phys); 1506 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
@@ -1134,49 +1512,50 @@ fdmi_cmd_free_mp:
1134 kfree(mp); 1512 kfree(mp);
1135fdmi_cmd_exit: 1513fdmi_cmd_exit:
1136 /* Issue FDMI request failed */ 1514 /* Issue FDMI request failed */
1137 lpfc_printf_log(phba, 1515 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1138 KERN_INFO, 1516 "%d (%d):0244 Issue FDMI request failed Data: x%x\n",
1139 LOG_DISCOVERY, 1517 phba->brd_no, vport->vpi, cmdcode);
1140 "%d:0244 Issue FDMI request failed Data: x%x\n",
1141 phba->brd_no,
1142 cmdcode);
1143 return 1; 1518 return 1;
1144} 1519}
1145 1520
1146void 1521void
1147lpfc_fdmi_tmo(unsigned long ptr) 1522lpfc_fdmi_tmo(unsigned long ptr)
1148{ 1523{
1149 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 1524 struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
1525 struct lpfc_hba *phba = vport->phba;
1150 unsigned long iflag; 1526 unsigned long iflag;
1151 1527
1152 spin_lock_irqsave(phba->host->host_lock, iflag); 1528 spin_lock_irqsave(&vport->work_port_lock, iflag);
1153 if (!(phba->work_hba_events & WORKER_FDMI_TMO)) { 1529 if (!(vport->work_port_events & WORKER_FDMI_TMO)) {
1154 phba->work_hba_events |= WORKER_FDMI_TMO; 1530 vport->work_port_events |= WORKER_FDMI_TMO;
1531 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1532
1533 spin_lock_irqsave(&phba->hbalock, iflag);
1155 if (phba->work_wait) 1534 if (phba->work_wait)
1156 wake_up(phba->work_wait); 1535 lpfc_worker_wake_up(phba);
1536 spin_unlock_irqrestore(&phba->hbalock, iflag);
1157 } 1537 }
1158 spin_unlock_irqrestore(phba->host->host_lock,iflag); 1538 else
1539 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
1159} 1540}
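/* Editorial sketch: the reworked timer above illustrates the split-lock
 * wakeup pattern used throughout this series.  A timer callback runs in
 * softirq context, so it only records the event under the vport's
 * work_port_lock and pokes the worker thread under phba->hbalock; the
 * slow work happens later in lpfc_fdmi_timeout_handler().  The generic
 * shape, with hypothetical names rather than the exact lpfc ones:
 *
 *	spin_lock_irqsave(&port_lock, flags);
 *	if (!(events & MY_EVENT)) {
 *		events |= MY_EVENT;
 *		spin_unlock_irqrestore(&port_lock, flags);
 *		wake_up(&worker_wq);
 *	} else
 *		spin_unlock_irqrestore(&port_lock, flags);
 */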
1160 1541
1161void 1542void
1162lpfc_fdmi_tmo_handler(struct lpfc_hba *phba) 1543lpfc_fdmi_timeout_handler(struct lpfc_vport *vport)
1163{ 1544{
1164 struct lpfc_nodelist *ndlp; 1545 struct lpfc_nodelist *ndlp;
1165 1546
1166 ndlp = lpfc_findnode_did(phba, FDMI_DID); 1547 ndlp = lpfc_findnode_did(vport, FDMI_DID);
1167 if (ndlp) { 1548 if (ndlp) {
1168 if (init_utsname()->nodename[0] != '\0') { 1549 if (init_utsname()->nodename[0] != '\0')
1169 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); 1550 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
1170 } else { 1551 else
1171 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60); 1552 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
1172 }
1173 } 1553 }
1174 return; 1554 return;
1175} 1555}
1176 1556
1177
1178void 1557void
1179lpfc_decode_firmware_rev(struct lpfc_hba * phba, char *fwrevision, int flag) 1558lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1180{ 1559{
1181 struct lpfc_sli *psli = &phba->sli; 1560 struct lpfc_sli *psli = &phba->sli;
1182 lpfc_vpd_t *vp = &phba->vpd; 1561 lpfc_vpd_t *vp = &phba->vpd;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
new file mode 100644
index 000000000000..673cfe11cc2b
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -0,0 +1,508 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#include <linux/blkdev.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/idr.h>
25#include <linux/interrupt.h>
26#include <linux/kthread.h>
27#include <linux/pci.h>
28#include <linux/spinlock.h>
29#include <linux/ctype.h>
30#include <linux/version.h>
31
32#include <scsi/scsi.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include "lpfc_hw.h"
38#include "lpfc_sli.h"
39#include "lpfc_disc.h"
40#include "lpfc_scsi.h"
41#include "lpfc.h"
42#include "lpfc_logmsg.h"
43#include "lpfc_crtn.h"
44#include "lpfc_vport.h"
45#include "lpfc_version.h"
46#include "lpfc_vport.h"	/* duplicate of the include two lines up; harmless */
47#include "lpfc_debugfs.h"
48
49#ifdef CONFIG_LPFC_DEBUG_FS
50/* debugfs interface
51 *
52 * To access this interface the user should:
53 * # mkdir /debug
54 * # mount -t debugfs none /debug
55 *
56 * The lpfc debugfs directory hierarchy is:
57 * lpfc/lpfcX/vportY
58 * where X is the lpfc hba unique_id
59 * where Y is the vport VPI on that hba
60 *
61 * Debugging services available per vport:
62 * discovery_trace
63 * This is an ASCII readable file that contains a trace of the last
64 * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
65 * See lpfc_debugfs.h for different categories of
66 * discovery events. To enable the discovery trace, the following
67 * module parameters must be set:
68 * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
69 * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
70 * EACH vport. X MUST also be a power of 2.
71 * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
72 * lpfc_debugfs.h.
73 */
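/* A hypothetical session using the parameters documented above (the
 * hba/vport numbers are examples only):
 *
 *	# modprobe lpfc lpfc_debugfs_enable=1 \
 *		lpfc_debugfs_max_disc_trc=64 lpfc_debugfs_mask_disc_trc=0xef
 *	# mount -t debugfs none /debug
 *	# cat /debug/lpfc/lpfc0/vport0/discovery_trace
 */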
74static int lpfc_debugfs_enable = 0;
75module_param(lpfc_debugfs_enable, int, 0);
76MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
77
78static int lpfc_debugfs_max_disc_trc = 0; /* This MUST be a power of 2 */
79module_param(lpfc_debugfs_max_disc_trc, int, 0);
80MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
81 "Set debugfs discovery trace depth");
82
83static int lpfc_debugfs_mask_disc_trc = 0;
84module_param(lpfc_debugfs_mask_disc_trc, int, 0);
85MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
86 "Set debugfs discovery trace mask");
87
88#include <linux/debugfs.h>
89
90/* size of discovery_trace output line */
91#define LPFC_DISC_TRC_ENTRY_SIZE 80
92
93/* nodelist output buffer size */
94#define LPFC_NODELIST_SIZE 8192
95#define LPFC_NODELIST_ENTRY_SIZE 120
96
97struct lpfc_debug {
98 char *buffer;
99 int len;
100};
101
102atomic_t lpfc_debugfs_disc_trc_cnt = ATOMIC_INIT(0);
103unsigned long lpfc_debugfs_start_time = 0L;
104
105static int
106lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
107{
108 int i, index, len, enable;
109 uint32_t ms;
110 struct lpfc_disc_trc *dtp;
111 char buffer[80];
112
113
114 enable = lpfc_debugfs_enable;
115 lpfc_debugfs_enable = 0;
116
117 len = 0;
118 index = (atomic_read(&vport->disc_trc_cnt) + 1) &
119 (lpfc_debugfs_max_disc_trc - 1);
120 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
121 dtp = vport->disc_trc + i;
122 if (!dtp->fmt)
123 continue;
124 ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
125 snprintf(buffer, 80, "%010d:%010d ms:%s\n",
126 dtp->seq_cnt, ms, dtp->fmt);
127 len += snprintf(buf+len, size-len, buffer,
128 dtp->data1, dtp->data2, dtp->data3);
129 }
130 for (i = 0; i < index; i++) {
131 dtp = vport->disc_trc + i;
132 if (!dtp->fmt)
133 continue;
134 ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
135 snprintf(buffer, 80, "%010d:%010d ms:%s\n",
136 dtp->seq_cnt, ms, dtp->fmt);
137 len += snprintf(buf+len, size-len, buffer,
138 dtp->data1, dtp->data2, dtp->data3);
139 }
140
141 lpfc_debugfs_enable = enable;
142 return len;
143}
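/* The two loops above replay a circular buffer in chronological order:
 * first from the oldest slot (one past the last write, masked) to the
 * end of the array, then from slot 0 back up to the oldest slot.  A
 * minimal equivalent walk, assuming SIZE is a power of 2 as the module
 * parameter check requires:
 *
 *	unsigned int start = (last_write + 1) & (SIZE - 1);
 *	for (i = 0; i < SIZE; i++)
 *		emit(&ring[(start + i) & (SIZE - 1)]);
 *
 * visits exactly the same entries in the same order.
 */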
144
145static int
146lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
147{
148 int len = 0;
149 int cnt;
150 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
151 struct lpfc_nodelist *ndlp;
152 unsigned char *statep, *name;
153
154 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
155
156 spin_lock_irq(shost->host_lock);
157 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
158 if (!cnt) {
159 len += snprintf(buf+len, size-len,
160 "Missing Nodelist Entries\n");
161 break;
162 }
163 cnt--;
164 switch (ndlp->nlp_state) {
165 case NLP_STE_UNUSED_NODE:
166 statep = "UNUSED";
167 break;
168 case NLP_STE_PLOGI_ISSUE:
169 statep = "PLOGI ";
170 break;
171 case NLP_STE_ADISC_ISSUE:
172 statep = "ADISC ";
173 break;
174 case NLP_STE_REG_LOGIN_ISSUE:
175 statep = "REGLOG";
176 break;
177 case NLP_STE_PRLI_ISSUE:
178 statep = "PRLI ";
179 break;
180 case NLP_STE_UNMAPPED_NODE:
181 statep = "UNMAP ";
182 break;
183 case NLP_STE_MAPPED_NODE:
184 statep = "MAPPED";
185 break;
186 case NLP_STE_NPR_NODE:
187 statep = "NPR ";
188 break;
189 default:
190 statep = "UNKNOWN";
191 }
192 len += snprintf(buf+len, size-len, "%s DID:x%06x ",
193 statep, ndlp->nlp_DID);
194 name = (unsigned char *)&ndlp->nlp_portname;
195 len += snprintf(buf+len, size-len,
196 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
197 *name, *(name+1), *(name+2), *(name+3),
198 *(name+4), *(name+5), *(name+6), *(name+7));
199 name = (unsigned char *)&ndlp->nlp_nodename;
200 len += snprintf(buf+len, size-len,
201 "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
202 *name, *(name+1), *(name+2), *(name+3),
203 *(name+4), *(name+5), *(name+6), *(name+7));
204 len += snprintf(buf+len, size-len, "RPI:%03d flag:x%08x ",
205 ndlp->nlp_rpi, ndlp->nlp_flag);
206 if (!ndlp->nlp_type)
207 len += snprintf(buf+len, size-len, "UNKNOWN_TYPE");
208 if (ndlp->nlp_type & NLP_FC_NODE)
209 len += snprintf(buf+len, size-len, "FC_NODE ");
210 if (ndlp->nlp_type & NLP_FABRIC)
211 len += snprintf(buf+len, size-len, "FABRIC ");
212 if (ndlp->nlp_type & NLP_FCP_TARGET)
213 len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
214 ndlp->nlp_sid);
215 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
216 len += snprintf(buf+len, size-len, "FCP_INITIATOR");
217 len += snprintf(buf+len, size-len, "\n");
218 }
219 spin_unlock_irq(shost->host_lock);
220 return len;
221}
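/* Sizing note: with LPFC_NODELIST_SIZE = 8192 and
 * LPFC_NODELIST_ENTRY_SIZE = 120, the cnt guard above admits
 * 8192 / 120 = 68 node entries before printing the "Missing Nodelist
 * Entries" marker; the worst case (68 * 120 = 8160 bytes plus the
 * marker) stays inside the 8 KiB buffer, and every snprintf() is
 * bounded by size-len in any event.
 */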
222#endif
223
224
225inline void
226lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
227 uint32_t data1, uint32_t data2, uint32_t data3)
228{
229#ifdef CONFIG_LPFC_DEBUG_FS
230 struct lpfc_disc_trc *dtp;
231 int index;
232
233 if (!(lpfc_debugfs_mask_disc_trc & mask))
234 return;
235
236 if (!lpfc_debugfs_enable || !lpfc_debugfs_max_disc_trc ||
237 !vport || !vport->disc_trc)
238 return;
239
240 index = atomic_inc_return(&vport->disc_trc_cnt) &
241 (lpfc_debugfs_max_disc_trc - 1);
242 dtp = vport->disc_trc + index;
243 dtp->fmt = fmt;
244 dtp->data1 = data1;
245 dtp->data2 = data2;
246 dtp->data3 = data3;
247 dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_disc_trc_cnt);
248 dtp->jif = jiffies;
249#endif
250 return;
251}
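/* The index computation above depends on lpfc_debugfs_max_disc_trc
 * being a power of 2 (enforced in lpfc_debugfs_initialize() below):
 * for power-of-2 sizes, x & (size - 1) equals x % size in a single
 * AND, e.g. with size = 64, 67 & 63 = 3 = 67 % 64.  atomic_inc_return()
 * makes the slot claim safe against concurrent tracers, though the
 * entry fields themselves are filled in non-atomically.
 */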
252
253#ifdef CONFIG_LPFC_DEBUG_FS
254static int
255lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file)
256{
257 struct lpfc_vport *vport = inode->i_private;
258 struct lpfc_debug *debug;
259 int size;
260 int rc = -ENOMEM;
261
262 if (!lpfc_debugfs_max_disc_trc) {
263 rc = -ENOSPC;
264 goto out;
265 }
266
267 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
268 if (!debug)
269 goto out;
270
271	/* Round up to a page boundary */
272 size = (lpfc_debugfs_max_disc_trc * LPFC_DISC_TRC_ENTRY_SIZE);
273 size = PAGE_ALIGN(size);
274
275 debug->buffer = kmalloc(size, GFP_KERNEL);
276 if (!debug->buffer) {
277 kfree(debug);
278 goto out;
279 }
280
281 debug->len = lpfc_debugfs_disc_trc_data(vport, debug->buffer, size);
282 file->private_data = debug;
283
284 rc = 0;
285out:
286 return rc;
287}
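/* PAGE_ALIGN() rounds the allocation up to the next page multiple,
 * e.g. 100 entries * 80 bytes = 8000 bytes becomes 8192 with 4 KiB
 * pages.  The snprintf() calls in the dump routine are already bounded
 * by this size, so the slack is belt-and-braces rather than a
 * correctness requirement.
 */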
288
289static int
290lpfc_debugfs_nodelist_open(struct inode *inode, struct file *file)
291{
292 struct lpfc_vport *vport = inode->i_private;
293 struct lpfc_debug *debug;
294 int rc = -ENOMEM;
295
296 debug = kmalloc(sizeof(*debug), GFP_KERNEL);
297 if (!debug)
298 goto out;
299
300	/* Fixed-size buffer; no page rounding needed here */
301 debug->buffer = kmalloc(LPFC_NODELIST_SIZE, GFP_KERNEL);
302 if (!debug->buffer) {
303 kfree(debug);
304 goto out;
305 }
306
307 debug->len = lpfc_debugfs_nodelist_data(vport, debug->buffer,
308 LPFC_NODELIST_SIZE);
309 file->private_data = debug;
310
311 rc = 0;
312out:
313 return rc;
314}
315
316static loff_t
317lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
318{
319 struct lpfc_debug *debug;
320 loff_t pos = -1;
321
322 debug = file->private_data;
323
324 switch (whence) {
325 case 0:
326 pos = off;
327 break;
328 case 1:
329 pos = file->f_pos + off;
330 break;
331 case 2:
332 pos = debug->len - off;
333 }
334 return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
335}
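/* The bare 0/1/2 above are the classic whence values; the same switch
 * written with the named constants from <linux/fs.h> would read:
 *
 *	switch (whence) {
 *	case SEEK_SET: pos = off; break;
 *	case SEEK_CUR: pos = file->f_pos + off; break;
 *	case SEEK_END: pos = debug->len - off; break;
 *	}
 *
 * Note the driver subtracts off for SEEK_END where lseek(2) semantics
 * would add it; callers must pass a positive distance from the end.
 */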
336
337static ssize_t
338lpfc_debugfs_read(struct file *file, char __user *buf,
339 size_t nbytes, loff_t *ppos)
340{
341 struct lpfc_debug *debug = file->private_data;
342 return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
343 debug->len);
344}
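/* simple_read_from_buffer() copies min(nbytes, debug->len - *ppos)
 * bytes to user space and advances *ppos, returning the count copied
 * (0 at end-of-snapshot, -EFAULT on a bad user buffer), so no extra
 * bounds handling is needed here.
 */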
345
346static int
347lpfc_debugfs_release(struct inode *inode, struct file *file)
348{
349 struct lpfc_debug *debug = file->private_data;
350
351 kfree(debug->buffer);
352 kfree(debug);
353
354 return 0;
355}
356
357#undef lpfc_debugfs_op_disc_trc
358static struct file_operations lpfc_debugfs_op_disc_trc = {
359 .owner = THIS_MODULE,
360 .open = lpfc_debugfs_disc_trc_open,
361 .llseek = lpfc_debugfs_lseek,
362 .read = lpfc_debugfs_read,
363 .release = lpfc_debugfs_release,
364};
365
366#undef lpfc_debugfs_op_nodelist
367static struct file_operations lpfc_debugfs_op_nodelist = {
368 .owner = THIS_MODULE,
369 .open = lpfc_debugfs_nodelist_open,
370 .llseek = lpfc_debugfs_lseek,
371 .read = lpfc_debugfs_read,
372 .release = lpfc_debugfs_release,
373};
374
375static struct dentry *lpfc_debugfs_root = NULL;
376static atomic_t lpfc_debugfs_hba_count;
377#endif
378
379inline void
380lpfc_debugfs_initialize(struct lpfc_vport *vport)
381{
382#ifdef CONFIG_LPFC_DEBUG_FS
383 struct lpfc_hba *phba = vport->phba;
384 char name[64];
385 uint32_t num, i;
386
387 if (!lpfc_debugfs_enable)
388 return;
389
390 if (lpfc_debugfs_max_disc_trc) {
391 num = lpfc_debugfs_max_disc_trc - 1;
392 if (num & lpfc_debugfs_max_disc_trc) {
393 /* Change to be a power of 2 */
394 num = lpfc_debugfs_max_disc_trc;
395 i = 0;
396 while (num > 1) {
397 num = num >> 1;
398 i++;
399 }
400 lpfc_debugfs_max_disc_trc = (1 << i);
401 printk(KERN_ERR
402 "lpfc_debugfs_max_disc_trc changed to %d\n",
403 lpfc_debugfs_max_disc_trc);
404 }
405 }
406
407 if (!lpfc_debugfs_root) {
408 lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
409 atomic_set(&lpfc_debugfs_hba_count, 0);
410 if (!lpfc_debugfs_root)
411 goto debug_failed;
412 }
413
414 snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
415 if (!phba->hba_debugfs_root) {
416 phba->hba_debugfs_root =
417 debugfs_create_dir(name, lpfc_debugfs_root);
418 if (!phba->hba_debugfs_root)
419 goto debug_failed;
420 atomic_inc(&lpfc_debugfs_hba_count);
421 atomic_set(&phba->debugfs_vport_count, 0);
422 }
423
424 snprintf(name, sizeof(name), "vport%d", vport->vpi);
425 if (!vport->vport_debugfs_root) {
426 vport->vport_debugfs_root =
427 debugfs_create_dir(name, phba->hba_debugfs_root);
428 if (!vport->vport_debugfs_root)
429 goto debug_failed;
430 atomic_inc(&phba->debugfs_vport_count);
431 }
432
433 if (!lpfc_debugfs_start_time)
434 lpfc_debugfs_start_time = jiffies;
435
436 vport->disc_trc = kmalloc(
437 (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc),
438 GFP_KERNEL);
439
440 if (!vport->disc_trc)
441 goto debug_failed;
442 memset(vport->disc_trc, 0,
443 (sizeof(struct lpfc_disc_trc) * lpfc_debugfs_max_disc_trc));
444
445 snprintf(name, sizeof(name), "discovery_trace");
446 vport->debug_disc_trc =
447 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
448 vport->vport_debugfs_root,
449 vport, &lpfc_debugfs_op_disc_trc);
450 if (!vport->debug_disc_trc) {
451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
452 "%d:0409 Cannot create debugfs",
453 phba->brd_no);
454 goto debug_failed;
455 }
456 snprintf(name, sizeof(name), "nodelist");
457 vport->debug_nodelist =
458 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
459 vport->vport_debugfs_root,
460 vport, &lpfc_debugfs_op_nodelist);
461 if (!vport->debug_nodelist) {
462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
463 "%d:0409 Cannot create debugfs",
464 phba->brd_no);
465 goto debug_failed;
466 }
467debug_failed:
468 return;
469#endif
470}
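/* Two notes on the function above.  The shift loop rounds a
 * non-power-of-2 trace depth DOWN to the nearest power of 2: a module
 * parameter of 1000 shifts to 1 in nine steps, giving 1 << 9 = 512.
 * And the kmalloc() + memset() pair for disc_trc could equivalently
 * use the zeroing allocator:
 *
 *	vport->disc_trc = kzalloc(sizeof(struct lpfc_disc_trc) *
 *				  lpfc_debugfs_max_disc_trc, GFP_KERNEL);
 */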
471
472
473inline void
474lpfc_debugfs_terminate(struct lpfc_vport *vport)
475{
476#ifdef CONFIG_LPFC_DEBUG_FS
477 struct lpfc_hba *phba = vport->phba;
478
479 if (vport->disc_trc) {
480 kfree(vport->disc_trc);
481 vport->disc_trc = NULL;
482 }
483 if (vport->debug_disc_trc) {
484 debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
485 vport->debug_disc_trc = NULL;
486 }
487 if (vport->debug_nodelist) {
488 debugfs_remove(vport->debug_nodelist); /* nodelist */
489 vport->debug_nodelist = NULL;
490 }
491 if (vport->vport_debugfs_root) {
492 debugfs_remove(vport->vport_debugfs_root); /* vportX */
493 vport->vport_debugfs_root = NULL;
494 atomic_dec(&phba->debugfs_vport_count);
495 }
496 if (atomic_read(&phba->debugfs_vport_count) == 0) {
497 debugfs_remove(vport->phba->hba_debugfs_root); /* lpfcX */
498 vport->phba->hba_debugfs_root = NULL;
499 atomic_dec(&lpfc_debugfs_hba_count);
500 if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
501 debugfs_remove(lpfc_debugfs_root); /* lpfc */
502 lpfc_debugfs_root = NULL;
503 }
504 }
505#endif
506}
507
508
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
new file mode 100644
index 000000000000..fffb678426a4
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -0,0 +1,50 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2007 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
20
21#ifndef _H_LPFC_DEBUG_FS
22#define _H_LPFC_DEBUG_FS
23
24#ifdef CONFIG_LPFC_DEBUG_FS
25struct lpfc_disc_trc {
26 char *fmt;
27 uint32_t data1;
28 uint32_t data2;
29 uint32_t data3;
30 uint32_t seq_cnt;
31 unsigned long jif;
32};
33#endif
34
35/* Mask for discovery_trace */
36#define LPFC_DISC_TRC_ELS_CMD 0x1 /* Trace ELS commands */
37#define LPFC_DISC_TRC_ELS_RSP 0x2 /* Trace ELS response */
38#define LPFC_DISC_TRC_ELS_UNSOL 0x4 /* Trace ELS rcv'ed */
39#define LPFC_DISC_TRC_ELS_ALL 0x7 /* Trace ELS */
40#define LPFC_DISC_TRC_MBOX_VPORT 0x8 /* Trace vport MBOXs */
41#define LPFC_DISC_TRC_MBOX 0x10 /* Trace other MBOXs */
42#define LPFC_DISC_TRC_MBOX_ALL 0x18 /* Trace all MBOXs */
43#define LPFC_DISC_TRC_CT 0x20 /* Trace disc CT requests */
44#define LPFC_DISC_TRC_DSM 0x40 /* Trace DSM events */
45#define LPFC_DISC_TRC_RPORT 0x80 /* Trace rport events */
46#define LPFC_DISC_TRC_NODE 0x100 /* Trace ndlp state changes */
47
48#define LPFC_DISC_TRC_DISCOVERY 0xef /* common mask for general
49 * discovery */
50#endif /* H_LPFC_DEBUG_FS */
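/* Editorial note: 0xef is every trace bit above except
 * LPFC_DISC_TRC_MBOX: 0x1 | 0x2 | 0x4 | 0x8 | 0x20 | 0x40 | 0x80 = 0xef,
 * so the common discovery mask covers all ELS traffic, vport mailboxes,
 * CT requests, DSM, rport and node events, but deliberately skips
 * generic mailbox commands (0x10).
 */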
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 498059f3f7f4..aacac9ac5381 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -36,21 +36,23 @@ enum lpfc_work_type {
36 LPFC_EVT_WARM_START, 36 LPFC_EVT_WARM_START,
37 LPFC_EVT_KILL, 37 LPFC_EVT_KILL,
38 LPFC_EVT_ELS_RETRY, 38 LPFC_EVT_ELS_RETRY,
39 LPFC_EVT_DEV_LOSS_DELAY,
40 LPFC_EVT_DEV_LOSS,
39}; 41};
40 42
41/* structure used to queue event to the discovery tasklet */ 43/* structure used to queue event to the discovery tasklet */
42struct lpfc_work_evt { 44struct lpfc_work_evt {
43 struct list_head evt_listp; 45 struct list_head evt_listp;
44 void * evt_arg1; 46 void *evt_arg1;
45 void * evt_arg2; 47 void *evt_arg2;
46 enum lpfc_work_type evt; 48 enum lpfc_work_type evt;
47}; 49};
48 50
49 51
50struct lpfc_nodelist { 52struct lpfc_nodelist {
51 struct list_head nlp_listp; 53 struct list_head nlp_listp;
52 struct lpfc_name nlp_portname; /* port name */ 54 struct lpfc_name nlp_portname;
53 struct lpfc_name nlp_nodename; /* node name */ 55 struct lpfc_name nlp_nodename;
54 uint32_t nlp_flag; /* entry flags */ 56 uint32_t nlp_flag; /* entry flags */
55 uint32_t nlp_DID; /* FC D_ID of entry */ 57 uint32_t nlp_DID; /* FC D_ID of entry */
56 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ 58 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
@@ -75,8 +77,9 @@ struct lpfc_nodelist {
75 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ 77 struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
76 struct fc_rport *rport; /* Corresponding FC transport 78 struct fc_rport *rport; /* Corresponding FC transport
77 port structure */ 79 port structure */
78 struct lpfc_hba *nlp_phba; 80 struct lpfc_vport *vport;
79 struct lpfc_work_evt els_retry_evt; 81 struct lpfc_work_evt els_retry_evt;
82 struct lpfc_work_evt dev_loss_evt;
80 unsigned long last_ramp_up_time; /* jiffy of last ramp up */ 83 unsigned long last_ramp_up_time; /* jiffy of last ramp up */
81 unsigned long last_q_full_time; /* jiffy of last queue full */ 84 unsigned long last_q_full_time; /* jiffy of last queue full */
82 struct kref kref; 85 struct kref kref;
@@ -98,7 +101,9 @@ struct lpfc_nodelist {
98 ACC */ 101 ACC */
99#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from 102#define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from
100 NPR list */ 103 NPR list */
104#define NLP_RM_DFLT_RPI 0x4000000 /* need to remove leftover dflt RPI */
101#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ 105#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */
106#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
102 107
103/* There are 4 different double linked lists nodelist entries can reside on. 108/* There are 4 different double linked lists nodelist entries can reside on.
104 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used 109 * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used
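The lpfc_els.c hunks that follow all apply the same NPIV conversion
pattern seen above: per-port state moves from the HBA into struct
lpfc_vport, and the old phba->host->host_lock accesses split into the
per-vport Scsi_Host lock or the HBA-wide hbalock depending on what is
being protected. A condensed before/after sketch (names as used in the
diff; the body is illustrative, not a literal hunk):

	/* before: everything hangs off the HBA */
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_FABRIC;
	spin_unlock_irq(phba->host->host_lock);

	/* after: per-vport state, locked via the vport's Scsi_Host */
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);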
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 638b3cd677bd..33fbc1666946 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -35,38 +35,38 @@
35#include "lpfc.h" 35#include "lpfc.h"
36#include "lpfc_logmsg.h" 36#include "lpfc_logmsg.h"
37#include "lpfc_crtn.h" 37#include "lpfc_crtn.h"
38#include "lpfc_vport.h"
39#include "lpfc_debugfs.h"
38 40
39static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 41static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
40 struct lpfc_iocbq *); 42 struct lpfc_iocbq *);
43static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
44 struct lpfc_iocbq *);
45
41static int lpfc_max_els_tries = 3; 46static int lpfc_max_els_tries = 3;
42 47
43static int 48int
44lpfc_els_chk_latt(struct lpfc_hba * phba) 49lpfc_els_chk_latt(struct lpfc_vport *vport)
45{ 50{
46 struct lpfc_sli *psli; 51 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
47 LPFC_MBOXQ_t *mbox; 52 struct lpfc_hba *phba = vport->phba;
48 uint32_t ha_copy; 53 uint32_t ha_copy;
49 int rc;
50 54
51 psli = &phba->sli; 55 if (vport->port_state >= LPFC_VPORT_READY ||
52 56 phba->link_state == LPFC_LINK_DOWN)
53 if ((phba->hba_state >= LPFC_HBA_READY) ||
54 (phba->hba_state == LPFC_LINK_DOWN))
55 return 0; 57 return 0;
56 58
57 /* Read the HBA Host Attention Register */ 59 /* Read the HBA Host Attention Register */
58 spin_lock_irq(phba->host->host_lock);
59 ha_copy = readl(phba->HAregaddr); 60 ha_copy = readl(phba->HAregaddr);
60 spin_unlock_irq(phba->host->host_lock);
61 61
62 if (!(ha_copy & HA_LATT)) 62 if (!(ha_copy & HA_LATT))
63 return 0; 63 return 0;
64 64
65 /* Pending Link Event during Discovery */ 65 /* Pending Link Event during Discovery */
66 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY, 66 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
67 "%d:0237 Pending Link Event during " 67 "%d (%d):0237 Pending Link Event during "
68 "Discovery: State x%x\n", 68 "Discovery: State x%x\n",
69 phba->brd_no, phba->hba_state); 69 phba->brd_no, vport->vpi, phba->pport->port_state);
70 70
71 /* CLEAR_LA should re-enable link attention events and 71 /* CLEAR_LA should re-enable link attention events and
72 * we should then immediately take a LATT event. The 72 * we should then immediately take a LATT event. The
@@ -74,48 +74,34 @@ lpfc_els_chk_latt(struct lpfc_hba * phba)
74 * will clean up any left over in-progress discovery 74 * will clean up any left over in-progress discovery
75 * events. 75 * events.
76 */ 76 */
77 spin_lock_irq(phba->host->host_lock); 77 spin_lock_irq(shost->host_lock);
78 phba->fc_flag |= FC_ABORT_DISCOVERY; 78 vport->fc_flag |= FC_ABORT_DISCOVERY;
79 spin_unlock_irq(phba->host->host_lock); 79 spin_unlock_irq(shost->host_lock);
80
81 if (phba->hba_state != LPFC_CLEAR_LA) {
82 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
83 phba->hba_state = LPFC_CLEAR_LA;
84 lpfc_clear_la(phba, mbox);
85 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
86 rc = lpfc_sli_issue_mbox (phba, mbox,
87 (MBX_NOWAIT | MBX_STOP_IOCB));
88 if (rc == MBX_NOT_FINISHED) {
89 mempool_free(mbox, phba->mbox_mem_pool);
90 phba->hba_state = LPFC_HBA_ERROR;
91 }
92 }
93 }
94 80
95 return 1; 81 if (phba->link_state != LPFC_CLEAR_LA)
82 lpfc_issue_clear_la(phba, vport);
96 83
84 return 1;
97} 85}
98 86
99static struct lpfc_iocbq * 87static struct lpfc_iocbq *
100lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp, 88lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
101 uint16_t cmdSize, uint8_t retry, struct lpfc_nodelist * ndlp, 89 uint16_t cmdSize, uint8_t retry,
102 uint32_t did, uint32_t elscmd) 90 struct lpfc_nodelist *ndlp, uint32_t did,
91 uint32_t elscmd)
103{ 92{
104 struct lpfc_sli_ring *pring; 93 struct lpfc_hba *phba = vport->phba;
105 struct lpfc_iocbq *elsiocb; 94 struct lpfc_iocbq *elsiocb;
106 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 95 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
107 struct ulp_bde64 *bpl; 96 struct ulp_bde64 *bpl;
108 IOCB_t *icmd; 97 IOCB_t *icmd;
109 98
110 pring = &phba->sli.ring[LPFC_ELS_RING];
111 99
112 if (phba->hba_state < LPFC_LINK_UP) 100 if (!lpfc_is_link_up(phba))
113 return NULL; 101 return NULL;
114 102
115 /* Allocate buffer for command iocb */ 103 /* Allocate buffer for command iocb */
116 spin_lock_irq(phba->host->host_lock);
117 elsiocb = lpfc_sli_get_iocbq(phba); 104 elsiocb = lpfc_sli_get_iocbq(phba);
118 spin_unlock_irq(phba->host->host_lock);
119 105
120 if (elsiocb == NULL) 106 if (elsiocb == NULL)
121 return NULL; 107 return NULL;
@@ -123,14 +109,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
123 109
124 /* fill in BDEs for command */ 110 /* fill in BDEs for command */
125 /* Allocate buffer for command payload */ 111 /* Allocate buffer for command payload */
126 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) || 112 if (((pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
127 ((pcmd->virt = lpfc_mbuf_alloc(phba, 113 ((pcmd->virt = lpfc_mbuf_alloc(phba,
128 MEM_PRI, &(pcmd->phys))) == 0)) { 114 MEM_PRI, &(pcmd->phys))) == 0)) {
129 kfree(pcmd); 115 kfree(pcmd);
130 116
131 spin_lock_irq(phba->host->host_lock);
132 lpfc_sli_release_iocbq(phba, elsiocb); 117 lpfc_sli_release_iocbq(phba, elsiocb);
133 spin_unlock_irq(phba->host->host_lock);
134 return NULL; 118 return NULL;
135 } 119 }
136 120
@@ -138,7 +122,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
138 122
139 /* Allocate buffer for response payload */ 123 /* Allocate buffer for response payload */
140 if (expectRsp) { 124 if (expectRsp) {
141 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 125 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
142 if (prsp) 126 if (prsp)
143 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 127 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
144 &prsp->phys); 128 &prsp->phys);
@@ -146,9 +130,7 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
146 kfree(prsp); 130 kfree(prsp);
147 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 131 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
148 kfree(pcmd); 132 kfree(pcmd);
149 spin_lock_irq(phba->host->host_lock);
150 lpfc_sli_release_iocbq(phba, elsiocb); 133 lpfc_sli_release_iocbq(phba, elsiocb);
151 spin_unlock_irq(phba->host->host_lock);
152 return NULL; 134 return NULL;
153 } 135 }
154 INIT_LIST_HEAD(&prsp->list); 136 INIT_LIST_HEAD(&prsp->list);
@@ -157,14 +139,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
157 } 139 }
158 140
159 /* Allocate buffer for Buffer ptr list */ 141 /* Allocate buffer for Buffer ptr list */
160 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 142 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
161 if (pbuflist) 143 if (pbuflist)
162 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 144 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
163 &pbuflist->phys); 145 &pbuflist->phys);
164 if (pbuflist == 0 || pbuflist->virt == 0) { 146 if (pbuflist == 0 || pbuflist->virt == 0) {
165 spin_lock_irq(phba->host->host_lock);
166 lpfc_sli_release_iocbq(phba, elsiocb); 147 lpfc_sli_release_iocbq(phba, elsiocb);
167 spin_unlock_irq(phba->host->host_lock);
168 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 148 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
169 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 149 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
170 kfree(pcmd); 150 kfree(pcmd);
@@ -178,20 +158,28 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
178 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 158 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
179 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 159 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
180 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL; 160 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
161 icmd->un.elsreq64.remoteID = did; /* DID */
181 if (expectRsp) { 162 if (expectRsp) {
182 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); 163 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
183 icmd->un.elsreq64.remoteID = did; /* DID */
184 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 164 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
185 icmd->ulpTimeout = phba->fc_ratov * 2; 165 icmd->ulpTimeout = phba->fc_ratov * 2;
186 } else { 166 } else {
187 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64); 167 icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
188 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 168 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
189 } 169 }
190
191 icmd->ulpBdeCount = 1; 170 icmd->ulpBdeCount = 1;
192 icmd->ulpLe = 1; 171 icmd->ulpLe = 1;
193 icmd->ulpClass = CLASS3; 172 icmd->ulpClass = CLASS3;
194 173
174 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
175 icmd->un.elsreq64.myID = vport->fc_myDID;
176
177 /* For ELS_REQUEST64_CR, use the VPI by default */
178 icmd->ulpContext = vport->vpi;
179 icmd->ulpCt_h = 0;
180 icmd->ulpCt_l = 1;
181 }
182
195 bpl = (struct ulp_bde64 *) pbuflist->virt; 183 bpl = (struct ulp_bde64 *) pbuflist->virt;
196 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 184 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
197 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 185 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
@@ -209,10 +197,12 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
209 } 197 }
210 198
211 /* Save for completion so we can release these resources */ 199 /* Save for completion so we can release these resources */
212 elsiocb->context1 = lpfc_nlp_get(ndlp); 200 if (elscmd != ELS_CMD_LS_RJT)
201 elsiocb->context1 = lpfc_nlp_get(ndlp);
213 elsiocb->context2 = pcmd; 202 elsiocb->context2 = pcmd;
214 elsiocb->context3 = pbuflist; 203 elsiocb->context3 = pbuflist;
215 elsiocb->retry = retry; 204 elsiocb->retry = retry;
205 elsiocb->vport = vport;
216 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 206 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
217 207
218 if (prsp) { 208 if (prsp) {
@@ -222,16 +212,16 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
222 if (expectRsp) { 212 if (expectRsp) {
223 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 213 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
224 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 214 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
225 "%d:0116 Xmit ELS command x%x to remote " 215 "%d (%d):0116 Xmit ELS command x%x to remote "
226 "NPORT x%x I/O tag: x%x, HBA state: x%x\n", 216 "NPORT x%x I/O tag: x%x, port state: x%x\n",
227 phba->brd_no, elscmd, 217 phba->brd_no, vport->vpi, elscmd, did,
228 did, elsiocb->iotag, phba->hba_state); 218 elsiocb->iotag, vport->port_state);
229 } else { 219 } else {
230 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 220 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
231 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 221 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
232 "%d:0117 Xmit ELS response x%x to remote " 222 "%d (%d):0117 Xmit ELS response x%x to remote "
233 "NPORT x%x I/O tag: x%x, size: x%x\n", 223 "NPORT x%x I/O tag: x%x, size: x%x\n",
234 phba->brd_no, elscmd, 224 phba->brd_no, vport->vpi, elscmd,
235 ndlp->nlp_DID, elsiocb->iotag, cmdSize); 225 ndlp->nlp_DID, elsiocb->iotag, cmdSize);
236 } 226 }
237 227
@@ -240,16 +230,79 @@ lpfc_prep_els_iocb(struct lpfc_hba * phba, uint8_t expectRsp,
240 230
241 231
242static int 232static int
243lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 233lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
244 struct serv_parm *sp, IOCB_t *irsp)
245{ 234{
235 struct lpfc_hba *phba = vport->phba;
246 LPFC_MBOXQ_t *mbox; 236 LPFC_MBOXQ_t *mbox;
247 struct lpfc_dmabuf *mp; 237 struct lpfc_dmabuf *mp;
238 struct lpfc_nodelist *ndlp;
239 struct serv_parm *sp;
248 int rc; 240 int rc;
249 241
250 spin_lock_irq(phba->host->host_lock); 242 sp = &phba->fc_fabparam;
251 phba->fc_flag |= FC_FABRIC; 243 ndlp = lpfc_findnode_did(vport, Fabric_DID);
252 spin_unlock_irq(phba->host->host_lock); 244 if (!ndlp)
245 goto fail;
246
247 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
248 if (!mbox)
249 goto fail;
250
251 vport->port_state = LPFC_FABRIC_CFG_LINK;
252 lpfc_config_link(phba, mbox);
253 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
254 mbox->vport = vport;
255
256 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
257 if (rc == MBX_NOT_FINISHED)
258 goto fail_free_mbox;
259
260 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
261 if (!mbox)
262 goto fail;
263 rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
264 0);
265 if (rc)
266 goto fail_free_mbox;
267
268 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
269 mbox->vport = vport;
270 mbox->context2 = lpfc_nlp_get(ndlp);
271
272 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
273 if (rc == MBX_NOT_FINISHED)
274 goto fail_issue_reg_login;
275
276 return 0;
277
278fail_issue_reg_login:
279 lpfc_nlp_put(ndlp);
280 mp = (struct lpfc_dmabuf *) mbox->context1;
281 lpfc_mbuf_free(phba, mp->virt, mp->phys);
282 kfree(mp);
283fail_free_mbox:
284 mempool_free(mbox, phba->mbox_mem_pool);
285
286fail:
287 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
288 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
289 "%d (%d):0249 Cannot issue Register Fabric login\n",
290 phba->brd_no, vport->vpi);
291 return -ENXIO;
292}
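/* The new function above follows the standard kernel goto-unwind
 * ladder: each failure label releases exactly what was acquired since
 * the previous label, in reverse order.  Skeleton of the idiom, with
 * generic names rather than the lpfc ones:
 *
 *	a = acquire_a();
 *	if (!a)
 *		goto fail;
 *	b = acquire_b();
 *	if (!b)
 *		goto fail_free_a;
 *	return 0;
 * fail_free_a:
 *	release_a(a);
 * fail:
 *	return -ENXIO;
 */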
293
294static int
295lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
296 struct serv_parm *sp, IOCB_t *irsp)
297{
298 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
299 struct lpfc_hba *phba = vport->phba;
300 struct lpfc_nodelist *np;
301 struct lpfc_nodelist *next_np;
302
303 spin_lock_irq(shost->host_lock);
304 vport->fc_flag |= FC_FABRIC;
305 spin_unlock_irq(shost->host_lock);
253 306
254 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 307 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
255 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 308 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */
@@ -258,20 +311,20 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
258 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 311 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
259 312
260 if (phba->fc_topology == TOPOLOGY_LOOP) { 313 if (phba->fc_topology == TOPOLOGY_LOOP) {
261 spin_lock_irq(phba->host->host_lock); 314 spin_lock_irq(shost->host_lock);
262 phba->fc_flag |= FC_PUBLIC_LOOP; 315 vport->fc_flag |= FC_PUBLIC_LOOP;
263 spin_unlock_irq(phba->host->host_lock); 316 spin_unlock_irq(shost->host_lock);
264 } else { 317 } else {
265 /* 318 /*
266 * If we are a N-port connected to a Fabric, fixup sparam's so 319 * If we are a N-port connected to a Fabric, fixup sparam's so
267 * logins to devices on remote loops work. 320 * logins to devices on remote loops work.
268 */ 321 */
269 phba->fc_sparam.cmn.altBbCredit = 1; 322 vport->fc_sparam.cmn.altBbCredit = 1;
270 } 323 }
271 324
272 phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 325 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
273 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 326 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
274 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 327 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
275 ndlp->nlp_class_sup = 0; 328 ndlp->nlp_class_sup = 0;
276 if (sp->cls1.classValid) 329 if (sp->cls1.classValid)
277 ndlp->nlp_class_sup |= FC_COS_CLASS1; 330 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -285,68 +338,85 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
285 sp->cmn.bbRcvSizeLsb; 338 sp->cmn.bbRcvSizeLsb;
286 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 339 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
287 340
288 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 341 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
289 if (!mbox) 342 if (sp->cmn.response_multiple_NPort) {
290 goto fail; 343 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
291 344 "%d:1816 FLOGI NPIV supported, "
292 phba->hba_state = LPFC_FABRIC_CFG_LINK; 345 "response data 0x%x\n",
293 lpfc_config_link(phba, mbox); 346 phba->brd_no,
294 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 347 sp->cmn.response_multiple_NPort);
348 phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
295 349
296 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 350 } else {
297 if (rc == MBX_NOT_FINISHED) 351 /* Because we asked f/w for NPIV it still expects us
298 goto fail_free_mbox; 352 to call reg_vnpid atleast for the physcial host */
299 353 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_VPORT,
300 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 354 "%d:1817 Fabric does not support NPIV "
301 if (!mbox) 355 "- configuring single port mode.\n",
302 goto fail; 356 phba->brd_no);
357 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
358 }
359 }
303 360
304 if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) 361 if ((vport->fc_prevDID != vport->fc_myDID) &&
305 goto fail_free_mbox; 362 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
306 363
307 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 364 /* If our NportID changed, we need to ensure all
308 mbox->context2 = lpfc_nlp_get(ndlp); 365 * remaining NPORTs get unreg_login'ed.
366 */
367 list_for_each_entry_safe(np, next_np,
368 &vport->fc_nodes, nlp_listp) {
369 if ((np->nlp_state != NLP_STE_NPR_NODE) ||
370 !(np->nlp_flag & NLP_NPR_ADISC))
371 continue;
372 spin_lock_irq(shost->host_lock);
373 np->nlp_flag &= ~NLP_NPR_ADISC;
374 spin_unlock_irq(shost->host_lock);
375 lpfc_unreg_rpi(vport, np);
376 }
377 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
378 lpfc_mbx_unreg_vpi(vport);
379 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
380 }
381 }
309 382
310 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB); 383 ndlp->nlp_sid = irsp->un.ulpWord[4] & Mask_DID;
311 if (rc == MBX_NOT_FINISHED) 384 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
312 goto fail_issue_reg_login;
313 385
386 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
387 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
388 lpfc_register_new_vport(phba, vport, ndlp);
389 return 0;
390 }
391 lpfc_issue_fabric_reglogin(vport);
314 return 0; 392 return 0;
315
316 fail_issue_reg_login:
317 lpfc_nlp_put(ndlp);
318 mp = (struct lpfc_dmabuf *) mbox->context1;
319 lpfc_mbuf_free(phba, mp->virt, mp->phys);
320 kfree(mp);
321 fail_free_mbox:
322 mempool_free(mbox, phba->mbox_mem_pool);
323 fail:
324 return -ENXIO;
325} 393}
326 394
327/* 395/*
328 * We FLOGIed into an NPort, initiate pt2pt protocol 396 * We FLOGIed into an NPort, initiate pt2pt protocol
329 */ 397 */
330static int 398static int
331lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 399lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
332 struct serv_parm *sp) 400 struct serv_parm *sp)
333{ 401{
402 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
403 struct lpfc_hba *phba = vport->phba;
334 LPFC_MBOXQ_t *mbox; 404 LPFC_MBOXQ_t *mbox;
335 int rc; 405 int rc;
336 406
337 spin_lock_irq(phba->host->host_lock); 407 spin_lock_irq(shost->host_lock);
338 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 408 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
339 spin_unlock_irq(phba->host->host_lock); 409 spin_unlock_irq(shost->host_lock);
340 410
341 phba->fc_edtov = FF_DEF_EDTOV; 411 phba->fc_edtov = FF_DEF_EDTOV;
342 phba->fc_ratov = FF_DEF_RATOV; 412 phba->fc_ratov = FF_DEF_RATOV;
343 rc = memcmp(&phba->fc_portname, &sp->portName, 413 rc = memcmp(&vport->fc_portname, &sp->portName,
344 sizeof(struct lpfc_name)); 414 sizeof(vport->fc_portname));
345 if (rc >= 0) { 415 if (rc >= 0) {
346 /* This side will initiate the PLOGI */ 416 /* This side will initiate the PLOGI */
347 spin_lock_irq(phba->host->host_lock); 417 spin_lock_irq(shost->host_lock);
348 phba->fc_flag |= FC_PT2PT_PLOGI; 418 vport->fc_flag |= FC_PT2PT_PLOGI;
349 spin_unlock_irq(phba->host->host_lock); 419 spin_unlock_irq(shost->host_lock);
350 420
351 /* 421 /*
352 * N_Port ID cannot be 0, set ours to LocalID; the other 422 * N_Port ID cannot be 0, set ours to LocalID; the other
@@ -355,7 +425,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
355 425
356 /* not equal */ 426 /* not equal */
357 if (rc) 427 if (rc)
358 phba->fc_myDID = PT2PT_LocalID; 428 vport->fc_myDID = PT2PT_LocalID;
359 429
360 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 430 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
361 if (!mbox) 431 if (!mbox)
@@ -364,15 +434,16 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
364 lpfc_config_link(phba, mbox); 434 lpfc_config_link(phba, mbox);
365 435
366 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 436 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
437 mbox->vport = vport;
367 rc = lpfc_sli_issue_mbox(phba, mbox, 438 rc = lpfc_sli_issue_mbox(phba, mbox,
368 MBX_NOWAIT | MBX_STOP_IOCB); 439 MBX_NOWAIT | MBX_STOP_IOCB);
369 if (rc == MBX_NOT_FINISHED) { 440 if (rc == MBX_NOT_FINISHED) {
370 mempool_free(mbox, phba->mbox_mem_pool); 441 mempool_free(mbox, phba->mbox_mem_pool);
371 goto fail; 442 goto fail;
372 } 443 }
373 lpfc_nlp_put(ndlp); 444 lpfc_nlp_put(ndlp);
374 445
375 ndlp = lpfc_findnode_did(phba, PT2PT_RemoteID); 446 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
376 if (!ndlp) { 447 if (!ndlp) {
377 /* 448 /*
378 * Cannot find existing Fabric ndlp, so allocate a 449 * Cannot find existing Fabric ndlp, so allocate a
@@ -382,28 +453,30 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
382 if (!ndlp) 453 if (!ndlp)
383 goto fail; 454 goto fail;
384 455
385 lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID); 456 lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
386 } 457 }
387 458
388 memcpy(&ndlp->nlp_portname, &sp->portName, 459 memcpy(&ndlp->nlp_portname, &sp->portName,
389 sizeof(struct lpfc_name)); 460 sizeof(struct lpfc_name));
390 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 461 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
391 sizeof(struct lpfc_name)); 462 sizeof(struct lpfc_name));
392 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 463 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
464 spin_lock_irq(shost->host_lock);
393 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 465 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
466 spin_unlock_irq(shost->host_lock);
394 } else { 467 } else {
395 /* This side will wait for the PLOGI */ 468 /* This side will wait for the PLOGI */
396 lpfc_nlp_put(ndlp); 469 lpfc_nlp_put(ndlp);
397 } 470 }
398 471
399 spin_lock_irq(phba->host->host_lock); 472 spin_lock_irq(shost->host_lock);
400 phba->fc_flag |= FC_PT2PT; 473 vport->fc_flag |= FC_PT2PT;
401 spin_unlock_irq(phba->host->host_lock); 474 spin_unlock_irq(shost->host_lock);
402 475
403 /* Start discovery - this should just do CLEAR_LA */ 476 /* Start discovery - this should just do CLEAR_LA */
404 lpfc_disc_start(phba); 477 lpfc_disc_start(vport);
405 return 0; 478 return 0;
406 fail: 479fail:
407 return -ENXIO; 480 return -ENXIO;
408} 481}
409 482
@@ -411,6 +484,8 @@ static void
411lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 484lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
412 struct lpfc_iocbq *rspiocb) 485 struct lpfc_iocbq *rspiocb)
413{ 486{
487 struct lpfc_vport *vport = cmdiocb->vport;
488 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
414 IOCB_t *irsp = &rspiocb->iocb; 489 IOCB_t *irsp = &rspiocb->iocb;
415 struct lpfc_nodelist *ndlp = cmdiocb->context1; 490 struct lpfc_nodelist *ndlp = cmdiocb->context1;
416 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 491 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
@@ -418,21 +493,25 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
418 int rc; 493 int rc;
419 494
420 /* Check to see if link went down during discovery */ 495 /* Check to see if link went down during discovery */
421 if (lpfc_els_chk_latt(phba)) { 496 if (lpfc_els_chk_latt(vport)) {
422 lpfc_nlp_put(ndlp); 497 lpfc_nlp_put(ndlp);
423 goto out; 498 goto out;
424 } 499 }
425 500
501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
502 "FLOGI cmpl: status:x%x/x%x state:x%x",
503 irsp->ulpStatus, irsp->un.ulpWord[4],
504 vport->port_state);
505
426 if (irsp->ulpStatus) { 506 if (irsp->ulpStatus) {
427 /* Check for retry */ 507 /* Check for retry */
428 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 508 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
429 /* ELS command is being retried */
430 goto out; 509 goto out;
431 } 510
432 /* FLOGI failed, so there is no fabric */ 511 /* FLOGI failed, so there is no fabric */
433 spin_lock_irq(phba->host->host_lock); 512 spin_lock_irq(shost->host_lock);
434 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 513 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
435 spin_unlock_irq(phba->host->host_lock); 514 spin_unlock_irq(shost->host_lock);
436 515
437 /* If private loop, then allow max outstanding els to be 516 /* If private loop, then allow max outstanding els to be
438 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 517 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
@@ -443,11 +522,10 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
443 } 522 }
444 523
445 /* FLOGI failure */ 524 /* FLOGI failure */
446 lpfc_printf_log(phba, 525 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
447 KERN_INFO, 526 "%d (%d):0100 FLOGI failure Data: x%x x%x "
448 LOG_ELS, 527 "x%x\n",
449 "%d:0100 FLOGI failure Data: x%x x%x x%x\n", 528 phba->brd_no, vport->vpi,
450 phba->brd_no,
451 irsp->ulpStatus, irsp->un.ulpWord[4], 529 irsp->ulpStatus, irsp->un.ulpWord[4],
452 irsp->ulpTimeout); 530 irsp->ulpTimeout);
453 goto flogifail; 531 goto flogifail;
@@ -463,21 +541,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
463 541
464 /* FLOGI completes successfully */ 542 /* FLOGI completes successfully */
465 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 543 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
466 "%d:0101 FLOGI completes sucessfully " 544 "%d (%d):0101 FLOGI completes sucessfully "
467 "Data: x%x x%x x%x x%x\n", 545 "Data: x%x x%x x%x x%x\n",
468 phba->brd_no, 546 phba->brd_no, vport->vpi,
469 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 547 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
470 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); 548 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
471 549
472 if (phba->hba_state == LPFC_FLOGI) { 550 if (vport->port_state == LPFC_FLOGI) {
473 /* 551 /*
474 * If Common Service Parameters indicate Nport 552 * If Common Service Parameters indicate Nport
475 * we are point to point, if Fport we are Fabric. 553 * we are point to point, if Fport we are Fabric.
476 */ 554 */
477 if (sp->cmn.fPort) 555 if (sp->cmn.fPort)
478 rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp); 556 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
479 else 557 else
480 rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp); 558 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
481 559
482 if (!rc) 560 if (!rc)
483 goto out; 561 goto out;
@@ -486,14 +564,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
486flogifail: 564flogifail:
487 lpfc_nlp_put(ndlp); 565 lpfc_nlp_put(ndlp);
488 566
489 if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT || 567 if (!lpfc_error_lost_link(irsp)) {
490 (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
491 irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
492 /* FLOGI failed, so just use loop map to make discovery list */ 568 /* FLOGI failed, so just use loop map to make discovery list */
493 lpfc_disc_list_loopmap(phba); 569 lpfc_disc_list_loopmap(vport);
494 570
495 /* Start discovery */ 571 /* Start discovery */
496 lpfc_disc_start(phba); 572 lpfc_disc_start(vport);
497 } 573 }
498 574
499out: 575out:
@@ -501,9 +577,10 @@ out:
501} 577}
502 578
503static int 579static int
504lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 580lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
505 uint8_t retry) 581 uint8_t retry)
506{ 582{
583 struct lpfc_hba *phba = vport->phba;
507 struct serv_parm *sp; 584 struct serv_parm *sp;
508 IOCB_t *icmd; 585 IOCB_t *icmd;
509 struct lpfc_iocbq *elsiocb; 586 struct lpfc_iocbq *elsiocb;
@@ -515,9 +592,10 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
515 592
516 pring = &phba->sli.ring[LPFC_ELS_RING]; 593 pring = &phba->sli.ring[LPFC_ELS_RING];
517 594
518 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm)); 595 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
519 elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, 596 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
520 ndlp->nlp_DID, ELS_CMD_FLOGI); 597 ndlp->nlp_DID, ELS_CMD_FLOGI);
598
521 if (!elsiocb) 599 if (!elsiocb)
522 return 1; 600 return 1;
523 601
@@ -526,8 +604,8 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
526 604
527 /* For FLOGI request, remainder of payload is service parameters */ 605 /* For FLOGI request, remainder of payload is service parameters */
528 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 606 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
529 pcmd += sizeof (uint32_t); 607 pcmd += sizeof(uint32_t);
530 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 608 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
531 sp = (struct serv_parm *) pcmd; 609 sp = (struct serv_parm *) pcmd;
532 610
533 /* Setup CSPs accordingly for Fabric */ 611 /* Setup CSPs accordingly for Fabric */
@@ -541,16 +619,32 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;
 
+	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+		sp->cmn.request_multiple_Nport = 1;
+
+		/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
+		icmd->ulpCt_h = 1;
+		icmd->ulpCt_l = 0;
+	}
+
+	if (phba->fc_topology != TOPOLOGY_LOOP) {
+		icmd->un.elsreq64.myID = 0;
+		icmd->un.elsreq64.fl = 1;
+	}
+
 	tmo = phba->fc_ratov;
 	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
-	lpfc_set_disctmo(phba);
+	lpfc_set_disctmo(vport);
 	phba->fc_ratov = tmo;
 
 	phba->fc_stat.elsXmitFLOGI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
-	spin_lock_irq(phba->host->host_lock);
-	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
-	spin_unlock_irq(phba->host->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FLOGI: opt:x%x",
+		phba->sli3_options, 0, 0);
+
+	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 	if (rc == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
@@ -559,7 +653,7 @@ lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 }
 
 int
-lpfc_els_abort_flogi(struct lpfc_hba * phba)
+lpfc_els_abort_flogi(struct lpfc_hba *phba)
 {
 	struct lpfc_sli_ring *pring;
 	struct lpfc_iocbq *iocb, *next_iocb;
@@ -577,73 +671,99 @@ lpfc_els_abort_flogi(struct lpfc_hba * phba)
 	 * Check the txcmplq for an iocb that matches the nport the driver is
 	 * searching for.
 	 */
-	spin_lock_irq(phba->host->host_lock);
+	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 		icmd = &iocb->iocb;
-		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
+		    icmd->un.elsreq64.bdl.ulpIoTag32) {
 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
-			if (ndlp && (ndlp->nlp_DID == Fabric_DID))
+			if (ndlp && (ndlp->nlp_DID == Fabric_DID)) {
 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+			}
 		}
 	}
-	spin_unlock_irq(phba->host->host_lock);
+	spin_unlock_irq(&phba->hbalock);
 
 	return 0;
 }
 
 int
-lpfc_initial_flogi(struct lpfc_hba *phba)
+lpfc_initial_flogi(struct lpfc_vport *vport)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp;
 
 	/* First look for the Fabric ndlp */
-	ndlp = lpfc_findnode_did(phba, Fabric_DID);
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 	if (!ndlp) {
 		/* Cannot find existing Fabric ndlp, so allocate a new one */
 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 		if (!ndlp)
 			return 0;
-		lpfc_nlp_init(phba, ndlp, Fabric_DID);
+		lpfc_nlp_init(vport, ndlp, Fabric_DID);
 	} else {
-		lpfc_dequeue_node(phba, ndlp);
+		lpfc_dequeue_node(vport, ndlp);
 	}
-	if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
+	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
 		lpfc_nlp_put(ndlp);
 	}
 	return 1;
 }
 
+int
+lpfc_initial_fdisc(struct lpfc_vport *vport)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_nodelist *ndlp;
+
+	/* First look for the Fabric ndlp */
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp) {
+		/* Cannot find existing Fabric ndlp, so allocate a new one */
+		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+		if (!ndlp)
+			return 0;
+		lpfc_nlp_init(vport, ndlp, Fabric_DID);
+	} else {
+		lpfc_dequeue_node(vport, ndlp);
+	}
+	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
+		lpfc_nlp_put(ndlp);
+	}
+	return 1;
+}
 static void
-lpfc_more_plogi(struct lpfc_hba * phba)
+lpfc_more_plogi(struct lpfc_vport *vport)
 {
 	int sentplogi;
+	struct lpfc_hba *phba = vport->phba;
 
-	if (phba->num_disc_nodes)
-		phba->num_disc_nodes--;
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
 
 	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-			"%d:0232 Continue discovery with %d PLOGIs to go "
+			"%d (%d):0232 Continue discovery with %d PLOGIs to go "
 			"Data: x%x x%x x%x\n",
-			phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
-			phba->fc_flag, phba->hba_state);
+			phba->brd_no, vport->vpi, vport->num_disc_nodes,
+			vport->fc_plogi_cnt, vport->fc_flag, vport->port_state);
 
 	/* Check to see if there are more PLOGIs to be sent */
-	if (phba->fc_flag & FC_NLP_MORE) {
-		/* go thru NPR list and issue any remaining ELS PLOGIs */
-		sentplogi = lpfc_els_disc_plogi(phba);
-	}
+	if (vport->fc_flag & FC_NLP_MORE)
+		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
+		sentplogi = lpfc_els_disc_plogi(vport);
+
 	return;
 }
 
 static struct lpfc_nodelist *
-lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
+lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
 			 struct lpfc_nodelist *ndlp)
 {
+	struct lpfc_vport *vport = ndlp->vport;
 	struct lpfc_nodelist *new_ndlp;
-	uint32_t *lp;
 	struct serv_parm *sp;
-	uint8_t name[sizeof (struct lpfc_name)];
+	uint8_t name[sizeof(struct lpfc_name)];
 	uint32_t rc;
 
 	/* Fabric nodes can have the same WWPN so we don't bother searching
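Most hunks in this file follow one mechanical conversion for NPIV support: functions that took a struct lpfc_hba * now take a struct lpfc_vport *, recover the HBA through vport->phba, and move per-port discovery state (num_disc_nodes, fc_flag, port_state, the fc_*_cnt counters) from the HBA into the vport, so several virtual ports can run on one adapter. A hedged user-space sketch of that shape, with made-up field names where the diff does not show them:

    #include <stdio.h>

    struct hba {
    	int brd_no;		/* one physical adapter */
    };

    struct vport {
    	struct hba *phba;	/* back-pointer to the owning HBA */
    	int vpi;		/* virtual port index */
    	int num_disc_nodes;	/* per-port, no longer per-HBA */
    	unsigned int fc_flag;
    	int port_state;
    };

    /* Post-conversion signature: take the vport, derive the HBA. */
    static void more_plogi(struct vport *vport)
    {
    	struct hba *phba = vport->phba;

    	if (vport->num_disc_nodes)
    		vport->num_disc_nodes--;

    	printf("%d (%d): %d PLOGIs to go\n",
    	       phba->brd_no, vport->vpi, vport->num_disc_nodes);
    }

    int main(void)
    {
    	struct hba h = { .brd_no = 0 };
    	struct vport v = { .phba = &h, .vpi = 1, .num_disc_nodes = 3 };

    	more_plogi(&v);	/* prints "0 (1): 2 PLOGIs to go" */
    	return 0;
    }

The log-message change from "%d:" to "%d (%d):" visible throughout the diff is the same idea applied to diagnostics: every message now identifies both the board number and the vpi.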
@@ -652,50 +772,51 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, struct lpfc_dmabuf *prsp,
 	if (ndlp->nlp_type & NLP_FABRIC)
 		return ndlp;
 
-	lp = (uint32_t *) prsp->virt;
-	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
+	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
 	memset(name, 0, sizeof(struct lpfc_name));
 
 	/* Now we find out if the NPort we are logging into, matches the WWPN
 	 * we have for that ndlp. If not, we have some work to do.
 	 */
-	new_ndlp = lpfc_findnode_wwpn(phba, &sp->portName);
+	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
 
 	if (new_ndlp == ndlp)
 		return ndlp;
 
 	if (!new_ndlp) {
-		rc =
-		    memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name));
+		rc = memcmp(&ndlp->nlp_portname, name,
+			    sizeof(struct lpfc_name));
 		if (!rc)
 			return ndlp;
 		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
 		if (!new_ndlp)
 			return ndlp;
 
-		lpfc_nlp_init(phba, new_ndlp, ndlp->nlp_DID);
+		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
 	}
 
-	lpfc_unreg_rpi(phba, new_ndlp);
+	lpfc_unreg_rpi(vport, new_ndlp);
 	new_ndlp->nlp_DID = ndlp->nlp_DID;
 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
-	lpfc_nlp_set_state(phba, new_ndlp, ndlp->nlp_state);
+	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
 
-	/* Move this back to NPR list */
+	/* Move this back to NPR state */
 	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0)
-		lpfc_drop_node(phba, ndlp);
+		lpfc_drop_node(vport, ndlp);
 	else {
-		lpfc_unreg_rpi(phba, ndlp);
+		lpfc_unreg_rpi(vport, ndlp);
 		ndlp->nlp_DID = 0; /* Two ndlps cannot have the same did */
-		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
 	}
 	return new_ndlp;
 }
 
 static void
-lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		    struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
 {
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_dmabuf *prsp;
@@ -705,32 +826,43 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 
 	irsp = &rspiocb->iocb;
-	ndlp = lpfc_findnode_did(phba, irsp->un.elsreq64.remoteID);
-	if (!ndlp)
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"PLOGI cmpl: status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
+	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
+	if (!ndlp) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+			"%d (%d):0136 PLOGI completes to NPort x%x "
+			"with no ndlp. Data: x%x x%x x%x\n",
+			phba->brd_no, vport->vpi, irsp->un.elsreq64.remoteID,
+			irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpIoTag);
 		goto out;
+	}
 
 	/* Since ndlp can be freed in the disc state machine, note if this node
 	 * is being used during discovery.
 	 */
+	spin_lock_irq(shost->host_lock);
 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
-	spin_lock_irq(phba->host->host_lock);
 	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-	spin_unlock_irq(phba->host->host_lock);
+	spin_unlock_irq(shost->host_lock);
 	rc = 0;
 
 	/* PLOGI completes to NPort <nlp_DID> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0102 PLOGI completes to NPort x%x "
+			"%d (%d):0102 PLOGI completes to NPort x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
-			phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-			irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
-			phba->num_disc_nodes);
+			phba->brd_no, vport->vpi, ndlp->nlp_DID,
+			irsp->ulpStatus, irsp->un.ulpWord[4],
+			irsp->ulpTimeout, disc, vport->num_disc_nodes);
 
 	/* Check to see if link went down during discovery */
-	if (lpfc_els_chk_latt(phba)) {
-		spin_lock_irq(phba->host->host_lock);
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-		spin_unlock_irq(phba->host->host_lock);
+		spin_unlock_irq(shost->host_lock);
 		goto out;
 	}
 
@@ -743,56 +875,62 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 			/* ELS command is being retried */
 			if (disc) {
-				spin_lock_irq(phba->host->host_lock);
+				spin_lock_irq(shost->host_lock);
 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-				spin_unlock_irq(phba->host->host_lock);
+				spin_unlock_irq(shost->host_lock);
 			}
 			goto out;
 		}
 
 		/* PLOGI failed */
+		if (ndlp->nlp_DID == NameServer_DID) {
+			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+				"%d (%d):0250 Nameserver login error: "
+				"0x%x / 0x%x\n",
+				phba->brd_no, vport->vpi,
+				irsp->ulpStatus, irsp->un.ulpWord[4]);
+		}
+
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		    ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-		     (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
-		     (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+		if (lpfc_error_lost_link(irsp)) {
 			rc = NLP_STE_FREED_NODE;
 		} else {
-			rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 						     NLP_EVT_CMPL_PLOGI);
 		}
 	} else {
 		/* Good status, call state machine */
 		prsp = list_entry(((struct lpfc_dmabuf *)
 				   cmdiocb->context2)->list.next,
 				  struct lpfc_dmabuf, list);
-		ndlp = lpfc_plogi_confirm_nport(phba, prsp, ndlp);
-		rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
+		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 					     NLP_EVT_CMPL_PLOGI);
 	}
 
-	if (disc && phba->num_disc_nodes) {
+	if (disc && vport->num_disc_nodes) {
 		/* Check to see if there are more PLOGIs to be sent */
-		lpfc_more_plogi(phba);
+		lpfc_more_plogi(vport);
 
-		if (phba->num_disc_nodes == 0) {
-			spin_lock_irq(phba->host->host_lock);
-			phba->fc_flag &= ~FC_NDISC_ACTIVE;
-			spin_unlock_irq(phba->host->host_lock);
+		if (vport->num_disc_nodes == 0) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_NDISC_ACTIVE;
+			spin_unlock_irq(shost->host_lock);
 
-			lpfc_can_disctmo(phba);
-			if (phba->fc_flag & FC_RSCN_MODE) {
+			lpfc_can_disctmo(vport);
+			if (vport->fc_flag & FC_RSCN_MODE) {
 				/*
 				 * Check to see if more RSCNs came in while
 				 * we were processing this one.
 				 */
-				if ((phba->fc_rscn_id_cnt == 0) &&
-				    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-					spin_lock_irq(phba->host->host_lock);
-					phba->fc_flag &= ~FC_RSCN_MODE;
-					spin_unlock_irq(phba->host->host_lock);
+				if ((vport->fc_rscn_id_cnt == 0) &&
+				    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+					spin_lock_irq(shost->host_lock);
+					vport->fc_flag &= ~FC_RSCN_MODE;
+					spin_unlock_irq(shost->host_lock);
 				} else {
-					lpfc_els_handle_rscn(phba);
+					lpfc_els_handle_rscn(vport);
 				}
 			}
 		}
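Two lock conversions run through these hunks: per-port flag words (nlp_flag, fc_flag) are now serialized by the Scsi_Host lock obtained via lpfc_shost_from_vport(vport), while HBA-wide SLI structures such as the txcmplq move under a dedicated phba->hbalock. A rough user-space sketch of that split, with pthread mutexes standing in for the kernel spinlocks and invented field names where the diff does not show them:

    #include <pthread.h>
    #include <stdio.h>

    #define FC_RSCN_MODE 0x1

    /* Stand-ins: one lock per virtual port's host, one per HBA. */
    struct vport_host { pthread_mutex_t host_lock; unsigned int fc_flag; };
    struct hba { pthread_mutex_t hbalock; int txcmplq_len; };

    /* Per-port flags are protected by that port's host lock... */
    static void clear_rscn_mode(struct vport_host *shost)
    {
    	pthread_mutex_lock(&shost->host_lock);
    	shost->fc_flag &= ~FC_RSCN_MODE;
    	pthread_mutex_unlock(&shost->host_lock);
    }

    /* ...while HBA-wide queues are protected by the HBA lock. */
    static void walk_txcmplq(struct hba *phba)
    {
    	pthread_mutex_lock(&phba->hbalock);
    	/* iterate and abort matching iocbs here */
    	pthread_mutex_unlock(&phba->hbalock);
    }

    int main(void)
    {
    	struct vport_host shost = { PTHREAD_MUTEX_INITIALIZER, FC_RSCN_MODE };
    	struct hba phba = { PTHREAD_MUTEX_INITIALIZER, 0 };

    	clear_rscn_mode(&shost);
    	walk_txcmplq(&phba);
    	printf("fc_flag now 0x%x\n", shost.fc_flag);
    	return 0;
    }

The design point is that two vports flipping their own flags no longer contend on a single HBA-wide lock.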
@@ -804,8 +942,9 @@ out:
 }
 
 int
-lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
+lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct serv_parm *sp;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
@@ -813,13 +952,14 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
+	int ret;
 
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
-	cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, NULL, did,
+	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, NULL, did,
 				     ELS_CMD_PLOGI);
 	if (!elsiocb)
 		return 1;
 
@@ -828,8 +968,8 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
 
 	/* For PLOGI request, remainder of payload is service parameters */
 	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
-	pcmd += sizeof (uint32_t);
-	memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 	sp = (struct serv_parm *) pcmd;
 
 	if (sp->cmn.fcphLow < FC_PH_4_3)
@@ -838,22 +978,27 @@ lpfc_issue_els_plogi(struct lpfc_hba * phba, uint32_t did, uint8_t retry)
 	if (sp->cmn.fcphHigh < FC_PH3)
 		sp->cmn.fcphHigh = FC_PH3;
 
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue PLOGI: did:x%x",
+		did, 0, 0);
+
 	phba->fc_stat.elsXmitPLOGI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-	spin_lock_irq(phba->host->host_lock);
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-		spin_unlock_irq(phba->host->host_lock);
+	ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+
+	if (ret == IOCB_ERROR) {
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
-	spin_unlock_irq(phba->host->host_lock);
 	return 0;
 }
 
 static void
-lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		   struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_iocbq *rspiocb)
 {
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	struct lpfc_sli *psli;
 	struct lpfc_nodelist *ndlp;
@@ -864,21 +1009,26 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 
 	irsp = &(rspiocb->iocb);
 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-	spin_lock_irq(phba->host->host_lock);
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~NLP_PRLI_SND;
-	spin_unlock_irq(phba->host->host_lock);
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"PRLI cmpl: status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
 
 	/* PRLI completes to NPort <nlp_DID> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0103 PRLI completes to NPort x%x "
+			"%d (%d):0103 PRLI completes to NPort x%x "
 			"Data: x%x x%x x%x x%x\n",
-			phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-			irsp->un.ulpWord[4], irsp->ulpTimeout,
-			phba->num_disc_nodes);
+			phba->brd_no, vport->vpi, ndlp->nlp_DID,
+			irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+			vport->num_disc_nodes);
 
-	phba->fc_prli_sent--;
+	vport->fc_prli_sent--;
 	/* Check to see if link went down during discovery */
-	if (lpfc_els_chk_latt(phba))
+	if (lpfc_els_chk_latt(vport))
 		goto out;
 
 	if (irsp->ulpStatus) {
@@ -889,18 +1039,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		}
 		/* PRLI failed */
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		    ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-		     (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
-		     (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+		if (lpfc_error_lost_link(irsp)) {
 			goto out;
 		} else {
-			lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 						NLP_EVT_CMPL_PRLI);
 		}
 	} else {
 		/* Good status, call state machine */
-		lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_PRLI);
 	}
 
 out:
@@ -909,9 +1057,11 @@ out:
 }
 
 int
-lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		    uint8_t retry)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
 	PRLI *npr;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
@@ -923,9 +1073,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 
-	cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
+	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_PRLI);
 	if (!elsiocb)
 		return 1;
 
@@ -933,9 +1083,9 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 	/* For PRLI request, remainder of payload is service parameters */
-	memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
+	memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
 	*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
-	pcmd += sizeof (uint32_t);
+	pcmd += sizeof(uint32_t);
 
 	/* For PRLI, remainder of payload is PRLI parameter page */
 	npr = (PRLI *) pcmd;
@@ -955,81 +1105,88 @@ lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 	npr->prliType = PRLI_FCP_TYPE;
 	npr->initiatorFunc = 1;
 
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue PRLI: did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
 	phba->fc_stat.elsXmitPRLI++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
-	spin_lock_irq(phba->host->host_lock);
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_PRLI_SND;
+	spin_unlock_irq(shost->host_lock);
 	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_PRLI_SND;
-		spin_unlock_irq(phba->host->host_lock);
+		spin_unlock_irq(shost->host_lock);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
-	spin_unlock_irq(phba->host->host_lock);
-	phba->fc_prli_sent++;
+	vport->fc_prli_sent++;
 	return 0;
 }
 
 static void
-lpfc_more_adisc(struct lpfc_hba * phba)
+lpfc_more_adisc(struct lpfc_vport *vport)
 {
 	int sentadisc;
+	struct lpfc_hba *phba = vport->phba;
 
-	if (phba->num_disc_nodes)
-		phba->num_disc_nodes--;
+	if (vport->num_disc_nodes)
+		vport->num_disc_nodes--;
 
 	/* Continue discovery with <num_disc_nodes> ADISCs to go */
 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-			"%d:0210 Continue discovery with %d ADISCs to go "
+			"%d (%d):0210 Continue discovery with %d ADISCs to go "
 			"Data: x%x x%x x%x\n",
-			phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
-			phba->fc_flag, phba->hba_state);
+			phba->brd_no, vport->vpi, vport->num_disc_nodes,
+			vport->fc_adisc_cnt, vport->fc_flag, vport->port_state);
 
 	/* Check to see if there are more ADISCs to be sent */
-	if (phba->fc_flag & FC_NLP_MORE) {
-		lpfc_set_disctmo(phba);
-
-		/* go thru NPR list and issue any remaining ELS ADISCs */
-		sentadisc = lpfc_els_disc_adisc(phba);
+	if (vport->fc_flag & FC_NLP_MORE) {
+		lpfc_set_disctmo(vport);
+		/* go thru NPR nodes and issue any remaining ELS ADISCs */
+		sentadisc = lpfc_els_disc_adisc(vport);
 	}
 	return;
 }
 
 static void
-lpfc_rscn_disc(struct lpfc_hba * phba)
+lpfc_rscn_disc(struct lpfc_vport *vport)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	lpfc_can_disctmo(vport);
+
 	/* RSCN discovery */
-	/* go thru NPR list and issue ELS PLOGIs */
-	if (phba->fc_npr_cnt) {
-		if (lpfc_els_disc_plogi(phba))
+	/* go thru NPR nodes and issue ELS PLOGIs */
+	if (vport->fc_npr_cnt)
+		if (lpfc_els_disc_plogi(vport))
 			return;
-	}
-	if (phba->fc_flag & FC_RSCN_MODE) {
+
+	if (vport->fc_flag & FC_RSCN_MODE) {
 		/* Check to see if more RSCNs came in while we were
 		 * processing this one.
 		 */
-		if ((phba->fc_rscn_id_cnt == 0) &&
-		    (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
-			spin_lock_irq(phba->host->host_lock);
-			phba->fc_flag &= ~FC_RSCN_MODE;
-			spin_unlock_irq(phba->host->host_lock);
+		if ((vport->fc_rscn_id_cnt == 0) &&
+		    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
 		} else {
-			lpfc_els_handle_rscn(phba);
+			lpfc_els_handle_rscn(vport);
 		}
 	}
 }
 
 static void
-lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		    struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		    struct lpfc_iocbq *rspiocb)
 {
+	struct lpfc_vport *vport = cmdiocb->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
-	struct lpfc_sli *psli;
 	struct lpfc_nodelist *ndlp;
-	LPFC_MBOXQ_t *mbox;
-	int disc, rc;
-
-	psli = &phba->sli;
+	int disc;
 
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1037,27 +1194,32 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	irsp = &(rspiocb->iocb);
 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
 
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"ADISC cmpl: status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
+
 	/* Since ndlp can be freed in the disc state machine, note if this node
 	 * is being used during discovery.
 	 */
+	spin_lock_irq(shost->host_lock);
 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
-	spin_lock_irq(phba->host->host_lock);
 	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
-	spin_unlock_irq(phba->host->host_lock);
+	spin_unlock_irq(shost->host_lock);
 
 	/* ADISC completes to NPort <nlp_DID> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0104 ADISC completes to NPort x%x "
+			"%d (%d):0104 ADISC completes to NPort x%x "
 			"Data: x%x x%x x%x x%x x%x\n",
-			phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-			irsp->un.ulpWord[4], irsp->ulpTimeout, disc,
-			phba->num_disc_nodes);
+			phba->brd_no, vport->vpi, ndlp->nlp_DID,
+			irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+			disc, vport->num_disc_nodes);
 
 	/* Check to see if link went down during discovery */
-	if (lpfc_els_chk_latt(phba)) {
-		spin_lock_irq(phba->host->host_lock);
+	if (lpfc_els_chk_latt(vport)) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-		spin_unlock_irq(phba->host->host_lock);
+		spin_unlock_irq(shost->host_lock);
 		goto out;
 	}
 
@@ -1066,67 +1228,68 @@ lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
 			/* ELS command is being retried */
 			if (disc) {
-				spin_lock_irq(phba->host->host_lock);
+				spin_lock_irq(shost->host_lock);
 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
-				spin_unlock_irq(phba->host->host_lock);
-				lpfc_set_disctmo(phba);
+				spin_unlock_irq(shost->host_lock);
+				lpfc_set_disctmo(vport);
 			}
 			goto out;
 		}
 		/* ADISC failed */
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
-		    ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
-		     (irsp->un.ulpWord[4] != IOERR_LINK_DOWN) &&
-		     (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
-			lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-						NLP_EVT_CMPL_ADISC);
+		if (!lpfc_error_lost_link(irsp)) {
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_ADISC);
 		}
 	} else {
 		/* Good status, call state machine */
-		lpfc_disc_state_machine(phba, ndlp, cmdiocb,
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
 					NLP_EVT_CMPL_ADISC);
 	}
 
-	if (disc && phba->num_disc_nodes) {
+	if (disc && vport->num_disc_nodes) {
 		/* Check to see if there are more ADISCs to be sent */
-		lpfc_more_adisc(phba);
+		lpfc_more_adisc(vport);
 
 		/* Check to see if we are done with ADISC authentication */
-		if (phba->num_disc_nodes == 0) {
-			lpfc_can_disctmo(phba);
-			/* If we get here, there is nothing left to wait for */
-			if ((phba->hba_state < LPFC_HBA_READY) &&
-			    (phba->hba_state != LPFC_CLEAR_LA)) {
-				/* Link up discovery */
-				if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-							  GFP_KERNEL))) {
-					phba->hba_state = LPFC_CLEAR_LA;
-					lpfc_clear_la(phba, mbox);
-					mbox->mbox_cmpl =
-					    lpfc_mbx_cmpl_clear_la;
-					rc = lpfc_sli_issue_mbox
-						(phba, mbox,
-						 (MBX_NOWAIT | MBX_STOP_IOCB));
-					if (rc == MBX_NOT_FINISHED) {
-						mempool_free(mbox,
-							phba->mbox_mem_pool);
-						lpfc_disc_flush_list(phba);
-						psli->ring[(psli->extra_ring)].
-						    flag &=
-						    ~LPFC_STOP_IOCB_EVENT;
-						psli->ring[(psli->fcp_ring)].
-						    flag &=
-						    ~LPFC_STOP_IOCB_EVENT;
-						psli->ring[(psli->next_ring)].
-						    flag &=
-						    ~LPFC_STOP_IOCB_EVENT;
-						phba->hba_state =
-						    LPFC_HBA_READY;
+		if (vport->num_disc_nodes == 0) {
+			/* If we get here, there is nothing left to ADISC */
+			/*
+			 * For NPIV, cmpl_reg_vpi will set port_state to READY,
+			 * and continue discovery.
+			 */
+			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+			    !(vport->fc_flag & FC_RSCN_MODE)) {
+				lpfc_issue_reg_vpi(phba, vport);
+				goto out;
+			}
+			/*
+			 * For SLI2, we need to set port_state to READY
+			 * and continue discovery.
+			 */
+			if (vport->port_state < LPFC_VPORT_READY) {
+				/* If we get here, there is nothing to ADISC */
+				if (vport->port_type == LPFC_PHYSICAL_PORT)
+					lpfc_issue_clear_la(phba, vport);
+
+				if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
+					vport->num_disc_nodes = 0;
+					/* go thru NPR list, issue ELS PLOGIs */
+					if (vport->fc_npr_cnt)
+						lpfc_els_disc_plogi(vport);
+
+					if (!vport->num_disc_nodes) {
+						spin_lock_irq(shost->host_lock);
+						vport->fc_flag &=
+							~FC_NDISC_ACTIVE;
+						spin_unlock_irq(
+							shost->host_lock);
+						lpfc_can_disctmo(vport);
+					}
 				}
 			}
+				vport->port_state = LPFC_VPORT_READY;
 			} else {
-				lpfc_rscn_disc(phba);
+				lpfc_rscn_disc(vport);
 			}
 		}
 	}
@@ -1136,23 +1299,22 @@ out:
 }
 
 int
-lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		     uint8_t retry)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
 	ADISC *ap;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-
-	cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
-				     ndlp->nlp_DID, ELS_CMD_ADISC);
+	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ADISC);
 	if (!elsiocb)
 		return 1;
 
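Every issue path in this file (FLOGI, PLOGI, PRLI, ADISC, LOGO, SCR, FARPR) follows the same choreography: prep an iocb, write the ELS command word and payload, attach a completion handler, and submit; on submit failure the issuer frees the iocb itself, while on success ownership passes to the completion handler, which frees it via lpfc_els_free_iocb(). A hedged user-space miniature of that ownership rule follows; all names here are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct iocb {
    	void (*cmpl)(struct iocb *);
    	unsigned char payload[64];
    };

    static void logo_cmpl(struct iocb *io)
    {
    	puts("LOGO completed");
    	free(io);			/* success path: completion frees */
    }

    static int issue_iocb(struct iocb *io, int simulate_error)
    {
    	if (simulate_error)
    		return -1;		/* nothing consumed the iocb */
    	io->cmpl(io);			/* pretend the HBA completed it */
    	return 0;
    }

    static int issue_logo(unsigned int did, int simulate_error)
    {
    	struct iocb *io = calloc(1, sizeof(*io));

    	if (!io)
    		return 1;
    	memcpy(io->payload, &did, sizeof(did));	/* fill ELS payload */
    	io->cmpl = logo_cmpl;			/* completion handler */

    	if (issue_iocb(io, simulate_error)) {
    		free(io);	/* error path: issuer frees, as with
    				 * lpfc_els_free_iocb() in the driver */
    		return 1;
    	}
    	return 0;
    }

    int main(void)
    {
    	issue_logo(0x00fffffe, 0);	/* completes and frees itself */
    	if (issue_logo(0x00fffffe, 1))
    		puts("issue failed: iocb freed on the error path");
    	return 0;
    }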
@@ -1161,81 +1323,97 @@ lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
 
 	/* For ADISC request, remainder of payload is service parameters */
 	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
-	pcmd += sizeof (uint32_t);
+	pcmd += sizeof(uint32_t);
 
 	/* Fill in ADISC payload */
 	ap = (ADISC *) pcmd;
 	ap->hardAL_PA = phba->fc_pref_ALPA;
-	memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
-	memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
-	ap->DID = be32_to_cpu(phba->fc_myDID);
+	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ap->DID = be32_to_cpu(vport->fc_myDID);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue ADISC: did:x%x",
+		ndlp->nlp_DID, 0, 0);
 
 	phba->fc_stat.elsXmitADISC++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
-	spin_lock_irq(phba->host->host_lock);
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_ADISC_SND;
+	spin_unlock_irq(shost->host_lock);
 	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
-		spin_unlock_irq(phba->host->host_lock);
+		spin_unlock_irq(shost->host_lock);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
-	spin_unlock_irq(phba->host->host_lock);
 	return 0;
 }
 
 static void
-lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		   struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_iocbq *rspiocb)
 {
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	struct lpfc_sli *psli;
-	struct lpfc_nodelist *ndlp;
 
 	psli = &phba->sli;
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 
 	irsp = &(rspiocb->iocb);
-	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
-	spin_lock_irq(phba->host->host_lock);
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag &= ~NLP_LOGO_SND;
-	spin_unlock_irq(phba->host->host_lock);
+	spin_unlock_irq(shost->host_lock);
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"LOGO cmpl: status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		ndlp->nlp_DID);
 
 	/* LOGO completes to NPort <nlp_DID> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0105 LOGO completes to NPort x%x "
+			"%d (%d):0105 LOGO completes to NPort x%x "
 			"Data: x%x x%x x%x x%x\n",
-			phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
-			irsp->un.ulpWord[4], irsp->ulpTimeout,
-			phba->num_disc_nodes);
+			phba->brd_no, vport->vpi, ndlp->nlp_DID,
+			irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout,
+			vport->num_disc_nodes);
 
 	/* Check to see if link went down during discovery */
-	if (lpfc_els_chk_latt(phba))
+	if (lpfc_els_chk_latt(vport))
 		goto out;
 
+	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
+		/* NLP_EVT_DEVICE_RM should unregister the RPI
+		 * which should abort all outstanding IOs.
+		 */
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_DEVICE_RM);
+		goto out;
+	}
+
 	if (irsp->ulpStatus) {
 		/* Check for retry */
-		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
+		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			/* ELS command is being retried */
 			goto out;
-		}
 		/* LOGO failed */
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
-		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-		    ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
-		     (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
-		     (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
+		if (lpfc_error_lost_link(irsp))
 			goto out;
-		} else {
-			lpfc_disc_state_machine(phba, ndlp, cmdiocb,
-						NLP_EVT_CMPL_LOGO);
-		}
+		else
+			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+						NLP_EVT_CMPL_LOGO);
 	} else {
 		/* Good status, call state machine.
 		 * This will unregister the rpi if needed.
 		 */
-		lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
+		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
+					NLP_EVT_CMPL_LOGO);
 	}
 
 out:
@@ -1244,75 +1422,91 @@ out:
 }
 
 int
-lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
+lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 		    uint8_t retry)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
+	int rc;
 
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];
 
-	cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name);
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
+	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_LOGO);
 	if (!elsiocb)
 		return 1;
 
 	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
-	pcmd += sizeof (uint32_t);
+	pcmd += sizeof(uint32_t);
 
 	/* Fill in LOGO payload */
-	*((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
-	pcmd += sizeof (uint32_t);
-	memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
+	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue LOGO: did:x%x",
+		ndlp->nlp_DID, 0, 0);
 
 	phba->fc_stat.elsXmitLOGO++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
-	spin_lock_irq(phba->host->host_lock);
+	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	spin_unlock_irq(shost->host_lock);
+	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+
+	if (rc == IOCB_ERROR) {
+		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
-		spin_unlock_irq(phba->host->host_lock);
+		spin_unlock_irq(shost->host_lock);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
-	spin_unlock_irq(phba->host->host_lock);
 	return 0;
 }
 
 static void
-lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		  struct lpfc_iocbq * rspiocb)
+lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_iocbq *rspiocb)
 {
+	struct lpfc_vport *vport = cmdiocb->vport;
 	IOCB_t *irsp;
 
 	irsp = &rspiocb->iocb;
 
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"ELS cmd cmpl: status:x%x/x%x did:x%x",
+		irsp->ulpStatus, irsp->un.ulpWord[4],
+		irsp->un.elsreq64.remoteID);
+
 	/* ELS cmd tag <ulpIoTag> completes */
-	lpfc_printf_log(phba,
-			KERN_INFO,
-			LOG_ELS,
-			"%d:0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
-			phba->brd_no,
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"%d (%d):0106 ELS cmd tag x%x completes Data: x%x x%x "
+			"x%x\n",
+			phba->brd_no, vport->vpi,
 			irsp->ulpIoTag, irsp->ulpStatus,
 			irsp->un.ulpWord[4], irsp->ulpTimeout);
 
 	/* Check to see if link went down during discovery */
-	lpfc_els_chk_latt(phba);
+	lpfc_els_chk_latt(vport);
 	lpfc_els_free_iocb(phba, cmdiocb);
 	return;
 }
 
 int
-lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 {
+	struct lpfc_hba *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_sli_ring *pring;
@@ -1323,15 +1517,16 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
 
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-	cmdsize = (sizeof (uint32_t) + sizeof (SCR));
+	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 	if (!ndlp)
 		return 1;
 
-	lpfc_nlp_init(phba, ndlp, nportid);
+	lpfc_nlp_init(vport, ndlp, nportid);
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_SCR);
 
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
-				     ndlp->nlp_DID, ELS_CMD_SCR);
 	if (!elsiocb) {
 		lpfc_nlp_put(ndlp);
 		return 1;
@@ -1341,29 +1536,31 @@ lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
-	pcmd += sizeof (uint32_t);
+	pcmd += sizeof(uint32_t);
 
 	/* For SCR, remainder of payload is SCR parameter page */
-	memset(pcmd, 0, sizeof (SCR));
+	memset(pcmd, 0, sizeof(SCR));
 	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
 
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue SCR: did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
 	phba->fc_stat.elsXmitSCR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	spin_lock_irq(phba->host->host_lock);
 	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-		spin_unlock_irq(phba->host->host_lock);
 		lpfc_nlp_put(ndlp);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
-	spin_unlock_irq(phba->host->host_lock);
 	lpfc_nlp_put(ndlp);
 	return 0;
 }
 
 static int
-lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
+lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 {
+	struct lpfc_hba *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_sli_ring *pring;
@@ -1377,14 +1574,15 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
 
 	psli = &phba->sli;
 	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
-	cmdsize = (sizeof (uint32_t) + sizeof (FARP));
+	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 	ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 	if (!ndlp)
 		return 1;
-	lpfc_nlp_init(phba, ndlp, nportid);
 
-	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp,
-				     ndlp->nlp_DID, ELS_CMD_RNID);
+	lpfc_nlp_init(vport, ndlp, nportid);
+
+	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_RNID);
 	if (!elsiocb) {
 		lpfc_nlp_put(ndlp);
 		return 1;
@@ -1394,44 +1592,71 @@ lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
-	pcmd += sizeof (uint32_t);
+	pcmd += sizeof(uint32_t);
 
 	/* Fill in FARPR payload */
 	fp = (FARP *) (pcmd);
-	memset(fp, 0, sizeof (FARP));
+	memset(fp, 0, sizeof(FARP));
 	lp = (uint32_t *) pcmd;
 	*lp++ = be32_to_cpu(nportid);
-	*lp++ = be32_to_cpu(phba->fc_myDID);
+	*lp++ = be32_to_cpu(vport->fc_myDID);
 	fp->Rflags = 0;
 	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
 
-	memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
-	memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
-	if ((ondlp = lpfc_findnode_did(phba, nportid))) {
+	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
+	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
+	ondlp = lpfc_findnode_did(vport, nportid);
+	if (ondlp) {
 		memcpy(&fp->OportName, &ondlp->nlp_portname,
-		       sizeof (struct lpfc_name));
+		       sizeof(struct lpfc_name));
 		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
-		       sizeof (struct lpfc_name));
+		       sizeof(struct lpfc_name));
 	}
 
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+		"Issue FARPR: did:x%x",
+		ndlp->nlp_DID, 0, 0);
+
 	phba->fc_stat.elsXmitFARPR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	spin_lock_irq(phba->host->host_lock);
 	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
-		spin_unlock_irq(phba->host->host_lock);
 		lpfc_nlp_put(ndlp);
 		lpfc_els_free_iocb(phba, elsiocb);
 		return 1;
 	}
-	spin_unlock_irq(phba->host->host_lock);
 	lpfc_nlp_put(ndlp);
 	return 0;
 }
 
+static void
+lpfc_end_rscn(struct lpfc_vport *vport)
+{
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	if (vport->fc_flag & FC_RSCN_MODE) {
+		/*
+		 * Check to see if more RSCNs came in while we were
+		 * processing this one.
+		 */
+		if (vport->fc_rscn_id_cnt ||
+		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
+			lpfc_els_handle_rscn(vport);
+		else {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag &= ~FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
+		}
+	}
+}
+
 void
-lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
+lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+	spin_lock_irq(shost->host_lock);
 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
+	spin_unlock_irq(shost->host_lock);
 	del_timer_sync(&nlp->nlp_delayfunc);
 	nlp->nlp_last_elscmd = 0;
 
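The new lpfc_end_rscn() helper above centralizes a check that lpfc_cancel_retry_delay_tmo() used to open-code (and that still appears inline in the PLOGI completion path earlier in this file): after discovery drains, either process the RSCNs that piled up while we were busy, or drop out of RSCN mode. A small sketch of that decision, with the vport state reduced to the two fields the logic actually reads:

    #include <stdio.h>

    #define FC_RSCN_MODE      0x1
    #define FC_RSCN_DISCOVERY 0x2

    struct vport_state {
    	unsigned int fc_flag;
    	int fc_rscn_id_cnt;	/* RSCNs queued while we were busy */
    };

    static void handle_rscn(struct vport_state *v)
    {
    	printf("processing %d deferred RSCN(s)\n", v->fc_rscn_id_cnt);
    	v->fc_rscn_id_cnt = 0;
    }

    /* Mirror of the lpfc_end_rscn() decision: either keep going with
     * the RSCNs that arrived meanwhile, or leave RSCN mode. */
    static void end_rscn(struct vport_state *v)
    {
    	if (!(v->fc_flag & FC_RSCN_MODE))
    		return;
    	if (v->fc_rscn_id_cnt || (v->fc_flag & FC_RSCN_DISCOVERY))
    		handle_rscn(v);
    	else
    		v->fc_flag &= ~FC_RSCN_MODE;
    }

    int main(void)
    {
    	struct vport_state v = { FC_RSCN_MODE, 2 };

    	end_rscn(&v);	/* processes the two deferred RSCNs */
    	v.fc_flag = FC_RSCN_MODE;
    	end_rscn(&v);	/* nothing pending: clears FC_RSCN_MODE */
    	printf("fc_flag 0x%x\n", v.fc_flag);
    	return 0;
    }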
@@ -1439,30 +1664,21 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
 	list_del_init(&nlp->els_retry_evt.evt_listp);
 
 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
+		spin_lock_irq(shost->host_lock);
 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
-		if (phba->num_disc_nodes) {
+		spin_unlock_irq(shost->host_lock);
+		if (vport->num_disc_nodes) {
 			/* Check to see if there are more
 			 * PLOGIs to be sent
 			 */
-			lpfc_more_plogi(phba);
+			lpfc_more_plogi(vport);
 
-			if (phba->num_disc_nodes == 0) {
-				phba->fc_flag &= ~FC_NDISC_ACTIVE;
-				lpfc_can_disctmo(phba);
-				if (phba->fc_flag & FC_RSCN_MODE) {
-					/*
-					 * Check to see if more RSCNs
-					 * came in while we were
-					 * processing this one.
-					 */
-					if((phba->fc_rscn_id_cnt==0) &&
-					   !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
-						phba->fc_flag &= ~FC_RSCN_MODE;
-					}
-					else {
-						lpfc_els_handle_rscn(phba);
-					}
-				}
+			if (vport->num_disc_nodes == 0) {
+				spin_lock_irq(shost->host_lock);
+				vport->fc_flag &= ~FC_NDISC_ACTIVE;
+				spin_unlock_irq(shost->host_lock);
+				lpfc_can_disctmo(vport);
+				lpfc_end_rscn(vport);
 			}
 		}
 	}
@@ -1472,18 +1688,19 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp)
 void
 lpfc_els_retry_delay(unsigned long ptr)
 {
-	struct lpfc_nodelist *ndlp;
-	struct lpfc_hba *phba;
-	unsigned long iflag;
-	struct lpfc_work_evt *evtp;
-
-	ndlp = (struct lpfc_nodelist *)ptr;
-	phba = ndlp->nlp_phba;
+	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
+	struct lpfc_vport *vport = ndlp->vport;
+	struct lpfc_hba *phba = vport->phba;
+	unsigned long flags;
+	struct lpfc_work_evt *evtp = &ndlp->els_retry_evt;
+
+	ndlp = (struct lpfc_nodelist *) ptr;
+	phba = ndlp->vport->phba;
 	evtp = &ndlp->els_retry_evt;
 
-	spin_lock_irqsave(phba->host->host_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, flags);
 	if (!list_empty(&evtp->evt_listp)) {
-		spin_unlock_irqrestore(phba->host->host_lock, iflag);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
 
@@ -1491,33 +1708,31 @@ lpfc_els_retry_delay(unsigned long ptr)
1491 evtp->evt = LPFC_EVT_ELS_RETRY; 1708 evtp->evt = LPFC_EVT_ELS_RETRY;
1492 list_add_tail(&evtp->evt_listp, &phba->work_list); 1709 list_add_tail(&evtp->evt_listp, &phba->work_list);
1493 if (phba->work_wait) 1710 if (phba->work_wait)
1494 wake_up(phba->work_wait); 1711 lpfc_worker_wake_up(phba);
1495 1712
1496 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1713 spin_unlock_irqrestore(&phba->hbalock, flags);
1497 return; 1714 return;
1498} 1715}
1499 1716
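lpfc_els_retry_delay() runs in timer context, so the new code only queues an LPFC_EVT_ELS_RETRY work event under phba->hbalock and wakes the worker via lpfc_worker_wake_up(); the ELS itself is reissued later by lpfc_els_retry_delay_handler(). A rough userspace analog of that hand-off, using a mutex and condition variable in place of the hbalock and worker wakeup (all names here are hypothetical):

/* Hypothetical analog of the timer->worker hand-off; not driver code. */
#include <pthread.h>
#include <stdbool.h>

struct work_evt {
	struct work_evt *next;
	int type;			/* e.g. EVT_ELS_RETRY */
	bool queued;			/* analog of !list_empty(&evt_listp) */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static struct work_evt *work_list;

/* Called from "timer" context: queue at most once, then wake the worker. */
static void els_retry_timeout(struct work_evt *evtp)
{
	pthread_mutex_lock(&lock);
	if (evtp->queued) {		/* already pending: nothing to do */
		pthread_mutex_unlock(&lock);
		return;
	}
	evtp->queued = true;
	evtp->next = work_list;
	work_list = evtp;
	pthread_cond_signal(&wake);	/* analog of lpfc_worker_wake_up() */
	pthread_mutex_unlock(&lock);
}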
1500void 1717void
1501lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 1718lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1502{ 1719{
1503 struct lpfc_hba *phba; 1720 struct lpfc_vport *vport = ndlp->vport;
1504 uint32_t cmd; 1721 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1505 uint32_t did; 1722 uint32_t cmd, did, retry;
1506 uint8_t retry;
1507 1723
1508 phba = ndlp->nlp_phba; 1724 spin_lock_irq(shost->host_lock);
1509 spin_lock_irq(phba->host->host_lock);
1510 did = ndlp->nlp_DID; 1725 did = ndlp->nlp_DID;
1511 cmd = ndlp->nlp_last_elscmd; 1726 cmd = ndlp->nlp_last_elscmd;
1512 ndlp->nlp_last_elscmd = 0; 1727 ndlp->nlp_last_elscmd = 0;
1513 1728
1514 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1729 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1515 spin_unlock_irq(phba->host->host_lock); 1730 spin_unlock_irq(shost->host_lock);
1516 return; 1731 return;
1517 } 1732 }
1518 1733
1519 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 1734 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1520 spin_unlock_irq(phba->host->host_lock); 1735 spin_unlock_irq(shost->host_lock);
1521 /* 1736 /*
1522 * If a discovery event readded nlp_delayfunc after timer 1737 * If a discovery event readded nlp_delayfunc after timer
1523 * firing and before processing the timer, cancel the 1738 * firing and before processing the timer, cancel the
@@ -1528,57 +1743,54 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
1528 1743
1529 switch (cmd) { 1744 switch (cmd) {
1530 case ELS_CMD_FLOGI: 1745 case ELS_CMD_FLOGI:
1531 lpfc_issue_els_flogi(phba, ndlp, retry); 1746 lpfc_issue_els_flogi(vport, ndlp, retry);
1532 break; 1747 break;
1533 case ELS_CMD_PLOGI: 1748 case ELS_CMD_PLOGI:
1534 if(!lpfc_issue_els_plogi(phba, ndlp->nlp_DID, retry)) { 1749 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
1535 ndlp->nlp_prev_state = ndlp->nlp_state; 1750 ndlp->nlp_prev_state = ndlp->nlp_state;
1536 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1751 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1537 } 1752 }
1538 break; 1753 break;
1539 case ELS_CMD_ADISC: 1754 case ELS_CMD_ADISC:
1540 if (!lpfc_issue_els_adisc(phba, ndlp, retry)) { 1755 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
1541 ndlp->nlp_prev_state = ndlp->nlp_state; 1756 ndlp->nlp_prev_state = ndlp->nlp_state;
1542 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 1757 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1543 } 1758 }
1544 break; 1759 break;
1545 case ELS_CMD_PRLI: 1760 case ELS_CMD_PRLI:
1546 if (!lpfc_issue_els_prli(phba, ndlp, retry)) { 1761 if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
1547 ndlp->nlp_prev_state = ndlp->nlp_state; 1762 ndlp->nlp_prev_state = ndlp->nlp_state;
1548 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); 1763 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1549 } 1764 }
1550 break; 1765 break;
1551 case ELS_CMD_LOGO: 1766 case ELS_CMD_LOGO:
1552 if (!lpfc_issue_els_logo(phba, ndlp, retry)) { 1767 if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
1553 ndlp->nlp_prev_state = ndlp->nlp_state; 1768 ndlp->nlp_prev_state = ndlp->nlp_state;
1554 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1769 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1555 } 1770 }
1556 break; 1771 break;
1772 case ELS_CMD_FDISC:
1773 lpfc_issue_els_fdisc(vport, ndlp, retry);
1774 break;
1557 } 1775 }
1558 return; 1776 return;
1559} 1777}
1560 1778
1561static int 1779static int
1562lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 1780lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1563 struct lpfc_iocbq * rspiocb) 1781 struct lpfc_iocbq *rspiocb)
1564{ 1782{
1565 IOCB_t *irsp; 1783 struct lpfc_vport *vport = cmdiocb->vport;
1566 struct lpfc_dmabuf *pcmd; 1784 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1567 struct lpfc_nodelist *ndlp; 1785 IOCB_t *irsp = &rspiocb->iocb;
1786 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1787 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1568 uint32_t *elscmd; 1788 uint32_t *elscmd;
1569 struct ls_rjt stat; 1789 struct ls_rjt stat;
1570 int retry, maxretry; 1790 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
1571 int delay; 1791 uint32_t cmd = 0;
1572 uint32_t cmd;
1573 uint32_t did; 1792 uint32_t did;
1574 1793
1575 retry = 0;
1576 delay = 0;
1577 maxretry = lpfc_max_els_tries;
1578 irsp = &rspiocb->iocb;
1579 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1580 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1581 cmd = 0;
1582 1794
1583 /* Note: context2 may be 0 for internal driver abort 1795 /* Note: context2 may be 0 for internal driver abort
 1584 	 * of delayed ELS command. 1796 	 * of delayed ELS command.
@@ -1594,11 +1806,15 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1594 else { 1806 else {
1595 /* We should only hit this case for retrying PLOGI */ 1807 /* We should only hit this case for retrying PLOGI */
1596 did = irsp->un.elsreq64.remoteID; 1808 did = irsp->un.elsreq64.remoteID;
1597 ndlp = lpfc_findnode_did(phba, did); 1809 ndlp = lpfc_findnode_did(vport, did);
1598 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 1810 if (!ndlp && (cmd != ELS_CMD_PLOGI))
1599 return 1; 1811 return 1;
1600 } 1812 }
1601 1813
1814 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1815 "Retry ELS: wd7:x%x wd4:x%x did:x%x",
1816 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
1817
1602 switch (irsp->ulpStatus) { 1818 switch (irsp->ulpStatus) {
1603 case IOSTAT_FCP_RSP_ERROR: 1819 case IOSTAT_FCP_RSP_ERROR:
1604 case IOSTAT_REMOTE_STOP: 1820 case IOSTAT_REMOTE_STOP:
@@ -1607,25 +1823,37 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1607 case IOSTAT_LOCAL_REJECT: 1823 case IOSTAT_LOCAL_REJECT:
1608 switch ((irsp->un.ulpWord[4] & 0xff)) { 1824 switch ((irsp->un.ulpWord[4] & 0xff)) {
1609 case IOERR_LOOP_OPEN_FAILURE: 1825 case IOERR_LOOP_OPEN_FAILURE:
1610 if (cmd == ELS_CMD_PLOGI) { 1826 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
1611 if (cmdiocb->retry == 0) { 1827 delay = 1000;
1612 delay = 1;
1613 }
1614 }
1615 retry = 1; 1828 retry = 1;
1616 break; 1829 break;
1617 1830
1618 case IOERR_SEQUENCE_TIMEOUT: 1831 case IOERR_ILLEGAL_COMMAND:
1619 retry = 1; 1832 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) &&
1833 (cmd == ELS_CMD_FDISC)) {
1834 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1835 "%d (%d):0124 FDISC failed (3/6) retrying...\n",
1836 phba->brd_no, vport->vpi);
1837 lpfc_mbx_unreg_vpi(vport);
1838 retry = 1;
1839 /* Always retry for this case */
1840 cmdiocb->retry = 0;
1841 }
1620 break; 1842 break;
1621 1843
1622 case IOERR_NO_RESOURCES: 1844 case IOERR_NO_RESOURCES:
1623 if (cmd == ELS_CMD_PLOGI) { 1845 retry = 1;
1624 delay = 1; 1846 if (cmdiocb->retry > 100)
1625 } 1847 delay = 100;
1848 maxretry = 250;
1849 break;
1850
1851 case IOERR_ILLEGAL_FRAME:
1852 delay = 100;
1626 retry = 1; 1853 retry = 1;
1627 break; 1854 break;
1628 1855
1856 case IOERR_SEQUENCE_TIMEOUT:
1629 case IOERR_INVALID_RPI: 1857 case IOERR_INVALID_RPI:
1630 retry = 1; 1858 retry = 1;
1631 break; 1859 break;
@@ -1655,27 +1883,57 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1655 if (stat.un.b.lsRjtRsnCodeExp == 1883 if (stat.un.b.lsRjtRsnCodeExp ==
1656 LSEXP_CMD_IN_PROGRESS) { 1884 LSEXP_CMD_IN_PROGRESS) {
1657 if (cmd == ELS_CMD_PLOGI) { 1885 if (cmd == ELS_CMD_PLOGI) {
1658 delay = 1; 1886 delay = 1000;
1659 maxretry = 48; 1887 maxretry = 48;
1660 } 1888 }
1661 retry = 1; 1889 retry = 1;
1662 break; 1890 break;
1663 } 1891 }
1664 if (cmd == ELS_CMD_PLOGI) { 1892 if (cmd == ELS_CMD_PLOGI) {
1665 delay = 1; 1893 delay = 1000;
1666 maxretry = lpfc_max_els_tries + 1; 1894 maxretry = lpfc_max_els_tries + 1;
1667 retry = 1; 1895 retry = 1;
1668 break; 1896 break;
1669 } 1897 }
1898 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1899 (cmd == ELS_CMD_FDISC) &&
1900 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
1901 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1902 "%d (%d):0125 FDISC Failed (x%x)."
1903 " Fabric out of resources\n",
1904 phba->brd_no, vport->vpi, stat.un.lsRjtError);
1905 lpfc_vport_set_state(vport,
1906 FC_VPORT_NO_FABRIC_RSCS);
1907 }
1670 break; 1908 break;
1671 1909
1672 case LSRJT_LOGICAL_BSY: 1910 case LSRJT_LOGICAL_BSY:
1673 if (cmd == ELS_CMD_PLOGI) { 1911 if ((cmd == ELS_CMD_PLOGI) ||
1674 delay = 1; 1912 (cmd == ELS_CMD_PRLI)) {
1913 delay = 1000;
1675 maxretry = 48; 1914 maxretry = 48;
1915 } else if (cmd == ELS_CMD_FDISC) {
1916 /* Always retry for this case */
1917 cmdiocb->retry = 0;
1676 } 1918 }
1677 retry = 1; 1919 retry = 1;
1678 break; 1920 break;
1921
1922 case LSRJT_LOGICAL_ERR:
1923 case LSRJT_PROTOCOL_ERR:
1924 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1925 (cmd == ELS_CMD_FDISC) &&
1926 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
1927 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
1928 ) {
1929 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1930 "%d (%d):0123 FDISC Failed (x%x)."
1931 " Fabric Detected Bad WWN\n",
1932 phba->brd_no, vport->vpi, stat.un.lsRjtError);
1933 lpfc_vport_set_state(vport,
1934 FC_VPORT_FABRIC_REJ_WWN);
1935 }
1936 break;
1679 } 1937 }
1680 break; 1938 break;
1681 1939
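Taken together, the status switch above reduces to a small per-error policy: whether to retry, how long to back off (now expressed in milliseconds), and any override of the retry cap, as in the IOERR_NO_RESOURCES case that raises maxretry to 250 but paces attempts past 100 with a 100 ms delay. A condensed sketch of such a policy table, with hypothetical constants standing in for the real IOERR/ELS codes:

/* Condensed, hypothetical sketch of the retry policy; the real codes
 * and cases live in lpfc_els_retry() above. */
struct retry_policy { int retry; int delay_ms; int maxretry; };

static struct retry_policy decide(int err, unsigned int cmd, int attempt,
				  int default_max)
{
	struct retry_policy p = { 0, 0, default_max };

	switch (err) {
	case 1:	/* stand-in for IOERR_LOOP_OPEN_FAILURE */
		p.retry = 1;
		if (cmd == 0x03 /* stand-in for PLOGI */ && attempt == 0)
			p.delay_ms = 1000;	/* back off 1s on first failure */
		break;
	case 2:	/* stand-in for IOERR_NO_RESOURCES */
		p.retry = 1;
		p.maxretry = 250;		/* allow many more attempts... */
		if (attempt > 100)
			p.delay_ms = 100;	/* ...but pace the later ones */
		break;
	}
	return p;
}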
@@ -1695,21 +1953,27 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1695 retry = 0; 1953 retry = 0;
1696 } 1954 }
1697 1955
1956 if ((vport->load_flag & FC_UNLOADING) != 0)
1957 retry = 0;
1958
1698 if (retry) { 1959 if (retry) {
1699 1960
1700 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 1961 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1701 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1962 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1702 "%d:0107 Retry ELS command x%x to remote " 1963 "%d (%d):0107 Retry ELS command x%x to remote "
1703 "NPORT x%x Data: x%x x%x\n", 1964 "NPORT x%x Data: x%x x%x\n",
1704 phba->brd_no, 1965 phba->brd_no, vport->vpi,
1705 cmd, did, cmdiocb->retry, delay); 1966 cmd, did, cmdiocb->retry, delay);
1706 1967
1707 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) { 1968 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
1969 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1970 ((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
1971 /* Don't reset timer for no resources */
1972
1708 /* If discovery / RSCN timer is running, reset it */ 1973 /* If discovery / RSCN timer is running, reset it */
1709 if (timer_pending(&phba->fc_disctmo) || 1974 if (timer_pending(&vport->fc_disctmo) ||
1710 (phba->fc_flag & FC_RSCN_MODE)) { 1975 (vport->fc_flag & FC_RSCN_MODE))
1711 lpfc_set_disctmo(phba); 1976 lpfc_set_disctmo(vport);
1712 }
1713 } 1977 }
1714 1978
1715 phba->fc_stat.elsXmitRetry++; 1979 phba->fc_stat.elsXmitRetry++;
@@ -1717,50 +1981,62 @@ lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1717 phba->fc_stat.elsDelayRetry++; 1981 phba->fc_stat.elsDelayRetry++;
1718 ndlp->nlp_retry = cmdiocb->retry; 1982 ndlp->nlp_retry = cmdiocb->retry;
1719 1983
1720 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1984 /* delay is specified in milliseconds */
1985 mod_timer(&ndlp->nlp_delayfunc,
1986 jiffies + msecs_to_jiffies(delay));
1987 spin_lock_irq(shost->host_lock);
1721 ndlp->nlp_flag |= NLP_DELAY_TMO; 1988 ndlp->nlp_flag |= NLP_DELAY_TMO;
1989 spin_unlock_irq(shost->host_lock);
1722 1990
1723 ndlp->nlp_prev_state = ndlp->nlp_state; 1991 ndlp->nlp_prev_state = ndlp->nlp_state;
1724 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1992 if (cmd == ELS_CMD_PRLI)
1993 lpfc_nlp_set_state(vport, ndlp,
1994 NLP_STE_REG_LOGIN_ISSUE);
1995 else
1996 lpfc_nlp_set_state(vport, ndlp,
1997 NLP_STE_NPR_NODE);
1725 ndlp->nlp_last_elscmd = cmd; 1998 ndlp->nlp_last_elscmd = cmd;
1726 1999
1727 return 1; 2000 return 1;
1728 } 2001 }
1729 switch (cmd) { 2002 switch (cmd) {
1730 case ELS_CMD_FLOGI: 2003 case ELS_CMD_FLOGI:
1731 lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry); 2004 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2005 return 1;
2006 case ELS_CMD_FDISC:
2007 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
1732 return 1; 2008 return 1;
1733 case ELS_CMD_PLOGI: 2009 case ELS_CMD_PLOGI:
1734 if (ndlp) { 2010 if (ndlp) {
1735 ndlp->nlp_prev_state = ndlp->nlp_state; 2011 ndlp->nlp_prev_state = ndlp->nlp_state;
1736 lpfc_nlp_set_state(phba, ndlp, 2012 lpfc_nlp_set_state(vport, ndlp,
1737 NLP_STE_PLOGI_ISSUE); 2013 NLP_STE_PLOGI_ISSUE);
1738 } 2014 }
1739 lpfc_issue_els_plogi(phba, did, cmdiocb->retry); 2015 lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
1740 return 1; 2016 return 1;
1741 case ELS_CMD_ADISC: 2017 case ELS_CMD_ADISC:
1742 ndlp->nlp_prev_state = ndlp->nlp_state; 2018 ndlp->nlp_prev_state = ndlp->nlp_state;
1743 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 2019 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1744 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry); 2020 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
1745 return 1; 2021 return 1;
1746 case ELS_CMD_PRLI: 2022 case ELS_CMD_PRLI:
1747 ndlp->nlp_prev_state = ndlp->nlp_state; 2023 ndlp->nlp_prev_state = ndlp->nlp_state;
1748 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); 2024 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1749 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry); 2025 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
1750 return 1; 2026 return 1;
1751 case ELS_CMD_LOGO: 2027 case ELS_CMD_LOGO:
1752 ndlp->nlp_prev_state = ndlp->nlp_state; 2028 ndlp->nlp_prev_state = ndlp->nlp_state;
1753 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 2029 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1754 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry); 2030 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
1755 return 1; 2031 return 1;
1756 } 2032 }
1757 } 2033 }
1758 2034
1759 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 2035 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1760 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2036 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1761 "%d:0108 No retry ELS command x%x to remote NPORT x%x " 2037 "%d (%d):0108 No retry ELS command x%x to remote "
1762 "Data: x%x\n", 2038 "NPORT x%x Data: x%x\n",
1763 phba->brd_no, 2039 phba->brd_no, vport->vpi,
1764 cmd, did, cmdiocb->retry); 2040 cmd, did, cmdiocb->retry);
1765 2041
1766 return 0; 2042 return 0;
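With delay now carried in milliseconds, the retry timer is armed with jiffies + msecs_to_jiffies(delay) instead of the old hard-coded jiffies + HZ (one second). A simplified sketch of the round-up conversion, assuming a fixed-HZ build; the real kernel helper also handles HZ values that do not divide 1000 evenly and guards against overflow:

/* Simplified sketch of msecs_to_jiffies() for a fixed-HZ build; the
 * rounding up ensures short delays never collapse to zero ticks. */
#define HZ 250				/* assumed tick rate for this sketch */

static unsigned long sketch_msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;	/* e.g. 1000 ms -> 250 ticks */
}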
@@ -1795,33 +2071,36 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
1795 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 2071 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1796 kfree(buf_ptr); 2072 kfree(buf_ptr);
1797 } 2073 }
1798 spin_lock_irq(phba->host->host_lock);
1799 lpfc_sli_release_iocbq(phba, elsiocb); 2074 lpfc_sli_release_iocbq(phba, elsiocb);
1800 spin_unlock_irq(phba->host->host_lock);
1801 return 0; 2075 return 0;
1802} 2076}
1803 2077
1804static void 2078static void
1805lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 2079lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1806 struct lpfc_iocbq * rspiocb) 2080 struct lpfc_iocbq *rspiocb)
1807{ 2081{
1808 struct lpfc_nodelist *ndlp; 2082 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2083 struct lpfc_vport *vport = cmdiocb->vport;
2084 IOCB_t *irsp;
1809 2085
1810 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2086 irsp = &rspiocb->iocb;
2087 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2088 "ACC LOGO cmpl: status:x%x/x%x did:x%x",
2089 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
1811 2090
1812 /* ACC to LOGO completes to NPort <nlp_DID> */ 2091 /* ACC to LOGO completes to NPort <nlp_DID> */
1813 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2092 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1814 "%d:0109 ACC to LOGO completes to NPort x%x " 2093 "%d (%d):0109 ACC to LOGO completes to NPort x%x "
1815 "Data: x%x x%x x%x\n", 2094 "Data: x%x x%x x%x\n",
1816 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 2095 phba->brd_no, vport->vpi, ndlp->nlp_DID,
1817 ndlp->nlp_state, ndlp->nlp_rpi); 2096 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1818 2097
1819 switch (ndlp->nlp_state) { 2098 switch (ndlp->nlp_state) {
1820 case NLP_STE_UNUSED_NODE: /* node is just allocated */ 2099 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1821 lpfc_drop_node(phba, ndlp); 2100 lpfc_drop_node(vport, ndlp);
1822 break; 2101 break;
1823 case NLP_STE_NPR_NODE: /* NPort Recovery mode */ 2102 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1824 lpfc_unreg_rpi(phba, ndlp); 2103 lpfc_unreg_rpi(vport, ndlp);
1825 break; 2104 break;
1826 default: 2105 default:
1827 break; 2106 break;
@@ -1830,24 +2109,38 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1830 return; 2109 return;
1831} 2110}
1832 2111
2112void
2113lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2114{
2115 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2116 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2117
2118 pmb->context1 = NULL;
2119 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2120 kfree(mp);
2121 mempool_free(pmb, phba->mbox_mem_pool);
2122 lpfc_nlp_put(ndlp);
2123 return;
2124}
2125
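The new lpfc_mbx_cmpl_dflt_rpi() completion above follows the usual teardown order for a mailbox: free the DMA buffer hung off context1, return the mailbox to its mempool, then drop the node reference that was taken when the command was queued. A generic userspace sketch of that release order with a simple refcount (types and names are hypothetical, not the driver's):

/* Generic sketch of the completion-side release order; not driver code. */
#include <stdlib.h>

struct node { int refcnt; };

static void node_put(struct node *n)
{
	if (--n->refcnt == 0)		/* last reference: free the node */
		free(n);
}

struct mbox { void *dma_buf; struct node *ndlp; };

static void mbox_complete(struct mbox *mb)
{
	struct node *ndlp = mb->ndlp;

	free(mb->dma_buf);		/* 1: release the payload buffer */
	free(mb);			/* 2: return the mailbox itself */
	node_put(ndlp);			/* 3: drop the ref held for completion */
}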
1833static void 2126static void
1834lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2127lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1835 struct lpfc_iocbq *rspiocb) 2128 struct lpfc_iocbq *rspiocb)
1836{ 2129{
2130 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2131 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
2132 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
1837 IOCB_t *irsp; 2133 IOCB_t *irsp;
1838 struct lpfc_nodelist *ndlp;
1839 LPFC_MBOXQ_t *mbox = NULL; 2134 LPFC_MBOXQ_t *mbox = NULL;
1840 struct lpfc_dmabuf *mp; 2135 struct lpfc_dmabuf *mp = NULL;
1841 2136
1842 irsp = &rspiocb->iocb; 2137 irsp = &rspiocb->iocb;
1843 2138
1844 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1845 if (cmdiocb->context_un.mbox) 2139 if (cmdiocb->context_un.mbox)
1846 mbox = cmdiocb->context_un.mbox; 2140 mbox = cmdiocb->context_un.mbox;
1847 2141
1848
1849 /* Check to see if link went down during discovery */ 2142 /* Check to see if link went down during discovery */
1850 if (lpfc_els_chk_latt(phba) || !ndlp) { 2143 if (!ndlp || lpfc_els_chk_latt(vport)) {
1851 if (mbox) { 2144 if (mbox) {
1852 mp = (struct lpfc_dmabuf *) mbox->context1; 2145 mp = (struct lpfc_dmabuf *) mbox->context1;
1853 if (mp) { 2146 if (mp) {
@@ -1859,24 +2152,37 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1859 goto out; 2152 goto out;
1860 } 2153 }
1861 2154
2155 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2156 "ACC cmpl: status:x%x/x%x did:x%x",
2157 irsp->ulpStatus, irsp->un.ulpWord[4],
2158 irsp->un.rcvels.remoteID);
2159
1862 /* ELS response tag <ulpIoTag> completes */ 2160 /* ELS response tag <ulpIoTag> completes */
1863 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2161 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1864 "%d:0110 ELS response tag x%x completes " 2162 "%d (%d):0110 ELS response tag x%x completes "
1865 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 2163 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
1866 phba->brd_no, 2164 phba->brd_no, vport->vpi,
1867 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 2165 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1868 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 2166 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
1869 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 2167 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
1870 ndlp->nlp_rpi); 2168 ndlp->nlp_rpi);
1871 2169
1872 if (mbox) { 2170 if (mbox) {
1873 if ((rspiocb->iocb.ulpStatus == 0) 2171 if ((rspiocb->iocb.ulpStatus == 0)
1874 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 2172 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1875 lpfc_unreg_rpi(phba, ndlp); 2173 lpfc_unreg_rpi(vport, ndlp);
1876 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1877 mbox->context2 = lpfc_nlp_get(ndlp); 2174 mbox->context2 = lpfc_nlp_get(ndlp);
1878 ndlp->nlp_prev_state = ndlp->nlp_state; 2175 mbox->vport = vport;
1879 lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE); 2176 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
2177 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
2178 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
2179 }
2180 else {
2181 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
2182 ndlp->nlp_prev_state = ndlp->nlp_state;
2183 lpfc_nlp_set_state(vport, ndlp,
2184 NLP_STE_REG_LOGIN_ISSUE);
2185 }
1880 if (lpfc_sli_issue_mbox(phba, mbox, 2186 if (lpfc_sli_issue_mbox(phba, mbox,
1881 (MBX_NOWAIT | MBX_STOP_IOCB)) 2187 (MBX_NOWAIT | MBX_STOP_IOCB))
1882 != MBX_NOT_FINISHED) { 2188 != MBX_NOT_FINISHED) {
@@ -1886,15 +2192,11 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1886 /* NOTE: we should have messages for unsuccessful 2192 /* NOTE: we should have messages for unsuccessful
1887 reglogin */ 2193 reglogin */
1888 } else { 2194 } else {
1889 /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */ 2195 /* Do not drop node for lpfc_els_abort'ed ELS cmds */
1890 if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2196 if (!lpfc_error_lost_link(irsp) &&
1891 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) || 2197 ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1892 (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) || 2198 lpfc_drop_node(vport, ndlp);
1893 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) { 2199 ndlp = NULL;
1894 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1895 lpfc_drop_node(phba, ndlp);
1896 ndlp = NULL;
1897 }
1898 } 2200 }
1899 } 2201 }
1900 mp = (struct lpfc_dmabuf *) mbox->context1; 2202 mp = (struct lpfc_dmabuf *) mbox->context1;
@@ -1906,19 +2208,21 @@ lpfc_cmpl_els_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1906 } 2208 }
1907out: 2209out:
1908 if (ndlp) { 2210 if (ndlp) {
1909 spin_lock_irq(phba->host->host_lock); 2211 spin_lock_irq(shost->host_lock);
1910 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 2212 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
1911 spin_unlock_irq(phba->host->host_lock); 2213 spin_unlock_irq(shost->host_lock);
1912 } 2214 }
1913 lpfc_els_free_iocb(phba, cmdiocb); 2215 lpfc_els_free_iocb(phba, cmdiocb);
1914 return; 2216 return;
1915} 2217}
1916 2218
1917int 2219int
1918lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, 2220lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
1919 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp, 2221 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
1920 LPFC_MBOXQ_t * mbox, uint8_t newnode) 2222 LPFC_MBOXQ_t *mbox, uint8_t newnode)
1921{ 2223{
2224 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2225 struct lpfc_hba *phba = vport->phba;
1922 IOCB_t *icmd; 2226 IOCB_t *icmd;
1923 IOCB_t *oldcmd; 2227 IOCB_t *oldcmd;
1924 struct lpfc_iocbq *elsiocb; 2228 struct lpfc_iocbq *elsiocb;
@@ -1935,23 +2239,30 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1935 2239
1936 switch (flag) { 2240 switch (flag) {
1937 case ELS_CMD_ACC: 2241 case ELS_CMD_ACC:
1938 cmdsize = sizeof (uint32_t); 2242 cmdsize = sizeof(uint32_t);
1939 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2243 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1940 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2244 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1941 if (!elsiocb) { 2245 if (!elsiocb) {
2246 spin_lock_irq(shost->host_lock);
1942 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2247 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2248 spin_unlock_irq(shost->host_lock);
1943 return 1; 2249 return 1;
1944 } 2250 }
2251
1945 icmd = &elsiocb->iocb; 2252 icmd = &elsiocb->iocb;
1946 icmd->ulpContext = oldcmd->ulpContext; /* Xri */ 2253 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1947 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2254 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1948 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2255 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1949 pcmd += sizeof (uint32_t); 2256 pcmd += sizeof(uint32_t);
2257
2258 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2259 "Issue ACC: did:x%x flg:x%x",
2260 ndlp->nlp_DID, ndlp->nlp_flag, 0);
1950 break; 2261 break;
1951 case ELS_CMD_PLOGI: 2262 case ELS_CMD_PLOGI:
1952 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t)); 2263 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
1953 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2264 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1954 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2265 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
1955 if (!elsiocb) 2266 if (!elsiocb)
1956 return 1; 2267 return 1;
1957 2268
@@ -1963,12 +2274,16 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1963 elsiocb->context_un.mbox = mbox; 2274 elsiocb->context_un.mbox = mbox;
1964 2275
1965 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2276 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1966 pcmd += sizeof (uint32_t); 2277 pcmd += sizeof(uint32_t);
1967 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); 2278 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2279
2280 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2281 "Issue ACC PLOGI: did:x%x flg:x%x",
2282 ndlp->nlp_DID, ndlp->nlp_flag, 0);
1968 break; 2283 break;
1969 case ELS_CMD_PRLO: 2284 case ELS_CMD_PRLO:
1970 cmdsize = sizeof (uint32_t) + sizeof (PRLO); 2285 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
1971 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2286 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
1972 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 2287 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
1973 if (!elsiocb) 2288 if (!elsiocb)
1974 return 1; 2289 return 1;
@@ -1978,10 +2293,14 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1978 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2293 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1979 2294
1980 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 2295 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
1981 sizeof (uint32_t) + sizeof (PRLO)); 2296 sizeof(uint32_t) + sizeof(PRLO));
1982 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 2297 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
1983 els_pkt_ptr = (ELS_PKT *) pcmd; 2298 els_pkt_ptr = (ELS_PKT *) pcmd;
1984 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 2299 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
2300
2301 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2302 "Issue ACC PRLO: did:x%x flg:x%x",
2303 ndlp->nlp_DID, ndlp->nlp_flag, 0);
1985 break; 2304 break;
1986 default: 2305 default:
1987 return 1; 2306 return 1;
@@ -1994,25 +2313,23 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1994 2313
1995 /* Xmit ELS ACC response tag <ulpIoTag> */ 2314 /* Xmit ELS ACC response tag <ulpIoTag> */
1996 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2315 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1997 "%d:0128 Xmit ELS ACC response tag x%x, XRI: x%x, " 2316 "%d (%d):0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
1998 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n", 2317 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
1999 phba->brd_no, elsiocb->iotag, 2318 phba->brd_no, vport->vpi, elsiocb->iotag,
2000 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2319 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2001 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2320 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2002 2321
2003 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 2322 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2004 spin_lock_irq(phba->host->host_lock); 2323 spin_lock_irq(shost->host_lock);
2005 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2324 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2006 spin_unlock_irq(phba->host->host_lock); 2325 spin_unlock_irq(shost->host_lock);
2007 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 2326 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
2008 } else { 2327 } else {
2009 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2328 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2010 } 2329 }
2011 2330
2012 phba->fc_stat.elsXmitACC++; 2331 phba->fc_stat.elsXmitACC++;
2013 spin_lock_irq(phba->host->host_lock);
2014 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2332 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2015 spin_unlock_irq(phba->host->host_lock);
2016 if (rc == IOCB_ERROR) { 2333 if (rc == IOCB_ERROR) {
2017 lpfc_els_free_iocb(phba, elsiocb); 2334 lpfc_els_free_iocb(phba, elsiocb);
2018 return 1; 2335 return 1;
@@ -2021,9 +2338,11 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
2021} 2338}
2022 2339
2023int 2340int
2024lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError, 2341lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
2025 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) 2342 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
2343 LPFC_MBOXQ_t *mbox)
2026{ 2344{
2345 struct lpfc_hba *phba = vport->phba;
2027 IOCB_t *icmd; 2346 IOCB_t *icmd;
2028 IOCB_t *oldcmd; 2347 IOCB_t *oldcmd;
2029 struct lpfc_iocbq *elsiocb; 2348 struct lpfc_iocbq *elsiocb;
@@ -2036,9 +2355,9 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2036 psli = &phba->sli; 2355 psli = &phba->sli;
2037 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2356 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2038 2357
2039 cmdsize = 2 * sizeof (uint32_t); 2358 cmdsize = 2 * sizeof(uint32_t);
2040 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2359 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2041 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 2360 ndlp->nlp_DID, ELS_CMD_LS_RJT);
2042 if (!elsiocb) 2361 if (!elsiocb)
2043 return 1; 2362 return 1;
2044 2363
@@ -2048,22 +2367,30 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2048 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2367 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2049 2368
2050 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 2369 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
2051 pcmd += sizeof (uint32_t); 2370 pcmd += sizeof(uint32_t);
2052 *((uint32_t *) (pcmd)) = rejectError; 2371 *((uint32_t *) (pcmd)) = rejectError;
2053 2372
2373 if (mbox) {
2374 elsiocb->context_un.mbox = mbox;
2375 elsiocb->context1 = lpfc_nlp_get(ndlp);
2376 }
2377
2054 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 2378 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
2055 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2379 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2056 "%d:0129 Xmit ELS RJT x%x response tag x%x xri x%x, " 2380 "%d (%d):0129 Xmit ELS RJT x%x response tag x%x "
2057 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2381 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
2058 phba->brd_no, rejectError, elsiocb->iotag, 2382 "rpi x%x\n",
2383 phba->brd_no, vport->vpi, rejectError, elsiocb->iotag,
2059 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2384 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2060 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2385 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2061 2386
2387 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2388 "Issue LS_RJT: did:x%x flg:x%x err:x%x",
2389 ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
2390
2062 phba->fc_stat.elsXmitLSRJT++; 2391 phba->fc_stat.elsXmitLSRJT++;
2063 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2392 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2064 spin_lock_irq(phba->host->host_lock);
2065 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2393 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2066 spin_unlock_irq(phba->host->host_lock);
2067 if (rc == IOCB_ERROR) { 2394 if (rc == IOCB_ERROR) {
2068 lpfc_els_free_iocb(phba, elsiocb); 2395 lpfc_els_free_iocb(phba, elsiocb);
2069 return 1; 2396 return 1;
@@ -2072,25 +2399,22 @@ lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
2072} 2399}
2073 2400
2074int 2401int
2075lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba, 2402lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2076 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp) 2403 struct lpfc_nodelist *ndlp)
2077{ 2404{
2405 struct lpfc_hba *phba = vport->phba;
2406 struct lpfc_sli *psli = &phba->sli;
2407 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2078 ADISC *ap; 2408 ADISC *ap;
2079 IOCB_t *icmd; 2409 IOCB_t *icmd, *oldcmd;
2080 IOCB_t *oldcmd;
2081 struct lpfc_iocbq *elsiocb; 2410 struct lpfc_iocbq *elsiocb;
2082 struct lpfc_sli_ring *pring;
2083 struct lpfc_sli *psli;
2084 uint8_t *pcmd; 2411 uint8_t *pcmd;
2085 uint16_t cmdsize; 2412 uint16_t cmdsize;
2086 int rc; 2413 int rc;
2087 2414
2088 psli = &phba->sli; 2415 cmdsize = sizeof(uint32_t) + sizeof(ADISC);
2089 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2416 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2090 2417 ndlp->nlp_DID, ELS_CMD_ACC);
2091 cmdsize = sizeof (uint32_t) + sizeof (ADISC);
2092 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2093 ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
2094 if (!elsiocb) 2418 if (!elsiocb)
2095 return 1; 2419 return 1;
2096 2420
@@ -2100,28 +2424,30 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2100 2424
2101 /* Xmit ADISC ACC response tag <ulpIoTag> */ 2425 /* Xmit ADISC ACC response tag <ulpIoTag> */
2102 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2426 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2103 "%d:0130 Xmit ADISC ACC response iotag x%x xri: " 2427 "%d (%d):0130 Xmit ADISC ACC response iotag x%x xri: "
2104 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 2428 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
2105 phba->brd_no, elsiocb->iotag, 2429 phba->brd_no, vport->vpi, elsiocb->iotag,
2106 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2430 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2107 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2431 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2108 2432
2109 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2433 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2110 2434
2111 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2435 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2112 pcmd += sizeof (uint32_t); 2436 pcmd += sizeof(uint32_t);
2113 2437
2114 ap = (ADISC *) (pcmd); 2438 ap = (ADISC *) (pcmd);
2115 ap->hardAL_PA = phba->fc_pref_ALPA; 2439 ap->hardAL_PA = phba->fc_pref_ALPA;
2116 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name)); 2440 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2117 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 2441 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2118 ap->DID = be32_to_cpu(phba->fc_myDID); 2442 ap->DID = be32_to_cpu(vport->fc_myDID);
2443
2444 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2445 "Issue ACC ADISC: did:x%x flg:x%x",
2446 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2119 2447
2120 phba->fc_stat.elsXmitACC++; 2448 phba->fc_stat.elsXmitACC++;
2121 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2449 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2122 spin_lock_irq(phba->host->host_lock);
2123 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2450 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2124 spin_unlock_irq(phba->host->host_lock);
2125 if (rc == IOCB_ERROR) { 2451 if (rc == IOCB_ERROR) {
2126 lpfc_els_free_iocb(phba, elsiocb); 2452 lpfc_els_free_iocb(phba, elsiocb);
2127 return 1; 2453 return 1;
@@ -2130,9 +2456,10 @@ lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
2130} 2456}
2131 2457
2132int 2458int
2133lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb, 2459lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
2134 struct lpfc_nodelist *ndlp) 2460 struct lpfc_nodelist *ndlp)
2135{ 2461{
2462 struct lpfc_hba *phba = vport->phba;
2136 PRLI *npr; 2463 PRLI *npr;
2137 lpfc_vpd_t *vpd; 2464 lpfc_vpd_t *vpd;
2138 IOCB_t *icmd; 2465 IOCB_t *icmd;
@@ -2147,8 +2474,8 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2147 psli = &phba->sli; 2474 psli = &phba->sli;
2148 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ 2475 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2149 2476
2150 cmdsize = sizeof (uint32_t) + sizeof (PRLI); 2477 cmdsize = sizeof(uint32_t) + sizeof(PRLI);
2151 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, ndlp, 2478 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2152 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK))); 2479 ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
2153 if (!elsiocb) 2480 if (!elsiocb)
2154 return 1; 2481 return 1;
@@ -2159,19 +2486,19 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2159 2486
2160 /* Xmit PRLI ACC response tag <ulpIoTag> */ 2487 /* Xmit PRLI ACC response tag <ulpIoTag> */
2161 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2488 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2162 "%d:0131 Xmit PRLI ACC response tag x%x xri x%x, " 2489 "%d (%d):0131 Xmit PRLI ACC response tag x%x xri x%x, "
2163 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 2490 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
2164 phba->brd_no, elsiocb->iotag, 2491 phba->brd_no, vport->vpi, elsiocb->iotag,
2165 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 2492 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2166 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 2493 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2167 2494
2168 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2495 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2169 2496
2170 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 2497 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2171 pcmd += sizeof (uint32_t); 2498 pcmd += sizeof(uint32_t);
2172 2499
2173 /* For PRLI, remainder of payload is PRLI parameter page */ 2500 /* For PRLI, remainder of payload is PRLI parameter page */
2174 memset(pcmd, 0, sizeof (PRLI)); 2501 memset(pcmd, 0, sizeof(PRLI));
2175 2502
2176 npr = (PRLI *) pcmd; 2503 npr = (PRLI *) pcmd;
2177 vpd = &phba->vpd; 2504 vpd = &phba->vpd;
@@ -2193,12 +2520,14 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2193 npr->prliType = PRLI_FCP_TYPE; 2520 npr->prliType = PRLI_FCP_TYPE;
2194 npr->initiatorFunc = 1; 2521 npr->initiatorFunc = 1;
2195 2522
2523 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2524 "Issue ACC PRLI: did:x%x flg:x%x",
2525 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2526
2196 phba->fc_stat.elsXmitACC++; 2527 phba->fc_stat.elsXmitACC++;
2197 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2528 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2198 2529
2199 spin_lock_irq(phba->host->host_lock);
2200 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2530 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2201 spin_unlock_irq(phba->host->host_lock);
2202 if (rc == IOCB_ERROR) { 2531 if (rc == IOCB_ERROR) {
2203 lpfc_els_free_iocb(phba, elsiocb); 2532 lpfc_els_free_iocb(phba, elsiocb);
2204 return 1; 2533 return 1;
@@ -2207,12 +2536,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_hba *phba, struct lpfc_iocbq *oldiocb,
2207} 2536}
2208 2537
2209static int 2538static int
2210lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format, 2539lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
2211 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 2540 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
2212{ 2541{
2542 struct lpfc_hba *phba = vport->phba;
2213 RNID *rn; 2543 RNID *rn;
2214 IOCB_t *icmd; 2544 IOCB_t *icmd, *oldcmd;
2215 IOCB_t *oldcmd;
2216 struct lpfc_iocbq *elsiocb; 2545 struct lpfc_iocbq *elsiocb;
2217 struct lpfc_sli_ring *pring; 2546 struct lpfc_sli_ring *pring;
2218 struct lpfc_sli *psli; 2547 struct lpfc_sli *psli;
@@ -2223,13 +2552,13 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2223 psli = &phba->sli; 2552 psli = &phba->sli;
2224 pring = &psli->ring[LPFC_ELS_RING]; 2553 pring = &psli->ring[LPFC_ELS_RING];
2225 2554
2226 cmdsize = sizeof (uint32_t) + sizeof (uint32_t) 2555 cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
2227 + (2 * sizeof (struct lpfc_name)); 2556 + (2 * sizeof(struct lpfc_name));
2228 if (format) 2557 if (format)
2229 cmdsize += sizeof (RNID_TOP_DISC); 2558 cmdsize += sizeof(RNID_TOP_DISC);
2230 2559
2231 elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, 2560 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
2232 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 2561 ndlp->nlp_DID, ELS_CMD_ACC);
2233 if (!elsiocb) 2562 if (!elsiocb)
2234 return 1; 2563 return 1;
2235 2564
@@ -2239,30 +2568,30 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2239 2568
2240 /* Xmit RNID ACC response tag <ulpIoTag> */ 2569 /* Xmit RNID ACC response tag <ulpIoTag> */
2241 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 2570 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2242 "%d:0132 Xmit RNID ACC response tag x%x " 2571 "%d (%d):0132 Xmit RNID ACC response tag x%x "
2243 "xri x%x\n", 2572 "xri x%x\n",
2244 phba->brd_no, elsiocb->iotag, 2573 phba->brd_no, vport->vpi, elsiocb->iotag,
2245 elsiocb->iocb.ulpContext); 2574 elsiocb->iocb.ulpContext);
2246 2575
2247 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2576 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2248 2577
2249 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 2578 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2250 pcmd += sizeof (uint32_t); 2579 pcmd += sizeof(uint32_t);
2251 2580
2252 memset(pcmd, 0, sizeof (RNID)); 2581 memset(pcmd, 0, sizeof(RNID));
2253 rn = (RNID *) (pcmd); 2582 rn = (RNID *) (pcmd);
2254 rn->Format = format; 2583 rn->Format = format;
2255 rn->CommonLen = (2 * sizeof (struct lpfc_name)); 2584 rn->CommonLen = (2 * sizeof(struct lpfc_name));
2256 memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name)); 2585 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2257 memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name)); 2586 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2258 switch (format) { 2587 switch (format) {
2259 case 0: 2588 case 0:
2260 rn->SpecificLen = 0; 2589 rn->SpecificLen = 0;
2261 break; 2590 break;
2262 case RNID_TOPOLOGY_DISC: 2591 case RNID_TOPOLOGY_DISC:
2263 rn->SpecificLen = sizeof (RNID_TOP_DISC); 2592 rn->SpecificLen = sizeof(RNID_TOP_DISC);
2264 memcpy(&rn->un.topologyDisc.portName, 2593 memcpy(&rn->un.topologyDisc.portName,
2265 &phba->fc_portname, sizeof (struct lpfc_name)); 2594 &vport->fc_portname, sizeof(struct lpfc_name));
2266 rn->un.topologyDisc.unitType = RNID_HBA; 2595 rn->un.topologyDisc.unitType = RNID_HBA;
2267 rn->un.topologyDisc.physPort = 0; 2596 rn->un.topologyDisc.physPort = 0;
2268 rn->un.topologyDisc.attachedNodes = 0; 2597 rn->un.topologyDisc.attachedNodes = 0;
@@ -2273,15 +2602,17 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2273 break; 2602 break;
2274 } 2603 }
2275 2604
2605 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
2606 "Issue ACC RNID: did:x%x flg:x%x",
2607 ndlp->nlp_DID, ndlp->nlp_flag, 0);
2608
2276 phba->fc_stat.elsXmitACC++; 2609 phba->fc_stat.elsXmitACC++;
2277 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc; 2610 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
2278 lpfc_nlp_put(ndlp); 2611 lpfc_nlp_put(ndlp);
2279 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, 2612 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2280 * it could be freed */ 2613 * it could be freed */
2281 2614
2282 spin_lock_irq(phba->host->host_lock);
2283 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); 2615 rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
2284 spin_unlock_irq(phba->host->host_lock);
2285 if (rc == IOCB_ERROR) { 2616 if (rc == IOCB_ERROR) {
2286 lpfc_els_free_iocb(phba, elsiocb); 2617 lpfc_els_free_iocb(phba, elsiocb);
2287 return 1; 2618 return 1;
@@ -2290,168 +2621,153 @@ lpfc_els_rsp_rnid_acc(struct lpfc_hba *phba, uint8_t format,
2290} 2621}
2291 2622
2292int 2623int
2293lpfc_els_disc_adisc(struct lpfc_hba *phba) 2624lpfc_els_disc_adisc(struct lpfc_vport *vport)
2294{ 2625{
2295 int sentadisc; 2626 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2296 struct lpfc_nodelist *ndlp, *next_ndlp; 2627 struct lpfc_nodelist *ndlp, *next_ndlp;
2628 int sentadisc = 0;
2297 2629
2298 sentadisc = 0;
2299 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2630 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2300 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { 2631 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2301 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2632 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2302 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2633 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2303 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { 2634 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
2304 spin_lock_irq(phba->host->host_lock); 2635 spin_lock_irq(shost->host_lock);
2305 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2636 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2306 spin_unlock_irq(phba->host->host_lock); 2637 spin_unlock_irq(shost->host_lock);
2307 ndlp->nlp_prev_state = ndlp->nlp_state; 2638 ndlp->nlp_prev_state = ndlp->nlp_state;
2308 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 2639 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2309 lpfc_issue_els_adisc(phba, ndlp, 0); 2640 lpfc_issue_els_adisc(vport, ndlp, 0);
2310 sentadisc++; 2641 sentadisc++;
2311 phba->num_disc_nodes++; 2642 vport->num_disc_nodes++;
2312 if (phba->num_disc_nodes >= 2643 if (vport->num_disc_nodes >=
2313 phba->cfg_discovery_threads) { 2644 vport->phba->cfg_discovery_threads) {
2314 spin_lock_irq(phba->host->host_lock); 2645 spin_lock_irq(shost->host_lock);
2315 phba->fc_flag |= FC_NLP_MORE; 2646 vport->fc_flag |= FC_NLP_MORE;
2316 spin_unlock_irq(phba->host->host_lock); 2647 spin_unlock_irq(shost->host_lock);
2317 break; 2648 break;
2318 } 2649 }
2319 } 2650 }
2320 } 2651 }
2321 if (sentadisc == 0) { 2652 if (sentadisc == 0) {
2322 spin_lock_irq(phba->host->host_lock); 2653 spin_lock_irq(shost->host_lock);
2323 phba->fc_flag &= ~FC_NLP_MORE; 2654 vport->fc_flag &= ~FC_NLP_MORE;
2324 spin_unlock_irq(phba->host->host_lock); 2655 spin_unlock_irq(shost->host_lock);
2325 } 2656 }
2326 return sentadisc; 2657 return sentadisc;
2327} 2658}
2328 2659
2329int 2660int
2330lpfc_els_disc_plogi(struct lpfc_hba * phba) 2661lpfc_els_disc_plogi(struct lpfc_vport *vport)
2331{ 2662{
2332 int sentplogi; 2663 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2333 struct lpfc_nodelist *ndlp, *next_ndlp; 2664 struct lpfc_nodelist *ndlp, *next_ndlp;
2665 int sentplogi = 0;
2334 2666
2335 sentplogi = 0; 2667 /* go thru NPR nodes and issue any remaining ELS PLOGIs */
2336 /* go thru NPR list and issue any remaining ELS PLOGIs */ 2668 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
2337 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
2338 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 2669 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
2339 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 2670 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
2340 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 2671 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
2341 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 2672 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
2342 ndlp->nlp_prev_state = ndlp->nlp_state; 2673 ndlp->nlp_prev_state = ndlp->nlp_state;
2343 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 2674 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2344 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 2675 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2345 sentplogi++; 2676 sentplogi++;
2346 phba->num_disc_nodes++; 2677 vport->num_disc_nodes++;
2347 if (phba->num_disc_nodes >= 2678 if (vport->num_disc_nodes >=
2348 phba->cfg_discovery_threads) { 2679 vport->phba->cfg_discovery_threads) {
2349 spin_lock_irq(phba->host->host_lock); 2680 spin_lock_irq(shost->host_lock);
2350 phba->fc_flag |= FC_NLP_MORE; 2681 vport->fc_flag |= FC_NLP_MORE;
2351 spin_unlock_irq(phba->host->host_lock); 2682 spin_unlock_irq(shost->host_lock);
2352 break; 2683 break;
2353 } 2684 }
2354 } 2685 }
2355 } 2686 }
2356 if (sentplogi == 0) { 2687 if (sentplogi == 0) {
2357 spin_lock_irq(phba->host->host_lock); 2688 spin_lock_irq(shost->host_lock);
2358 phba->fc_flag &= ~FC_NLP_MORE; 2689 vport->fc_flag &= ~FC_NLP_MORE;
2359 spin_unlock_irq(phba->host->host_lock); 2690 spin_unlock_irq(shost->host_lock);
2360 } 2691 }
2361 return sentplogi; 2692 return sentplogi;
2362} 2693}
2363 2694
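lpfc_els_disc_adisc() and lpfc_els_disc_plogi() above share one throttle: issue at most cfg_discovery_threads outstanding ELS commands per pass, raise FC_NLP_MORE when eligible nodes remain, and clear it when a pass sends nothing. A sketch of that throttle with hypothetical types:

/* Hypothetical sketch of the discovery throttle used by the ADISC and
 * PLOGI passes above. */
#include <stdbool.h>

struct disc_vport {
	int  num_disc_nodes;
	int  discovery_threads;		/* cfg_discovery_threads */
	bool more_pending;		/* FC_NLP_MORE */
};

/* Returns how many commands this pass issued. */
static int disc_pass(struct disc_vport *v, int candidates,
		     void (*issue_one)(void))
{
	int sent = 0;

	while (candidates-- > 0) {
		issue_one();
		sent++;
		v->num_disc_nodes++;
		if (v->num_disc_nodes >= v->discovery_threads) {
			v->more_pending = true;	/* resume in a later pass */
			break;
		}
	}
	if (sent == 0)
		v->more_pending = false;	/* nothing left to do */
	return sent;
}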
2364int 2695void
2365lpfc_els_flush_rscn(struct lpfc_hba * phba) 2696lpfc_els_flush_rscn(struct lpfc_vport *vport)
2366{ 2697{
2367 struct lpfc_dmabuf *mp; 2698 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2699 struct lpfc_hba *phba = vport->phba;
2368 int i; 2700 int i;
2369 2701
2370 for (i = 0; i < phba->fc_rscn_id_cnt; i++) { 2702 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2371 mp = phba->fc_rscn_id_list[i]; 2703 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
2372 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2704 vport->fc_rscn_id_list[i] = NULL;
2373 kfree(mp); 2705 }
2374 phba->fc_rscn_id_list[i] = NULL; 2706 spin_lock_irq(shost->host_lock);
2375 } 2707 vport->fc_rscn_id_cnt = 0;
2376 phba->fc_rscn_id_cnt = 0; 2708 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2377 spin_lock_irq(phba->host->host_lock); 2709 spin_unlock_irq(shost->host_lock);
2378 phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 2710 lpfc_can_disctmo(vport);
2379 spin_unlock_irq(phba->host->host_lock);
2380 lpfc_can_disctmo(phba);
2381 return 0;
2382} 2711}
2383 2712
2384int 2713int
2385lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did) 2714lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
2386{ 2715{
2387 D_ID ns_did; 2716 D_ID ns_did;
2388 D_ID rscn_did; 2717 D_ID rscn_did;
2389 struct lpfc_dmabuf *mp;
2390 uint32_t *lp; 2718 uint32_t *lp;
2391 uint32_t payload_len, cmd, i, match; 2719 uint32_t payload_len, i;
2720 struct lpfc_hba *phba = vport->phba;
2392 2721
2393 ns_did.un.word = did; 2722 ns_did.un.word = did;
2394 match = 0;
2395 2723
2396 /* Never match fabric nodes for RSCNs */ 2724 /* Never match fabric nodes for RSCNs */
2397 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 2725 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
2398 return(0); 2726 return 0;
2399 2727
2400 /* If we are doing a FULL RSCN rediscovery, match everything */ 2728 /* If we are doing a FULL RSCN rediscovery, match everything */
2401 if (phba->fc_flag & FC_RSCN_DISCOVERY) { 2729 if (vport->fc_flag & FC_RSCN_DISCOVERY)
2402 return did; 2730 return did;
2403 }
2404 2731
2405 for (i = 0; i < phba->fc_rscn_id_cnt; i++) { 2732 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
2406 mp = phba->fc_rscn_id_list[i]; 2733 lp = vport->fc_rscn_id_list[i]->virt;
2407 lp = (uint32_t *) mp->virt; 2734 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
2408 cmd = *lp++; 2735 payload_len -= sizeof(uint32_t); /* take off word 0 */
2409 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2410 payload_len -= sizeof (uint32_t); /* take off word 0 */
2411 while (payload_len) { 2736 while (payload_len) {
2412 rscn_did.un.word = *lp++; 2737 rscn_did.un.word = be32_to_cpu(*lp++);
2413 rscn_did.un.word = be32_to_cpu(rscn_did.un.word); 2738 payload_len -= sizeof(uint32_t);
2414 payload_len -= sizeof (uint32_t);
2415 switch (rscn_did.un.b.resv) { 2739 switch (rscn_did.un.b.resv) {
 2416 		case 0:	/* Single N_Port ID affected */ 2740 		case 0:	/* Single N_Port ID affected */
2417 if (ns_did.un.word == rscn_did.un.word) { 2741 if (ns_did.un.word == rscn_did.un.word)
2418 match = did; 2742 return did;
2419 }
2420 break; 2743 break;
 2421 		case 1:	/* Whole N_Port Area affected */ 2744 		case 1:	/* Whole N_Port Area affected */
2422 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 2745 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2423 && (ns_did.un.b.area == rscn_did.un.b.area)) 2746 && (ns_did.un.b.area == rscn_did.un.b.area))
2424 { 2747 return did;
2425 match = did;
2426 }
2427 break; 2748 break;
 2428 		case 2:	/* Whole N_Port Domain affected */ 2749 		case 2:	/* Whole N_Port Domain affected */
2429 if (ns_did.un.b.domain == rscn_did.un.b.domain) 2750 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2430 { 2751 return did;
2431 match = did;
2432 }
2433 break;
 2434 		case 3:	/* Whole Fabric affected */
2435 match = did;
2436 break; 2752 break;
2437 default: 2753 default:
2438 /* Unknown Identifier in RSCN list */ 2754 /* Unknown Identifier in RSCN node */
2439 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2755 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2440 "%d:0217 Unknown Identifier in " 2756 "%d (%d):0217 Unknown "
2441 "RSCN payload Data: x%x\n", 2757 "Identifier in RSCN payload "
2442 phba->brd_no, rscn_did.un.word); 2758 "Data: x%x\n",
2443 break; 2759 phba->brd_no, vport->vpi,
2444 } 2760 rscn_did.un.word);
 2445 			if (match) { 2761 			case 3:	/* Whole Fabric affected */
2446 break; 2762 return did;
2447 } 2763 }
2448 } 2764 }
2449 } 2765 }
2450 return match; 2766 return 0;
2451} 2767}
2452 2768
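The reworked lpfc_rscn_payload_check() strips the ELS command byte from word 0 to recover the payload length, then walks the affected-address list, widening the comparison with each address format: exact port, whole area, whole domain, or whole fabric. A compact userspace sketch of that parse-and-match, assuming the standard 24-bit FC address layout (domain.area.port) and using ntohl() as a stand-in for be32_to_cpu(); masks and helper names are hypothetical:

/* Userspace sketch of RSCN payload matching; not driver code. */
#include <stdint.h>
#include <arpa/inet.h>		/* ntohl() stands in for be32_to_cpu() */

#define SKETCH_ELS_CMD_MASK 0xff000000u	/* command lives in the top byte */

static int rscn_match(const uint32_t *payload, uint32_t did)
{
	uint32_t w0 = ntohl(payload[0]);
	uint32_t len = (w0 & ~SKETCH_ELS_CMD_MASK) - sizeof(uint32_t);
	const uint32_t *lp = payload + 1;

	while (len) {
		uint32_t entry = ntohl(*lp++);
		len -= sizeof(uint32_t);

		switch (entry >> 24) {		/* address format byte */
		case 0:				/* single N_Port */
			if ((entry & 0xffffff) == did)
				return 1;
			break;
		case 1:				/* whole area: domain+area */
			if ((entry & 0xffff00) == (did & 0xffff00))
				return 1;
			break;
		case 2:				/* whole domain */
			if ((entry & 0xff0000) == (did & 0xff0000))
				return 1;
			break;
		case 3:				/* whole fabric */
			return 1;
		}
	}
	return 0;
}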
2453static int 2769static int
2454lpfc_rscn_recovery_check(struct lpfc_hba *phba) 2770lpfc_rscn_recovery_check(struct lpfc_vport *vport)
2455{ 2771{
2456 struct lpfc_nodelist *ndlp = NULL; 2772 struct lpfc_nodelist *ndlp = NULL;
2457 2773
@@ -2459,188 +2775,261 @@ lpfc_rscn_recovery_check(struct lpfc_hba *phba)
2459 * them to NPR state. 2775 * them to NPR state.
2460 */ 2776 */
2461 2777
2462 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2778 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2463 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE || 2779 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE ||
2464 lpfc_rscn_payload_check(phba, ndlp->nlp_DID) == 0) 2780 lpfc_rscn_payload_check(vport, ndlp->nlp_DID) == 0)
2465 continue; 2781 continue;
2466 2782
2467 lpfc_disc_state_machine(phba, ndlp, NULL, 2783 lpfc_disc_state_machine(vport, ndlp, NULL,
2468 NLP_EVT_DEVICE_RECOVERY); 2784 NLP_EVT_DEVICE_RECOVERY);
2469 2785
2470 /* 2786 /*
2471 * Make sure NLP_DELAY_TMO is NOT running after a device 2787 * Make sure NLP_DELAY_TMO is NOT running after a device
2472 * recovery event. 2788 * recovery event.
2473 */ 2789 */
2474 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2790 if (ndlp->nlp_flag & NLP_DELAY_TMO)
2475 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2791 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2476 } 2792 }
2477 2793
2478 return 0; 2794 return 0;
2479} 2795}
 
 static int
-lpfc_els_rcv_rscn(struct lpfc_hba * phba,
-		  struct lpfc_iocbq * cmdiocb,
-		  struct lpfc_nodelist * ndlp, uint8_t newnode)
+lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp, uint8_t newnode)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
-	uint32_t *lp;
+	struct lpfc_vport *next_vport;
+	uint32_t *lp, *datap;
 	IOCB_t *icmd;
-	uint32_t payload_len, cmd;
+	uint32_t payload_len, length, nportid, *cmd;
+	int rscn_cnt = vport->fc_rscn_id_cnt;
+	int rscn_id = 0, hba_id = 0;
 	int i;
 
 	icmd = &cmdiocb->iocb;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 
-	cmd = *lp++;
-	payload_len = be32_to_cpu(cmd) & 0xffff;	/* payload length */
-	payload_len -= sizeof (uint32_t);	/* take off word 0 */
-	cmd &= ELS_CMD_MASK;
+	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
+	payload_len -= sizeof(uint32_t);	/* take off word 0 */
 
 	/* RSCN received */
-	lpfc_printf_log(phba,
-			KERN_INFO,
-			LOG_DISCOVERY,
-			"%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
-			phba->brd_no,
-			phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
+	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+			"%d (%d):0214 RSCN received Data: x%x x%x x%x x%x\n",
+			phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
+			*lp, rscn_cnt);
 
 	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
-		fc_host_post_event(phba->host, fc_get_event_number(),
+		fc_host_post_event(shost, fc_get_event_number(),
 				   FCH_EVT_RSCN, lp[i]);
 
 	/* If we are about to begin discovery, just ACC the RSCN.
 	 * Discovery processing will satisfy it.
 	 */
-	if (phba->hba_state <= LPFC_NS_QRY) {
-		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
-				 newnode);
+	if (vport->port_state <= LPFC_NS_QRY) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
+			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+				 newnode);
 		return 0;
 	}
 
+	/* If this RSCN just contains NPortIDs for other vports on this HBA,
+	 * just ACC and ignore it.
+	 */
+	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+	    !(phba->cfg_peer_port_login)) {
+		i = payload_len;
+		datap = lp;
+		while (i > 0) {
+			nportid = *datap++;
+			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
+			i -= sizeof(uint32_t);
+			rscn_id++;
+			list_for_each_entry(next_vport, &phba->port_list,
+					    listentry) {
+				if (nportid == next_vport->fc_myDID) {
+					hba_id++;
+					break;
+				}
+			}
+		}
+		if (rscn_id == hba_id) {
+			/* ALL NPortIDs in RSCN are on HBA */
+			lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+				"%d (%d):0214 Ignore RSCN Data: x%x x%x x%x x%x\n",
+				phba->brd_no, vport->vpi, vport->fc_flag, payload_len,
+				*lp, rscn_cnt);
+
+			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+				"RCV RSCN vport: did:x%x/ste:x%x flg:x%x",
+				ndlp->nlp_DID, vport->port_state,
+				ndlp->nlp_flag);
+
+			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
+					 ndlp, NULL, newnode);
+			return 0;
+		}
+	}
+
 	/* If we are already processing an RSCN, save the received
 	 * RSCN payload buffer, cmdiocb->context2 to process later.
 	 */
-	if (phba->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
-		if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
-		    !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
-			spin_lock_irq(phba->host->host_lock);
-			phba->fc_flag |= FC_RSCN_MODE;
-			spin_unlock_irq(phba->host->host_lock);
-			phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
-
-			/* If we zero, cmdiocb->context2, the calling
-			 * routine will not try to free it.
-			 */
-			cmdiocb->context2 = NULL;
+	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RSCN defer: did:x%x/ste:x%x flg:x%x",
+			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+		vport->fc_flag |= FC_RSCN_DEFERRED;
+		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
+		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_RSCN_MODE;
+			spin_unlock_irq(shost->host_lock);
+			if (rscn_cnt) {
+				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
+				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
+			}
+			if ((rscn_cnt) &&
+			    (payload_len + length <= LPFC_BPL_SIZE)) {
+				*cmd &= ELS_CMD_MASK;
+				*cmd |= be32_to_cpu(payload_len + length);
+				memcpy(((uint8_t *)cmd) + length, lp,
+				       payload_len);
+			} else {
+				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
+				vport->fc_rscn_id_cnt++;
+				/* If we zero, cmdiocb->context2, the calling
+				 * routine will not try to free it.
+				 */
+				cmdiocb->context2 = NULL;
+			}
 
 			/* Deferred RSCN */
 			lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-					"%d:0235 Deferred RSCN "
+					"%d (%d):0235 Deferred RSCN "
 					"Data: x%x x%x x%x\n",
-					phba->brd_no, phba->fc_rscn_id_cnt,
-					phba->fc_flag, phba->hba_state);
+					phba->brd_no, vport->vpi,
+					vport->fc_rscn_id_cnt, vport->fc_flag,
+					vport->port_state);
 		} else {
-			spin_lock_irq(phba->host->host_lock);
-			phba->fc_flag |= FC_RSCN_DISCOVERY;
-			spin_unlock_irq(phba->host->host_lock);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_RSCN_DISCOVERY;
+			spin_unlock_irq(shost->host_lock);
 			/* ReDiscovery RSCN */
 			lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
-					"%d:0234 ReDiscovery RSCN "
+					"%d (%d):0234 ReDiscovery RSCN "
 					"Data: x%x x%x x%x\n",
-					phba->brd_no, phba->fc_rscn_id_cnt,
-					phba->fc_flag, phba->hba_state);
+					phba->brd_no, vport->vpi,
+					vport->fc_rscn_id_cnt, vport->fc_flag,
+					vport->port_state);
 		}
 		/* Send back ACC */
-		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
+		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
 				 newnode);
 
 		/* send RECOVERY event for ALL nodes that match RSCN payload */
-		lpfc_rscn_recovery_check(phba);
+		lpfc_rscn_recovery_check(vport);
+		vport->fc_flag &= ~FC_RSCN_DEFERRED;
 		return 0;
 	}
 
-	phba->fc_flag |= FC_RSCN_MODE;
-	phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+		"RCV RSCN: did:x%x/ste:x%x flg:x%x",
+		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
+
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_RSCN_MODE;
+	spin_unlock_irq(shost->host_lock);
+	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
 	/*
 	 * If we zero, cmdiocb->context2, the calling routine will
 	 * not try to free it.
 	 */
 	cmdiocb->context2 = NULL;
 
-	lpfc_set_disctmo(phba);
+	lpfc_set_disctmo(vport);
 
 	/* Send back ACC */
-	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
 
 	/* send RECOVERY event for ALL nodes that match RSCN payload */
-	lpfc_rscn_recovery_check(phba);
+	lpfc_rscn_recovery_check(vport);
 
-	return lpfc_els_handle_rscn(phba);
+	return lpfc_els_handle_rscn(vport);
 }
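The deferred-RSCN path above is new in one more way: rather than always consuming another fc_rscn_id_list slot, the vport code first tries to append the fresh payload onto the most recently buffered RSCN when the combined length still fits in one buffer (LPFC_BPL_SIZE), rewriting the length field that shares word 0 with the ELS command. A simplified host-endian sketch of that coalescing step, assuming a buffer whose first 32-bit word holds the command in its top byte and the total length in the low bits (all names illustrative):

    #include <stdint.h>
    #include <string.h>

    #define CMD_MASK 0xFF000000u    /* ELS command lives in the top byte  */
    #define BUF_SIZE 1024           /* stand-in for the real buffer limit */

    /*
     * Append a new payload (new_len bytes, excluding its word 0) onto an
     * already-buffered RSCN whose word 0 encodes <cmd | total_len>.
     * Returns 0 on success, or a negative value when the combined payload
     * would not fit, in which case the caller keeps a separate buffer.
     */
    static int rscn_coalesce(uint32_t *buf, const void *payload, uint32_t new_len)
    {
            uint32_t old_len = buf[0] & ~CMD_MASK;  /* current total length */

            if (old_len + new_len > BUF_SIZE)
                    return -1;                      /* caller uses a new slot */

            memcpy((uint8_t *)buf + old_len, payload, new_len);
            buf[0] = (buf[0] & CMD_MASK) | (old_len + new_len);
            return 0;
    }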
 
 int
-lpfc_els_handle_rscn(struct lpfc_hba * phba)
+lpfc_els_handle_rscn(struct lpfc_vport *vport)
 {
 	struct lpfc_nodelist *ndlp;
+	struct lpfc_hba *phba = vport->phba;
+
+	/* Ignore RSCN if the port is being torn down. */
+	if (vport->load_flag & FC_UNLOADING) {
+		lpfc_els_flush_rscn(vport);
+		return 0;
+	}
 
 	/* Start timer for RSCN processing */
-	lpfc_set_disctmo(phba);
+	lpfc_set_disctmo(vport);
 
 	/* RSCN processed */
-	lpfc_printf_log(phba,
-			KERN_INFO,
-			LOG_DISCOVERY,
-			"%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
-			phba->brd_no,
-			phba->fc_flag, 0, phba->fc_rscn_id_cnt,
-			phba->hba_state);
+	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
+			"%d (%d):0215 RSCN processed Data: x%x x%x x%x x%x\n",
+			phba->brd_no, vport->vpi,
+			vport->fc_flag, 0, vport->fc_rscn_id_cnt,
+			vport->port_state);
 
 	/* To process RSCN, first compare RSCN data with NameServer */
-	phba->fc_ns_retry = 0;
-	ndlp = lpfc_findnode_did(phba, NameServer_DID);
+	vport->fc_ns_retry = 0;
+	ndlp = lpfc_findnode_did(vport, NameServer_DID);
 	if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
 		/* Good ndlp, issue CT Request to NameServer */
-		if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
+		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
 			/* Wait for NameServer query cmpl before we can
 			   continue */
 			return 1;
-		}
 	} else {
 		/* If login to NameServer does not exist, issue one */
 		/* Good status, issue PLOGI to NameServer */
-		ndlp = lpfc_findnode_did(phba, NameServer_DID);
-		if (ndlp) {
+		ndlp = lpfc_findnode_did(vport, NameServer_DID);
+		if (ndlp)
 			/* Wait for NameServer login cmpl before we can
 			   continue */
 			return 1;
-		}
+
 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
 		if (!ndlp) {
-			lpfc_els_flush_rscn(phba);
+			lpfc_els_flush_rscn(vport);
 			return 0;
 		} else {
-			lpfc_nlp_init(phba, ndlp, NameServer_DID);
+			lpfc_nlp_init(vport, ndlp, NameServer_DID);
 			ndlp->nlp_type |= NLP_FABRIC;
 			ndlp->nlp_prev_state = ndlp->nlp_state;
-			lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
-			lpfc_issue_els_plogi(phba, NameServer_DID, 0);
+			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
+			lpfc_issue_els_plogi(vport, NameServer_DID, 0);
 			/* Wait for NameServer login cmpl before we can
 			   continue */
 			return 1;
 		}
 	}
 
-	lpfc_els_flush_rscn(phba);
+	lpfc_els_flush_rscn(vport);
 	return 0;
 }
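lpfc_els_handle_rscn() keeps the same two-step NameServer strategy as before, now per vport: if a usable NameServer login exists, issue a GID_FT query and wait for its completion; otherwise PLOGI to the well-known NameServer address first and let the login completion re-drive the query. A condensed sketch of that decision (enum and helper are stand-ins, not driver API):

    /* Re-query the name server when a login already exists, otherwise
     * log in first and query from the login completion. */
    enum ns_action { NS_QUERY, NS_LOGIN, NS_GIVE_UP };

    static enum ns_action rscn_next_step(int ns_logged_in, int node_available)
    {
            if (ns_logged_in)
                    return NS_QUERY;        /* GID_FT; wait for completion */
            if (node_available)
                    return NS_LOGIN;        /* PLOGI to the name server    */
            return NS_GIVE_UP;              /* flush pending RSCN state    */
    }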
 
 static int
-lpfc_els_rcv_flogi(struct lpfc_hba * phba,
-		   struct lpfc_iocbq * cmdiocb,
-		   struct lpfc_nodelist * ndlp, uint8_t newnode)
+lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_nodelist *ndlp, uint8_t newnode)
 {
+	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	uint32_t *lp = (uint32_t *) pcmd->virt;
 	IOCB_t *icmd = &cmdiocb->iocb;
@@ -2655,7 +3044,7 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
 
 	/* FLOGI received */
 
-	lpfc_set_disctmo(phba);
+	lpfc_set_disctmo(vport);
 
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		/* We should never receive a FLOGI in loop mode, ignore it */
@@ -2664,33 +3053,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
 		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
 		   Loop Mode */
 		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-				"%d:0113 An FLOGI ELS command x%x was received "
-				"from DID x%x in Loop Mode\n",
-				phba->brd_no, cmd, did);
+				"%d (%d):0113 An FLOGI ELS command x%x was "
+				"received from DID x%x in Loop Mode\n",
+				phba->brd_no, vport->vpi, cmd, did);
 		return 1;
 	}
 
 	did = Fabric_DID;
 
-	if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
+	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3))) {
 		/* For a FLOGI we accept, then if our portname is greater
 		 * then the remote portname we initiate Nport login.
 		 */
 
-		rc = memcmp(&phba->fc_portname, &sp->portName,
-			    sizeof (struct lpfc_name));
+		rc = memcmp(&vport->fc_portname, &sp->portName,
+			    sizeof(struct lpfc_name));
 
 		if (!rc) {
-			if ((mbox = mempool_alloc(phba->mbox_mem_pool,
-						  GFP_KERNEL)) == 0) {
+			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+			if (!mbox)
 				return 1;
-			}
+
 			lpfc_linkdown(phba);
 			lpfc_init_link(phba, mbox,
 				       phba->cfg_topology,
 				       phba->cfg_link_speed);
 			mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+			mbox->vport = vport;
 			rc = lpfc_sli_issue_mbox
 				(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
 			lpfc_set_loopback_flag(phba);
@@ -2699,31 +3089,34 @@ lpfc_els_rcv_flogi(struct lpfc_hba * phba,
 			}
 			return 1;
 		} else if (rc > 0) {	/* greater than */
-			spin_lock_irq(phba->host->host_lock);
-			phba->fc_flag |= FC_PT2PT_PLOGI;
-			spin_unlock_irq(phba->host->host_lock);
+			spin_lock_irq(shost->host_lock);
+			vport->fc_flag |= FC_PT2PT_PLOGI;
+			spin_unlock_irq(shost->host_lock);
 		}
-		phba->fc_flag |= FC_PT2PT;
-		phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= FC_PT2PT;
+		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+		spin_unlock_irq(shost->host_lock);
 	} else {
 		/* Reject this request because invalid parameters */
 		stat.un.b.lsRjtRsvd0 = 0;
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
 		stat.un.b.vendorUnique = 0;
-		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+				    NULL);
 		return 1;
 	}
 
 	/* Send back ACC */
-	lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
+	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
 
 	return 0;
 }
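The point-to-point branch of lpfc_els_rcv_flogi() is unchanged in substance: the two ports compare world-wide port names, the numerically greater WWPN takes the initiative and sends PLOGI, and identical names force a link re-init. A self-contained sketch of that arbitration (names illustrative, not the driver's):

    #include <stdint.h>
    #include <string.h>

    /* Who acts after an accepted point-to-point FLOGI. */
    enum p2p_role { P2P_RESTART_LINK, P2P_WE_PLOGI, P2P_PEER_PLOGIS };

    static enum p2p_role p2p_decide(const uint8_t my_wwpn[8],
                                    const uint8_t remote_wwpn[8])
    {
            int rc = memcmp(my_wwpn, remote_wwpn, 8);

            if (rc == 0)            /* identical names: restart the link */
                    return P2P_RESTART_LINK;
            return rc > 0 ? P2P_WE_PLOGI : P2P_PEER_PLOGIS;
    }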
 
 static int
-lpfc_els_rcv_rnid(struct lpfc_hba * phba,
-		  struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
@@ -2746,7 +3139,7 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
 	case 0:
 	case RNID_TOPOLOGY_DISC:
 		/* Send back ACC */
-		lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
+		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
 		break;
 	default:
 		/* Reject this request because format not supported */
@@ -2754,14 +3147,15 @@ lpfc_els_rcv_rnid(struct lpfc_hba * phba,
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 		stat.un.b.vendorUnique = 0;
-		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+				    NULL);
 	}
 	return 0;
 }
 
 static int
-lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		  struct lpfc_nodelist *ndlp)
 {
 	struct ls_rjt stat;
 
@@ -2770,15 +3164,15 @@ lpfc_els_rcv_lirr(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 	stat.un.b.vendorUnique = 0;
-	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 	return 0;
 }
 
 static void
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-	struct lpfc_sli *psli;
-	struct lpfc_sli_ring *pring;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	MAILBOX_t *mb;
 	IOCB_t *icmd;
 	RPS_RSP *rps_rsp;
@@ -2788,8 +3182,6 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	uint16_t xri, status;
 	uint32_t cmdsize;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];
 	mb = &pmb->mb;
 
 	ndlp = (struct lpfc_nodelist *) pmb->context2;
@@ -2804,8 +3196,9 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
 	mempool_free(pmb, phba->mbox_mem_pool);
-	elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, lpfc_max_els_tries, ndlp,
-				     ndlp->nlp_DID, ELS_CMD_ACC);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
 	lpfc_nlp_put(ndlp);
 	if (!elsiocb)
 		return;
@@ -2815,14 +3208,14 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
-	pcmd += sizeof (uint32_t); /* Skip past command */
+	pcmd += sizeof(uint32_t); /* Skip past command */
 	rps_rsp = (RPS_RSP *)pcmd;
 
 	if (phba->fc_topology != TOPOLOGY_LOOP)
 		status = 0x10;
 	else
 		status = 0x8;
-	if (phba->fc_flag & FC_FABRIC)
+	if (phba->pport->fc_flag & FC_FABRIC)
 		status |= 0x4;
 
 	rps_rsp->rsvd1 = 0;
@@ -2836,25 +3229,25 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
-			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
-			phba->brd_no, elsiocb->iotag,
+			"%d (%d):0118 Xmit ELS RPS ACC response tag x%x "
+			"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+			"rpi x%x\n",
+			phba->brd_no, ndlp->vport->vpi, elsiocb->iotag,
 			elsiocb->iocb.ulpContext, ndlp->nlp_DID,
 			ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
 
-	elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 	phba->fc_stat.elsXmitACC++;
-
-	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
 		lpfc_els_free_iocb(phba, elsiocb);
-	}
 	return;
 }
 
 static int
-lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		 struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
 {
+	struct lpfc_hba *phba = vport->phba;
 	uint32_t *lp;
 	uint8_t flag;
 	LPFC_MBOXQ_t *mbox;
@@ -2868,7 +3261,8 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 		stat.un.b.vendorUnique = 0;
-		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+				    NULL);
 	}
 
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2878,19 +3272,24 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 
 	if ((flag == 0) ||
 	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
-	    ((flag == 2) && (memcmp(&rps->un.portName, &phba->fc_portname,
-				    sizeof (struct lpfc_name)) == 0))) {
-		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
+	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
+				    sizeof(struct lpfc_name)) == 0))) {
+
+		printk("Fix me....\n");
+		dump_stack();
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+		if (mbox) {
 			lpfc_read_lnk_stat(phba, mbox);
 			mbox->context1 =
-				(void *)((unsigned long)cmdiocb->iocb.ulpContext);
+				(void *)((unsigned long) cmdiocb->iocb.ulpContext);
 			mbox->context2 = lpfc_nlp_get(ndlp);
+			mbox->vport = vport;
 			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
 			if (lpfc_sli_issue_mbox (phba, mbox,
-			    (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED) {
+			    (MBX_NOWAIT | MBX_STOP_IOCB)) != MBX_NOT_FINISHED)
 				/* Mbox completion will send ELS Response */
 				return 0;
-			}
+
 			lpfc_nlp_put(ndlp);
 			mempool_free(mbox, phba->mbox_mem_pool);
 		}
@@ -2899,27 +3298,25 @@ lpfc_els_rcv_rps(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 	stat.un.b.vendorUnique = 0;
-	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 	return 0;
 }
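lpfc_els_rcv_rps() answers only after a READ_LNK_STAT mailbox completes: the exchange id and a held node reference ride along in the mailbox context fields, and the completion handler (lpfc_els_rsp_rps_acc above) builds and transmits the ACC, while the error path drops the reference and frees the mailbox. (The printk("Fix me....")/dump_stack() pair is in the new code itself, flagging the GFP_ATOMIC allocation for later rework.) A generic sketch of that completion-chaining pattern, with stand-in types and no real driver calls:

    #include <stddef.h>

    struct mbox_req {
            void (*done)(struct mbox_req *mb, int status);
            unsigned long exchange_id;      /* ELS exchange to answer later */
            void *node_ref;                 /* dropped again on error paths */
    };

    static void rps_mbox_done(struct mbox_req *mb, int status)
    {
            if (status == 0) {
                    /* read link statistics, build and transmit the ELS ACC */
            }
            /* release mb->node_ref and free mb in either case */
    }

    /* Returns 0 when the request was queued and the ACC will follow later. */
    static int rps_defer_acc(struct mbox_req *mb, unsigned long xid, void *node)
    {
            mb->done = rps_mbox_done;
            mb->exchange_id = xid;
            mb->node_ref = node;
            /* a real driver would queue mb to the hardware here */
            mb->done(mb, 0);                /* simulate immediate completion */
            return 0;
    }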
 
 static int
-lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
-		     struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
+		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
 {
-	IOCB_t *icmd;
-	IOCB_t *oldcmd;
+	struct lpfc_hba *phba = vport->phba;
+	IOCB_t *icmd, *oldcmd;
 	RPL_RSP rpl_rsp;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
-	struct lpfc_sli *psli;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 
-	psli = &phba->sli;
-	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
 
-	elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
-				     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
 	if (!elsiocb)
 		return 1;
 
@@ -2929,7 +3326,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
 
 	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
-	pcmd += sizeof (uint16_t);
+	pcmd += sizeof(uint16_t);
 	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
 	pcmd += sizeof(uint16_t);
 
@@ -2937,8 +3334,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
 	rpl_rsp.listLen = be32_to_cpu(1);
 	rpl_rsp.index = 0;
 	rpl_rsp.port_num_blk.portNum = 0;
-	rpl_rsp.port_num_blk.portID = be32_to_cpu(phba->fc_myDID);
-	memcpy(&rpl_rsp.port_num_blk.portName, &phba->fc_portname,
+	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
+	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
 	       sizeof(struct lpfc_name));
 
 	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
@@ -2946,13 +3343,14 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
 
 	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0120 Xmit ELS RPL ACC response tag x%x xri x%x, "
-			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
-			phba->brd_no, elsiocb->iotag,
+			"%d (%d):0120 Xmit ELS RPL ACC response tag x%x "
+			"xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
+			"rpi x%x\n",
+			phba->brd_no, vport->vpi, elsiocb->iotag,
 			elsiocb->iocb.ulpContext, ndlp->nlp_DID,
 			ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
 
-	elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 
 	phba->fc_stat.elsXmitACC++;
 	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
@@ -2963,8 +3361,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
 }
 
 static int
-lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		 struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
@@ -2979,7 +3377,8 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 		stat.un.b.vendorUnique = 0;
-		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
+		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
+				    NULL);
 	}
 
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
@@ -2996,15 +3395,16 @@ lpfc_els_rcv_rpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	} else {
 		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
 	}
-	lpfc_els_rsp_rpl_acc(phba, cmdsize, cmdiocb, ndlp);
+	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
 
 	return 0;
 }
 
 static int
-lpfc_els_rcv_farp(struct lpfc_hba * phba,
-		  struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
 	IOCB_t *icmd;
@@ -3020,11 +3420,9 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
 	fp = (FARP *) lp;
 
 	/* FARP-REQ received from DID <did> */
-	lpfc_printf_log(phba,
-			KERN_INFO,
-			LOG_ELS,
-			"%d:0601 FARP-REQ received from DID x%x\n",
-			phba->brd_no, did);
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"%d (%d):0601 FARP-REQ received from DID x%x\n",
+			phba->brd_no, vport->vpi, did);
 
 	/* We will only support match on WWPN or WWNN */
 	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
@@ -3034,15 +3432,15 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
 	cnt = 0;
 	/* If this FARP command is searching for my portname */
 	if (fp->Mflags & FARP_MATCH_PORT) {
-		if (memcmp(&fp->RportName, &phba->fc_portname,
-			   sizeof (struct lpfc_name)) == 0)
+		if (memcmp(&fp->RportName, &vport->fc_portname,
+			   sizeof(struct lpfc_name)) == 0)
 			cnt = 1;
 	}
 
 	/* If this FARP command is searching for my nodename */
 	if (fp->Mflags & FARP_MATCH_NODE) {
-		if (memcmp(&fp->RnodeName, &phba->fc_nodename,
-			   sizeof (struct lpfc_name)) == 0)
+		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
+			   sizeof(struct lpfc_name)) == 0)
 			cnt = 1;
 	}
 
@@ -3052,28 +3450,28 @@ lpfc_els_rcv_farp(struct lpfc_hba * phba,
 			/* Log back into the node before sending the FARP. */
 			if (fp->Rflags & FARP_REQUEST_PLOGI) {
 				ndlp->nlp_prev_state = ndlp->nlp_state;
-				lpfc_nlp_set_state(phba, ndlp,
+				lpfc_nlp_set_state(vport, ndlp,
 						   NLP_STE_PLOGI_ISSUE);
-				lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
+				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
 			}
 
 			/* Send a FARP response to that node */
-			if (fp->Rflags & FARP_REQUEST_FARPR) {
-				lpfc_issue_els_farpr(phba, did, 0);
-			}
+			if (fp->Rflags & FARP_REQUEST_FARPR)
+				lpfc_issue_els_farpr(vport, did, 0);
 		}
 	}
 	return 0;
 }
 
 static int
-lpfc_els_rcv_farpr(struct lpfc_hba * phba,
-		   struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
+lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		   struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
 	IOCB_t *icmd;
 	uint32_t cmd, did;
+	struct lpfc_hba *phba = vport->phba;
 
 	icmd = &cmdiocb->iocb;
 	did = icmd->un.elsreq64.remoteID;
@@ -3082,21 +3480,18 @@ lpfc_els_rcv_farpr(struct lpfc_hba * phba,
 
 	cmd = *lp++;
 	/* FARP-RSP received from DID <did> */
-	lpfc_printf_log(phba,
-			KERN_INFO,
-			LOG_ELS,
-			"%d:0600 FARP-RSP received from DID x%x\n",
-			phba->brd_no, did);
-
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"%d (%d):0600 FARP-RSP received from DID x%x\n",
+			phba->brd_no, vport->vpi, did);
 	/* ACCEPT the Farp resp request */
-	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
+	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
 
 	return 0;
 }
 
 static int
-lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
-		 struct lpfc_nodelist * fan_ndlp)
+lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *fan_ndlp)
 {
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
@@ -3104,10 +3499,12 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	uint32_t cmd, did;
 	FAN *fp;
 	struct lpfc_nodelist *ndlp, *next_ndlp;
+	struct lpfc_hba *phba = vport->phba;
 
 	/* FAN received */
-	lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
-			phba->brd_no);
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"%d (%d):0265 FAN received\n",
+			phba->brd_no, vport->vpi);
 
 	icmd = &cmdiocb->iocb;
 	did = icmd->un.elsreq64.remoteID;
@@ -3115,11 +3512,11 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 	lp = (uint32_t *)pcmd->virt;
 
 	cmd = *lp++;
-	fp = (FAN *)lp;
+	fp = (FAN *) lp;
 
 	/* FAN received; Fan does not have a reply sequence */
 
-	if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
+	if (phba->pport->port_state == LPFC_LOCAL_CFG_LINK) {
 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
 			    sizeof(struct lpfc_name)) != 0) ||
 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
@@ -3130,7 +3527,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 			 */
 
 			list_for_each_entry_safe(ndlp, next_ndlp,
-						 &phba->fc_nodes, nlp_listp) {
+						 &vport->fc_nodes, nlp_listp) {
 				if (ndlp->nlp_state != NLP_STE_NPR_NODE)
 					continue;
 				if (ndlp->nlp_type & NLP_FABRIC) {
@@ -3138,24 +3535,24 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 					 * Clean up old Fabric, Nameserver and
 					 * other NLP_FABRIC logins
 					 */
-					lpfc_drop_node(phba, ndlp);
+					lpfc_drop_node(vport, ndlp);
 				} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
 					/* Fail outstanding I/O now since this
 					 * device is marked for PLOGI
 					 */
-					lpfc_unreg_rpi(phba, ndlp);
+					lpfc_unreg_rpi(vport, ndlp);
 				}
 			}
 
-			phba->hba_state = LPFC_FLOGI;
-			lpfc_set_disctmo(phba);
-			lpfc_initial_flogi(phba);
+			vport->port_state = LPFC_FLOGI;
+			lpfc_set_disctmo(vport);
+			lpfc_initial_flogi(vport);
 			return 0;
 		}
 		/* Discovery not needed,
 		 * move the nodes to their original state.
 		 */
-		list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes,
+		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
 					 nlp_listp) {
 			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
 				continue;
@@ -3163,13 +3560,13 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 			switch (ndlp->nlp_prev_state) {
 			case NLP_STE_UNMAPPED_NODE:
 				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(phba, ndlp,
+				lpfc_nlp_set_state(vport, ndlp,
 						   NLP_STE_UNMAPPED_NODE);
 				break;
 
 			case NLP_STE_MAPPED_NODE:
 				ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
-				lpfc_nlp_set_state(phba, ndlp,
+				lpfc_nlp_set_state(vport, ndlp,
 						   NLP_STE_MAPPED_NODE);
 				break;
 
@@ -3179,7 +3576,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 			}
 
 			/* Start discovery - this should just do CLEAR_LA */
-			lpfc_disc_start(phba);
+			lpfc_disc_start(vport);
 		}
 	return 0;
 }
@@ -3187,42 +3584,42 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
 void
 lpfc_els_timeout(unsigned long ptr)
 {
-	struct lpfc_hba *phba;
+	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
+	struct lpfc_hba *phba = vport->phba;
 	unsigned long iflag;
 
-	phba = (struct lpfc_hba *)ptr;
-	if (phba == 0)
-		return;
-	spin_lock_irqsave(phba->host->host_lock, iflag);
-	if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
-		phba->work_hba_events |= WORKER_ELS_TMO;
+	spin_lock_irqsave(&vport->work_port_lock, iflag);
+	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
+		vport->work_port_events |= WORKER_ELS_TMO;
+		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+		spin_lock_irqsave(&phba->hbalock, iflag);
 		if (phba->work_wait)
-			wake_up(phba->work_wait);
+			lpfc_worker_wake_up(phba);
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
 	}
-	spin_unlock_irqrestore(phba->host->host_lock, iflag);
+	else
+		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
 	return;
 }
 
 void
-lpfc_els_timeout_handler(struct lpfc_hba *phba)
+lpfc_els_timeout_handler(struct lpfc_vport *vport)
 {
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_sli_ring *pring;
 	struct lpfc_iocbq *tmp_iocb, *piocb;
 	IOCB_t *cmd = NULL;
 	struct lpfc_dmabuf *pcmd;
-	uint32_t *elscmd;
-	uint32_t els_command=0;
+	uint32_t els_command = 0;
 	uint32_t timeout;
-	uint32_t remote_ID;
+	uint32_t remote_ID = 0xffffffff;
 
-	if (phba == 0)
-		return;
-	spin_lock_irq(phba->host->host_lock);
 	/* If the timer is already canceled do nothing */
-	if (!(phba->work_hba_events & WORKER_ELS_TMO)) {
-		spin_unlock_irq(phba->host->host_lock);
+	if ((vport->work_port_events & WORKER_ELS_TMO) == 0) {
 		return;
 	}
+	spin_lock_irq(&phba->hbalock);
 	timeout = (uint32_t)(phba->fc_ratov << 1);
 
 	pring = &phba->sli.ring[LPFC_ELS_RING];
@@ -3230,63 +3627,70 @@ lpfc_els_timeout_handler(struct lpfc_hba *phba)
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		cmd = &piocb->iocb;
 
-		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) ||
-		    (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN) ||
-		    (piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)) {
+		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
+		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			continue;
-		}
+
+		if (piocb->vport != vport)
+			continue;
+
 		pcmd = (struct lpfc_dmabuf *) piocb->context2;
-		if (pcmd) {
-			elscmd = (uint32_t *) (pcmd->virt);
-			els_command = *elscmd;
-		}
+		if (pcmd)
+			els_command = *(uint32_t *) (pcmd->virt);
 
-		if ((els_command == ELS_CMD_FARP)
-		    || (els_command == ELS_CMD_FARPR)) {
+		if (els_command == ELS_CMD_FARP ||
+		    els_command == ELS_CMD_FARPR ||
+		    els_command == ELS_CMD_FDISC)
+			continue;
+
+		if (vport != piocb->vport)
 			continue;
-		}
 
 		if (piocb->drvrTimeout > 0) {
-			if (piocb->drvrTimeout >= timeout) {
+			if (piocb->drvrTimeout >= timeout)
 				piocb->drvrTimeout -= timeout;
-			} else {
+			else
 				piocb->drvrTimeout = 0;
-			}
 			continue;
 		}
 
-		if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
-			struct lpfc_nodelist *ndlp;
-			ndlp = __lpfc_findnode_rpi(phba, cmd->ulpContext);
-			remote_ID = ndlp->nlp_DID;
-		} else {
+		remote_ID = 0xffffffff;
+		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
 			remote_ID = cmd->un.elsreq64.remoteID;
+		else {
+			struct lpfc_nodelist *ndlp;
+			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
+			if (ndlp)
+				remote_ID = ndlp->nlp_DID;
 		}
 
-		lpfc_printf_log(phba,
-				KERN_ERR,
-				LOG_ELS,
-				"%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
-				phba->brd_no, els_command,
+		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+				"%d (%d):0127 ELS timeout Data: x%x x%x x%x "
+				"x%x\n",
+				phba->brd_no, vport->vpi, els_command,
 				remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
 
 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
 	}
-	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
-		mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout);
+	spin_unlock_irq(&phba->hbalock);
 
-	spin_unlock_irq(phba->host->host_lock);
+	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
 }
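The timer rework above follows the usual kernel split: lpfc_els_timeout() runs in timer context and only posts WORKER_ELS_TMO under the vport's work_port_lock and wakes the worker, while lpfc_els_timeout_handler() does the slow txcmplq scan later in process context and re-arms the timer. A userspace pthreads sketch of that handoff (all names illustrative):

    #include <pthread.h>

    #define WORK_ELS_TMO 0x1

    struct worker {
            pthread_mutex_t lock;
            pthread_cond_t wake;
            unsigned int events;
    };

    /* Timer context: record the event once and wake the worker. */
    static void timer_fires(struct worker *w)
    {
            pthread_mutex_lock(&w->lock);
            if (!(w->events & WORK_ELS_TMO)) {
                    w->events |= WORK_ELS_TMO;
                    pthread_cond_signal(&w->wake);
            }
            pthread_mutex_unlock(&w->lock);
    }

    /* Worker context: consume the posted events, then do the slow scan. */
    static unsigned int worker_wait(struct worker *w)
    {
            unsigned int ev;

            pthread_mutex_lock(&w->lock);
            while (w->events == 0)
                    pthread_cond_wait(&w->wake, &w->lock);
            ev = w->events;
            w->events = 0;          /* consume before scanning */
            pthread_mutex_unlock(&w->lock);
            return ev;
    }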
 
 void
-lpfc_els_flush_cmd(struct lpfc_hba *phba)
+lpfc_els_flush_cmd(struct lpfc_vport *vport)
 {
 	LIST_HEAD(completions);
+	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct lpfc_iocbq *tmp_iocb, *piocb;
 	IOCB_t *cmd = NULL;
 
-	spin_lock_irq(phba->host->host_lock);
+	lpfc_fabric_abort_vport(vport);
+
+	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
 		cmd = &piocb->iocb;
 
@@ -3301,271 +3705,1042 @@ lpfc_els_flush_cmd(struct lpfc_hba *phba)
 		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
 			continue;
 
+		if (piocb->vport != vport)
+			continue;
+
 		list_move_tail(&piocb->list, &completions);
 		pring->txq_cnt--;
-
 	}
 
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
-		cmd = &piocb->iocb;
-
 		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
 			continue;
 		}
 
+		if (piocb->vport != vport)
+			continue;
+
 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
 	}
-	spin_unlock_irq(phba->host->host_lock);
+	spin_unlock_irq(&phba->hbalock);
 
-	while(!list_empty(&completions)) {
+	while (!list_empty(&completions)) {
 		piocb = list_get_first(&completions, struct lpfc_iocbq, list);
 		cmd = &piocb->iocb;
-		list_del(&piocb->list);
+		list_del_init(&piocb->list);
 
-		if (piocb->iocb_cmpl) {
+		if (!piocb->iocb_cmpl)
+			lpfc_sli_release_iocbq(phba, piocb);
+		else {
 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
 			(piocb->iocb_cmpl) (phba, piocb, piocb);
-		} else
-			lpfc_sli_release_iocbq(phba, piocb);
+		}
 	}
 
 	return;
 }
 
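lpfc_els_flush_cmd() keeps the classic two-phase flush idiom: pending requests are moved to a private completions list while the lock is held (now hbalock rather than the SCSI host_lock), then completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED after the lock is dropped, so completion handlers never run under the ring lock. A minimal sketch of the idiom with stand-in types:

    #include <pthread.h>
    #include <stddef.h>

    struct req {
            struct req *next;
            int status;                     /* error code set at flush time */
            void (*complete)(struct req *); /* may itself take other locks  */
    };

    static void flush_queue(struct req **queue, pthread_mutex_t *lock, int err)
    {
            struct req *victims, *r;

            pthread_mutex_lock(lock);       /* phase 1: detach all requests */
            victims = *queue;
            *queue = NULL;
            pthread_mutex_unlock(lock);

            while ((r = victims) != NULL) { /* phase 2: complete lock-free  */
                    victims = r->next;
                    r->status = err;        /* e.g. "locally aborted"       */
                    if (r->complete)
                            r->complete(r);
            }
    }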
-void
-lpfc_els_unsol_event(struct lpfc_hba * phba,
-		     struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
+static void
+lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
 {
-	struct lpfc_sli *psli;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_dmabuf *mp;
-	uint32_t *lp;
-	IOCB_t *icmd;
 	struct ls_rjt stat;
-	uint32_t cmd;
-	uint32_t did;
-	uint32_t newnode;
-	uint32_t drop_cmd = 0;	/* by default do NOT drop received cmd */
-	uint32_t rjt_err = 0;
-
-	psli = &phba->sli;
-	icmd = &elsiocb->iocb;
-
-	if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-	    ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
-		/* Not enough posted buffers; Try posting more buffers */
-		phba->fc_stat.NoRcvBuf++;
-		lpfc_post_buffer(phba, pring, 0, 1);
-		return;
-	}
-
-	/* If there are no BDEs associated with this IOCB,
-	 * there is nothing to do.
-	 */
-	if (icmd->ulpBdeCount == 0)
-		return;
+	uint32_t *payload;
+	uint32_t cmd, did, newnode, rjt_err = 0;
+	IOCB_t *icmd = &elsiocb->iocb;
 
-	/* type of ELS cmd is first 32bit word in packet */
-	mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
-							    cont64[0].
-							    addrHigh,
-							    icmd->un.
-							    cont64[0].addrLow));
-	if (mp == 0) {
-		drop_cmd = 1;
+	if (vport == NULL || elsiocb->context2 == NULL)
 		goto dropit;
-	}
 
 	newnode = 0;
-	lp = (uint32_t *) mp->virt;
-	cmd = *lp++;
-	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
+	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
+	cmd = *payload;
+	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
+		lpfc_post_buffer(phba, pring, 1, 1);
 
+	did = icmd->un.rcvels.remoteID;
 	if (icmd->ulpStatus) {
-		lpfc_mbuf_free(phba, mp->virt, mp->phys);
-		kfree(mp);
-		drop_cmd = 1;
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV Unsol ELS: status:x%x/x%x did:x%x",
+			icmd->ulpStatus, icmd->un.ulpWord[4], did);
 		goto dropit;
 	}
 
 	/* Check to see if link went down during discovery */
-	if (lpfc_els_chk_latt(phba)) {
-		lpfc_mbuf_free(phba, mp->virt, mp->phys);
-		kfree(mp);
-		drop_cmd = 1;
+	if (lpfc_els_chk_latt(vport))
 		goto dropit;
-	}
 
-	did = icmd->un.rcvels.remoteID;
-	ndlp = lpfc_findnode_did(phba, did);
+	/* Ignore traffic recevied during vport shutdown. */
+	if (vport->load_flag & FC_UNLOADING)
+		goto dropit;
+
+	ndlp = lpfc_findnode_did(vport, did);
 	if (!ndlp) {
 		/* Cannot find existing Fabric ndlp, so allocate a new one */
 		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
-		if (!ndlp) {
-			lpfc_mbuf_free(phba, mp->virt, mp->phys);
-			kfree(mp);
-			drop_cmd = 1;
+		if (!ndlp)
 			goto dropit;
-		}
 
-		lpfc_nlp_init(phba, ndlp, did);
+		lpfc_nlp_init(vport, ndlp, did);
 		newnode = 1;
 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
 			ndlp->nlp_type |= NLP_FABRIC;
 		}
-		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
+		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
 	}
 
 	phba->fc_stat.elsRcvFrame++;
 	if (elsiocb->context1)
 		lpfc_nlp_put(elsiocb->context1);
 	elsiocb->context1 = lpfc_nlp_get(ndlp);
-	elsiocb->context2 = mp;
+	elsiocb->vport = vport;
 
 	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
 		cmd &= ELS_CMD_MASK;
 	}
 	/* ELS command <elsCmd> received from NPORT <did> */
 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
-			"%d:0112 ELS command x%x received from NPORT x%x "
-			"Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
+			"%d (%d):0112 ELS command x%x received from NPORT x%x "
+			"Data: x%x\n", phba->brd_no, vport->vpi, cmd, did,
+			vport->port_state);
 
 	switch (cmd) {
 	case ELS_CMD_PLOGI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PLOGI: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvPLOGI++;
-		if (phba->hba_state < LPFC_DISC_AUTH) {
-			rjt_err = 1;
+		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
+
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
 			break;
 		}
-		ndlp = lpfc_plogi_confirm_nport(phba, mp, ndlp);
-		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_PLOGI);
+
 		break;
 	case ELS_CMD_FLOGI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FLOGI: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvFLOGI++;
-		lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
+		lpfc_els_rcv_flogi(vport, elsiocb, ndlp, newnode);
 		if (newnode)
-			lpfc_drop_node(phba, ndlp);
+			lpfc_drop_node(vport, ndlp);
 		break;
 	case ELS_CMD_LOGO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV LOGO: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvLOGO++;
-		if (phba->hba_state < LPFC_DISC_AUTH) {
-			rjt_err = 1;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
 			break;
 		}
-		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
 		break;
 	case ELS_CMD_PRLO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PRLO: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvPRLO++;
-		if (phba->hba_state < LPFC_DISC_AUTH) {
-			rjt_err = 1;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
 			break;
 		}
-		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
 		break;
 	case ELS_CMD_RSCN:
 		phba->fc_stat.elsRcvRSCN++;
-		lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
+		lpfc_els_rcv_rscn(vport, elsiocb, ndlp, newnode);
 		if (newnode)
-			lpfc_drop_node(phba, ndlp);
+			lpfc_drop_node(vport, ndlp);
 		break;
 	case ELS_CMD_ADISC:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ADISC: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvADISC++;
-		if (phba->hba_state < LPFC_DISC_AUTH) {
-			rjt_err = 1;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
 			break;
 		}
-		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_ADISC);
 		break;
 	case ELS_CMD_PDISC:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PDISC: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvPDISC++;
-		if (phba->hba_state < LPFC_DISC_AUTH) {
-			rjt_err = 1;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
 			break;
 		}
-		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
+		lpfc_disc_state_machine(vport, ndlp, elsiocb,
+					NLP_EVT_RCV_PDISC);
 		break;
 	case ELS_CMD_FARPR:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FARPR: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvFARPR++;
-		lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
+		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
 		break;
 	case ELS_CMD_FARP:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FARP: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvFARP++;
-		lpfc_els_rcv_farp(phba, elsiocb, ndlp);
+		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
 		break;
 	case ELS_CMD_FAN:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV FAN: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvFAN++;
-		lpfc_els_rcv_fan(phba, elsiocb, ndlp);
+		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
 		break;
 	case ELS_CMD_PRLI:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV PRLI: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvPRLI++;
-		if (phba->hba_state < LPFC_DISC_AUTH) {
-			rjt_err = 1;
+		if (vport->port_state < LPFC_DISC_AUTH) {
+			rjt_err = LSRJT_UNABLE_TPC;
 			break;
 		}
-		lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
+		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
 		break;
 	case ELS_CMD_LIRR:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV LIRR: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvLIRR++;
-		lpfc_els_rcv_lirr(phba, elsiocb, ndlp);
+		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
 		if (newnode)
-			lpfc_drop_node(phba, ndlp);
+			lpfc_drop_node(vport, ndlp);
 		break;
 	case ELS_CMD_RPS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RPS: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvRPS++;
-		lpfc_els_rcv_rps(phba, elsiocb, ndlp);
+		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
 		if (newnode)
-			lpfc_drop_node(phba, ndlp);
+			lpfc_drop_node(vport, ndlp);
 		break;
 	case ELS_CMD_RPL:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RPL: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvRPL++;
-		lpfc_els_rcv_rpl(phba, elsiocb, ndlp);
+		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
 		if (newnode)
-			lpfc_drop_node(phba, ndlp);
+			lpfc_drop_node(vport, ndlp);
 		break;
 	case ELS_CMD_RNID:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RNID: did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
 		phba->fc_stat.elsRcvRNID++;
-		lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
+		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
3529 if (newnode) 3965 if (newnode)
3530 lpfc_drop_node(phba, ndlp); 3966 lpfc_drop_node(vport, ndlp);
3531 break; 3967 break;
3532 default: 3968 default:
3969 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
3970 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
3971 cmd, did, vport->port_state);
3972
3533 /* Unsupported ELS command, reject */ 3973 /* Unsupported ELS command, reject */
3534 rjt_err = 1; 3974 rjt_err = LSRJT_INVALID_CMD;
3535 3975
3536 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 3976 /* Unknown ELS command <elsCmd> received from NPORT <did> */
3537 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 3977 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3538 "%d:0115 Unknown ELS command x%x received from " 3978 "%d (%d):0115 Unknown ELS command x%x "
3539 "NPORT x%x\n", phba->brd_no, cmd, did); 3979 "received from NPORT x%x\n",
3980 phba->brd_no, vport->vpi, cmd, did);
3540 if (newnode) 3981 if (newnode)
3541 lpfc_drop_node(phba, ndlp); 3982 lpfc_drop_node(vport, ndlp);
3542 break; 3983 break;
3543 } 3984 }
3544 3985
3545	/* check if we need to LS_RJT the received ELS cmd */	3986	/* check if we need to LS_RJT the received ELS cmd */
3546 if (rjt_err) { 3987 if (rjt_err) {
3547 stat.un.b.lsRjtRsvd0 = 0; 3988 memset(&stat, 0, sizeof(stat));
3548 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3989 stat.un.b.lsRjtRsnCode = rjt_err;
3549 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3990 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
3550 stat.un.b.vendorUnique = 0; 3991 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
3551 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp); 3992 NULL);
3993 if (newnode)
3994 lpfc_drop_node(vport, ndlp);
3995 }
3996
3997 return;
3998
3999dropit:
4000 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4001 "%d (%d):0111 Dropping received ELS cmd "
4002 "Data: x%x x%x x%x\n",
4003 phba->brd_no, vport ? vport->vpi : 0xffff,
4004 icmd->ulpStatus, icmd->un.ulpWord[4],
4005 icmd->ulpTimeout);
4006 phba->fc_stat.elsRcvDrop++;
4007}
4008
4009static struct lpfc_vport *
4010lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
4011{
4012 struct lpfc_vport *vport;
4013
4014 list_for_each_entry(vport, &phba->port_list, listentry) {
4015 if (vport->vpi == vpi)
4016 return vport;
4017 }
4018 return NULL;
4019}
4020
4021void
4022lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4023 struct lpfc_iocbq *elsiocb)
4024{
4025 struct lpfc_vport *vport = phba->pport;
4026 IOCB_t *icmd = &elsiocb->iocb;
4027 dma_addr_t paddr;
4028 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
4029 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
4030
4031 elsiocb->context2 = NULL;
4032 elsiocb->context3 = NULL;
4033
4034 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
4035 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
4036 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
4037 (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
4038 phba->fc_stat.NoRcvBuf++;
4039 /* Not enough posted buffers; Try posting more buffers */
4040 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
4041 lpfc_post_buffer(phba, pring, 0, 1);
4042 return;
4043 }
4044
4045 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4046 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
4047 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
4048 if (icmd->unsli3.rcvsli3.vpi == 0xffff)
4049 vport = phba->pport;
4050 else {
4051 uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
4052 vport = lpfc_find_vport_by_vpid(phba, vpi);
4053 }
4054 }
4055 /* If there are no BDEs associated
4056 * with this IOCB, there is nothing to do.
4057 */
4058 if (icmd->ulpBdeCount == 0)
4059 return;
4060
4061 /* type of ELS cmd is first 32bit word
4062 * in packet
4063 */
4064 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4065 elsiocb->context2 = bdeBuf1;
4066 } else {
4067 paddr = getPaddr(icmd->un.cont64[0].addrHigh,
4068 icmd->un.cont64[0].addrLow);
4069 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
4070 paddr);
3552 } 4071 }
3553 4072
4073 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
4074 /*
4075 * The different unsolicited event handlers would tell us
4076 * if they are done with "mp" by setting context2 to NULL.
4077 */
-3554	lpfc_nlp_put(elsiocb->context1);
-3555	elsiocb->context1 = NULL;
-3556	if (elsiocb->context2) {
-3557		lpfc_mbuf_free(phba, mp->virt, mp->phys);
-3558		kfree(mp);
-3559	}
-3560 dropit:
-3561	/* check if need to drop received ELS cmd */
-3562	if (drop_cmd == 1) {
+4078	lpfc_nlp_put(elsiocb->context1);
+4079	elsiocb->context1 = NULL;
+4080	if (elsiocb->context2) {
+4081		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
+4082		elsiocb->context2 = NULL;
+4083	}
+4084
+4085	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
+4086	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
+4087	    icmd->ulpBdeCount == 2) {
+4088		elsiocb->context2 = bdeBuf2;
+4089		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
+4090		/* free mp if we are done with it */
+4091		if (elsiocb->context2) {
+4092			lpfc_in_buf_free(phba, elsiocb->context2);
+4093			elsiocb->context2 = NULL;
+4094		}
+4095	}
4096}
4097
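The receive path above demultiplexes each unsolicited ELS frame to a vport using the VPI carried in the IOCB, falling back to the physical port when no VPI is tagged. A minimal sketch of that lookup step follows; els_demux_vport is an illustrative name, but the port_list walk mirrors lpfc_find_vport_by_vpid() shown above, and the caller is assumed to hold whatever lock protects port_list.

/* Sketch: route an unsolicited ELS frame to its vport by VPI. */
static struct lpfc_vport *
els_demux_vport(struct lpfc_hba *phba, uint16_t vpi)
{
	struct lpfc_vport *vport;

	if (vpi == 0xffff)		/* untagged: physical port */
		return phba->pport;

	list_for_each_entry(vport, &phba->port_list, listentry)
		if (vport->vpi == vpi)
			return vport;

	return NULL;			/* unknown VPI: frame is dropped */
}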
4098void
4099lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
4100{
4101 struct lpfc_nodelist *ndlp, *ndlp_fdmi;
4102
4103 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4104 if (!ndlp) {
4105 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4106 if (!ndlp) {
4107 if (phba->fc_topology == TOPOLOGY_LOOP) {
4108 lpfc_disc_start(vport);
4109 return;
4110 }
4111 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4112 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4113 "%d (%d):0251 NameServer login: no memory\n",
4114 phba->brd_no, vport->vpi);
4115 return;
4116 }
4117 lpfc_nlp_init(vport, ndlp, NameServer_DID);
4118 ndlp->nlp_type |= NLP_FABRIC;
4119 }
4120
4121 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4122
4123 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
4124 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-3563		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-3564			"%d:0111 Dropping received ELS cmd "
-3565			"Data: x%x x%x x%x\n", phba->brd_no,
-3566			icmd->ulpStatus, icmd->un.ulpWord[4],
-3567			icmd->ulpTimeout);
-3568		phba->fc_stat.elsRcvDrop++;
+4125		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+4126			"%d (%d):0252 Cannot issue NameServer login\n",
+4127			phba->brd_no, vport->vpi);
+4128		return;
+4129	}
+4130
+4131	if (phba->cfg_fdmi_on) {
+4132		ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
+4133					  GFP_KERNEL);
+4134		if (ndlp_fdmi) {
+4135			lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
+4136			ndlp_fdmi->nlp_type |= NLP_FABRIC;
+4137			ndlp_fdmi->nlp_state =
+4138				NLP_STE_PLOGI_ISSUE;
+4139			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
+4140					     0);
+4141		}
+4142	}
+4143	return;
+4144}
4145
4146static void
4147lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4148{
4149 struct lpfc_vport *vport = pmb->vport;
4150 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4151 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4152 MAILBOX_t *mb = &pmb->mb;
4153
4154 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4155 lpfc_nlp_put(ndlp);
4156
4157 if (mb->mbxStatus) {
4158 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4159 "%d (%d):0915 Register VPI failed: 0x%x\n",
4160 phba->brd_no, vport->vpi, mb->mbxStatus);
4161
4162 switch (mb->mbxStatus) {
4163 case 0x11: /* unsupported feature */
4164 case 0x9603: /* max_vpi exceeded */
4165 /* giving up on vport registration */
4166 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4167 spin_lock_irq(shost->host_lock);
4168 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4169 spin_unlock_irq(shost->host_lock);
4170 lpfc_can_disctmo(vport);
4171 break;
4172 default:
4173 /* Try to recover from this error */
4174 lpfc_mbx_unreg_vpi(vport);
4175 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4176 lpfc_initial_fdisc(vport);
4177 break;
4178 }
4179
4180 } else {
4181 if (vport == phba->pport)
4182 lpfc_issue_fabric_reglogin(vport);
4183 else
4184 lpfc_do_scr_ns_plogi(phba, vport);
-3569	}
-3570	return;
-3571}
+4185	}
+4186	mempool_free(pmb, phba->mbox_mem_pool);
+4187	return;
+4188}
4189
4190void
4191lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
4192 struct lpfc_nodelist *ndlp)
4193{
4194 LPFC_MBOXQ_t *mbox;
4195
4196 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4197 if (mbox) {
4198 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
4199 mbox->vport = vport;
4200 mbox->context2 = lpfc_nlp_get(ndlp);
4201 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
4202 if (lpfc_sli_issue_mbox(phba, mbox,
4203 MBX_NOWAIT | MBX_STOP_IOCB)
4204 == MBX_NOT_FINISHED) {
4205 mempool_free(mbox, phba->mbox_mem_pool);
4206 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4207
4208 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4209
4210 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4211 "%d (%d):0253 Register VPI: Cannot send mbox\n",
4212 phba->brd_no, vport->vpi);
4213 }
4214 } else {
4215 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4216
4217 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
4218 "%d (%d):0254 Register VPI: no memory\n",
4219 phba->brd_no, vport->vpi);
4220
4221 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4222 lpfc_nlp_put(ndlp);
4223 }
4224}
4225
4226static void
4227lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4228 struct lpfc_iocbq *rspiocb)
4229{
4230 struct lpfc_vport *vport = cmdiocb->vport;
4231 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4232 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4233 struct lpfc_nodelist *np;
4234 struct lpfc_nodelist *next_np;
4235 IOCB_t *irsp = &rspiocb->iocb;
4236 struct lpfc_iocbq *piocb;
4237
4238 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
4239 "%d (%d):0123 FDISC completes. x%x/x%x prevDID: x%x\n",
4240 phba->brd_no, vport->vpi,
4241 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4242
4243 /* Since all FDISCs are being single threaded, we
4244 * must reset the discovery timer for ALL vports
4245 * waiting to send FDISC when one completes.
4246 */
4247 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
4248 lpfc_set_disctmo(piocb->vport);
4249 }
4250
4251 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4252 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
4253 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
4254
4255 if (irsp->ulpStatus) {
4256 /* Check for retry */
4257 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
4258 goto out;
4259
4260 /* FDISC failed */
4261 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4262 "%d (%d):0124 FDISC failed. (%d/%d)\n",
4263 phba->brd_no, vport->vpi,
4264 irsp->ulpStatus, irsp->un.ulpWord[4]);
4265
4266 if (vport->fc_vport->vport_state == FC_VPORT_INITIALIZING)
4267 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4268
4269 lpfc_nlp_put(ndlp);
4270 /* giving up on FDISC. Cancel discovery timer */
4271 lpfc_can_disctmo(vport);
4272 } else {
4273 spin_lock_irq(shost->host_lock);
4274 vport->fc_flag |= FC_FABRIC;
4275 if (vport->phba->fc_topology == TOPOLOGY_LOOP)
4276 vport->fc_flag |= FC_PUBLIC_LOOP;
4277 spin_unlock_irq(shost->host_lock);
4278
4279 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
4280 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
4281 if ((vport->fc_prevDID != vport->fc_myDID) &&
4282 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
4283 /* If our NportID changed, we need to ensure all
4284 * remaining NPORTs get unreg_login'ed so we can
4285 * issue unreg_vpi.
4286 */
4287 list_for_each_entry_safe(np, next_np,
4288 &vport->fc_nodes, nlp_listp) {
4289 if (np->nlp_state != NLP_STE_NPR_NODE
4290 || !(np->nlp_flag & NLP_NPR_ADISC))
4291 continue;
4292 spin_lock_irq(shost->host_lock);
4293 np->nlp_flag &= ~NLP_NPR_ADISC;
4294 spin_unlock_irq(shost->host_lock);
4295 lpfc_unreg_rpi(vport, np);
4296 }
4297 lpfc_mbx_unreg_vpi(vport);
4298 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4299 }
4300
4301 if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
4302 lpfc_register_new_vport(phba, vport, ndlp);
4303 else
4304 lpfc_do_scr_ns_plogi(phba, vport);
4305
4306 lpfc_nlp_put(ndlp); /* Free Fabric ndlp for vports */
4307 }
4308
4309out:
4310 lpfc_els_free_iocb(phba, cmdiocb);
4311}
4312
4313int
4314lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4315 uint8_t retry)
4316{
4317 struct lpfc_hba *phba = vport->phba;
4318 IOCB_t *icmd;
4319 struct lpfc_iocbq *elsiocb;
4320 struct serv_parm *sp;
4321 uint8_t *pcmd;
4322 uint16_t cmdsize;
4323 int did = ndlp->nlp_DID;
4324 int rc;
4325
4326 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
4327 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
4328 ELS_CMD_FDISC);
4329 if (!elsiocb) {
4330 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4331
4332 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4333 "%d (%d):0255 Issue FDISC: no IOCB\n",
4334 phba->brd_no, vport->vpi);
4335 return 1;
4336 }
4337
4338 icmd = &elsiocb->iocb;
4339 icmd->un.elsreq64.myID = 0;
4340 icmd->un.elsreq64.fl = 1;
4341
4342 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
4343 icmd->ulpCt_h = 1;
4344 icmd->ulpCt_l = 0;
4345
4346 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4347 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
4348 pcmd += sizeof(uint32_t); /* CSP Word 1 */
4349 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
4350 sp = (struct serv_parm *) pcmd;
4351 /* Setup CSPs accordingly for Fabric */
4352 sp->cmn.e_d_tov = 0;
4353 sp->cmn.w2.r_a_tov = 0;
4354 sp->cls1.classValid = 0;
4355 sp->cls2.seqDelivery = 1;
4356 sp->cls3.seqDelivery = 1;
4357
4358 pcmd += sizeof(uint32_t); /* CSP Word 2 */
4359 pcmd += sizeof(uint32_t); /* CSP Word 3 */
4360 pcmd += sizeof(uint32_t); /* CSP Word 4 */
4361 pcmd += sizeof(uint32_t); /* Port Name */
4362 memcpy(pcmd, &vport->fc_portname, 8);
4363 pcmd += sizeof(uint32_t); /* Node Name */
4364 pcmd += sizeof(uint32_t); /* Node Name */
4365 memcpy(pcmd, &vport->fc_nodename, 8);
4366
4367 lpfc_set_disctmo(vport);
4368
4369 phba->fc_stat.elsXmitFDISC++;
4370 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
4371
4372 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4373 "Issue FDISC: did:x%x",
4374 did, 0, 0);
4375
4376 rc = lpfc_issue_fabric_iocb(phba, elsiocb);
4377 if (rc == IOCB_ERROR) {
4378 lpfc_els_free_iocb(phba, elsiocb);
4379 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4380
4381 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
4382 "%d (%d):0256 Issue FDISC: Cannot send IOCB\n",
4383 phba->brd_no, vport->vpi);
4384
4385 return 1;
4386 }
4387 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
4388 vport->port_state = LPFC_FDISC;
4389 return 0;
4390}
4391
4392static void
4393lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4394 struct lpfc_iocbq *rspiocb)
4395{
4396 struct lpfc_vport *vport = cmdiocb->vport;
4397 IOCB_t *irsp;
4398
4399 irsp = &rspiocb->iocb;
4400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4401 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
4402 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
4403
4404 lpfc_els_free_iocb(phba, cmdiocb);
4405 vport->unreg_vpi_cmpl = VPORT_ERROR;
4406}
4407
4408int
4409lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4410{
4411 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4412 struct lpfc_hba *phba = vport->phba;
4413 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4414 IOCB_t *icmd;
4415 struct lpfc_iocbq *elsiocb;
4416 uint8_t *pcmd;
4417 uint16_t cmdsize;
4418
4419 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
4420 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
4421 ELS_CMD_LOGO);
4422 if (!elsiocb)
4423 return 1;
4424
4425 icmd = &elsiocb->iocb;
4426 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4427 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
4428 pcmd += sizeof(uint32_t);
4429
4430 /* Fill in LOGO payload */
4431 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
4432 pcmd += sizeof(uint32_t);
4433 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
4434
4435 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4436 "Issue LOGO npiv did:x%x flg:x%x",
4437 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4438
4439 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
4440 spin_lock_irq(shost->host_lock);
4441 ndlp->nlp_flag |= NLP_LOGO_SND;
4442 spin_unlock_irq(shost->host_lock);
4443 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
4444 spin_lock_irq(shost->host_lock);
4445 ndlp->nlp_flag &= ~NLP_LOGO_SND;
4446 spin_unlock_irq(shost->host_lock);
4447 lpfc_els_free_iocb(phba, elsiocb);
4448 return 1;
4449 }
4450 return 0;
4451}
4452
4453void
4454lpfc_fabric_block_timeout(unsigned long ptr)
4455{
4456 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4457 unsigned long iflags;
4458 uint32_t tmo_posted;
4459 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
4460 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
4461 if (!tmo_posted)
4462 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
4463 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
4464
4465 if (!tmo_posted) {
4466 spin_lock_irqsave(&phba->hbalock, iflags);
4467 if (phba->work_wait)
4468 lpfc_worker_wake_up(phba);
4469 spin_unlock_irqrestore(&phba->hbalock, iflags);
4470 }
4471}
4472
4473static void
4474lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
4475{
4476 struct lpfc_iocbq *iocb;
4477 unsigned long iflags;
4478 int ret;
4479 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4480 IOCB_t *cmd;
4481
4482repeat:
4483 iocb = NULL;
4484 spin_lock_irqsave(&phba->hbalock, iflags);
4485 /* Post any pending iocb to the SLI layer */
4486 if (atomic_read(&phba->fabric_iocb_count) == 0) {
4487 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
4488 list);
4489 if (iocb)
4490 atomic_inc(&phba->fabric_iocb_count);
4491 }
4492 spin_unlock_irqrestore(&phba->hbalock, iflags);
4493 if (iocb) {
4494 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4495 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4496 iocb->iocb_flag |= LPFC_IO_FABRIC;
4497
4498 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4499 "Fabric sched1: ste:x%x",
4500 iocb->vport->port_state, 0, 0);
4501
4502 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4503
4504 if (ret == IOCB_ERROR) {
4505 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4506 iocb->fabric_iocb_cmpl = NULL;
4507 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4508 cmd = &iocb->iocb;
4509 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4510 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4511 iocb->iocb_cmpl(phba, iocb, iocb);
4512
4513 atomic_dec(&phba->fabric_iocb_count);
4514 goto repeat;
4515 }
4516 }
4517
4518 return;
4519}
4520
4521void
4522lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
4523{
4524 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4525
4526 lpfc_resume_fabric_iocbs(phba);
4527 return;
4528}
4529
4530static void
4531lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
4532{
4533 int blocked;
4534
4535 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4536 /* Start a timer to unblock fabric
4537 * iocbs after 100ms
4538 */
4539 if (!blocked)
4540 mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 );
4541
4542 return;
4543}
4544
4545static void
4546lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4547 struct lpfc_iocbq *rspiocb)
4548{
4549 struct ls_rjt stat;
4550
4551 if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
4552 BUG();
4553
4554 switch (rspiocb->iocb.ulpStatus) {
4555 case IOSTAT_NPORT_RJT:
4556 case IOSTAT_FABRIC_RJT:
4557 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
4558 lpfc_block_fabric_iocbs(phba);
4559 }
4560 break;
4561
4562 case IOSTAT_NPORT_BSY:
4563 case IOSTAT_FABRIC_BSY:
4564 lpfc_block_fabric_iocbs(phba);
4565 break;
4566
4567 case IOSTAT_LS_RJT:
4568 stat.un.lsRjtError =
4569 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
4570 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
4571 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
4572 lpfc_block_fabric_iocbs(phba);
4573 break;
4574 }
4575
4576 if (atomic_read(&phba->fabric_iocb_count) == 0)
4577 BUG();
4578
4579 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
4580 cmdiocb->fabric_iocb_cmpl = NULL;
4581 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
4582 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
4583
4584 atomic_dec(&phba->fabric_iocb_count);
4585 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
4586 /* Post any pending iocbs to HBA */
4587 lpfc_resume_fabric_iocbs(phba);
4588 }
4589}
4590
4591int
4592lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
4593{
4594 unsigned long iflags;
4595 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4596 int ready;
4597 int ret;
4598
4599 if (atomic_read(&phba->fabric_iocb_count) > 1)
4600 BUG();
4601
4602 spin_lock_irqsave(&phba->hbalock, iflags);
4603 ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
4604 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
4605
4606 spin_unlock_irqrestore(&phba->hbalock, iflags);
4607 if (ready) {
4608 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
4609 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
4610 iocb->iocb_flag |= LPFC_IO_FABRIC;
4611
4612 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
4613 "Fabric sched2: ste:x%x",
4614 iocb->vport->port_state, 0, 0);
4615
4616 atomic_inc(&phba->fabric_iocb_count);
4617 ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
4618
4619 if (ret == IOCB_ERROR) {
4620 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
4621 iocb->fabric_iocb_cmpl = NULL;
4622 iocb->iocb_flag &= ~LPFC_IO_FABRIC;
4623 atomic_dec(&phba->fabric_iocb_count);
4624 }
4625 } else {
4626 spin_lock_irqsave(&phba->hbalock, iflags);
4627 list_add_tail(&iocb->list, &phba->fabric_iocb_list);
4628 spin_unlock_irqrestore(&phba->hbalock, iflags);
4629 ret = IOCB_SUCCESS;
4630 }
4631 return ret;
4632}
4633
4634
4635void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
4636{
4637 LIST_HEAD(completions);
4638 struct lpfc_hba *phba = vport->phba;
4639 struct lpfc_iocbq *tmp_iocb, *piocb;
4640 IOCB_t *cmd;
4641
4642 spin_lock_irq(&phba->hbalock);
4643 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4644 list) {
4645
4646 if (piocb->vport != vport)
4647 continue;
4648
4649 list_move_tail(&piocb->list, &completions);
4650 }
4651 spin_unlock_irq(&phba->hbalock);
4652
4653 while (!list_empty(&completions)) {
4654 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4655 list_del_init(&piocb->list);
4656
4657 cmd = &piocb->iocb;
4658 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4659 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4660 (piocb->iocb_cmpl) (phba, piocb, piocb);
4661 }
4662}
4663
4664void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
4665{
4666 LIST_HEAD(completions);
4667 struct lpfc_hba *phba = ndlp->vport->phba;
4668 struct lpfc_iocbq *tmp_iocb, *piocb;
4669 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4670 IOCB_t *cmd;
4671
4672 spin_lock_irq(&phba->hbalock);
4673 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4674 list) {
4675 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
4676
4677 list_move_tail(&piocb->list, &completions);
4678 }
4679 }
4680 spin_unlock_irq(&phba->hbalock);
4681
4682 while (!list_empty(&completions)) {
4683 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4684 list_del_init(&piocb->list);
4685
4686 cmd = &piocb->iocb;
4687 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4688 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4689 (piocb->iocb_cmpl) (phba, piocb, piocb);
4690 }
4691}
4692
4693void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
4694{
4695 LIST_HEAD(completions);
4696 struct lpfc_iocbq *piocb;
4697 IOCB_t *cmd;
4698
4699 spin_lock_irq(&phba->hbalock);
4700 list_splice_init(&phba->fabric_iocb_list, &completions);
4701 spin_unlock_irq(&phba->hbalock);
4702
4703 while (!list_empty(&completions)) {
4704 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4705 list_del_init(&piocb->list);
4706
4707 cmd = &piocb->iocb;
4708 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4709 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4710 (piocb->iocb_cmpl) (phba, piocb, piocb);
4711 }
4712}
4713
4714
4715void lpfc_fabric_abort_flogi(struct lpfc_hba *phba)
4716{
4717 LIST_HEAD(completions);
4718 struct lpfc_iocbq *tmp_iocb, *piocb;
4719 IOCB_t *cmd;
4720 struct lpfc_nodelist *ndlp;
4721
4722 spin_lock_irq(&phba->hbalock);
4723 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
4724 list) {
4725
4726 cmd = &piocb->iocb;
4727 ndlp = (struct lpfc_nodelist *) piocb->context1;
4728 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
4729 ndlp != NULL &&
4730 ndlp->nlp_DID == Fabric_DID)
4731 list_move_tail(&piocb->list, &completions);
4732 }
4733 spin_unlock_irq(&phba->hbalock);
4734
4735 while (!list_empty(&completions)) {
4736 piocb = list_get_first(&completions, struct lpfc_iocbq, list);
4737 list_del_init(&piocb->list);
4738
4739 cmd = &piocb->iocb;
4740 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
4741 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
4742 (piocb->iocb_cmpl) (phba, piocb, piocb);
4743 }
4744}
4745
4746
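The fabric IOCB helpers that close out lpfc_els.c above enforce one outstanding fabric-directed ELS command at a time: fabric_iocb_count gates in-flight commands, the FABRIC_COMANDS_BLOCKED bit throttles after busy/reject responses, and queued commands drain from fabric_iocb_list as completions arrive. A condensed sketch of the gate logic follows; fabric_gate_issue is an illustrative name and the completion-hook swap and error paths are trimmed, so this is not the driver's exact code.

/* Sketch: single-threaded fabric command gate, per the code above. */
static int fabric_gate_issue(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long flags;
	int ready;

	spin_lock_irqsave(&phba->hbalock, flags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	if (!ready)				/* busy or blocked: park it */
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (!ready)
		return IOCB_SUCCESS;		/* resumed from the completion path */

	atomic_inc(&phba->fabric_iocb_count);
	return lpfc_sli_issue_iocb(phba,
			&phba->sli.ring[LPFC_ELS_RING], iocb, 0);
}

On completion the driver decrements the counter and, unless the blocked bit is set, issues the next queued IOCB; NPORT/FABRIC busy and the matching LS_RJT reason codes set the bit and arm a 100 ms timer (lpfc_fabric_block_timeout) that unblocks the queue.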
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 61caa8d379e2..f2f4639eab59 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -36,6 +36,8 @@
36#include "lpfc.h" 36#include "lpfc.h"
37#include "lpfc_logmsg.h" 37#include "lpfc_logmsg.h"
38#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
39#include "lpfc_vport.h"
40#include "lpfc_debugfs.h"
39 41
40/* AlpaArray for assignment of scsid for scan-down and bind_method */ 42/* AlpaArray for assignment of scsid for scan-down and bind_method */
41static uint8_t lpfcAlpaArray[] = { 43static uint8_t lpfcAlpaArray[] = {
@@ -54,7 +56,7 @@ static uint8_t lpfcAlpaArray[] = {
54 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 56 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
55}; 57};
56 58
57static void lpfc_disc_timeout_handler(struct lpfc_hba *); 59static void lpfc_disc_timeout_handler(struct lpfc_vport *);
58 60
59void 61void
60lpfc_terminate_rport_io(struct fc_rport *rport) 62lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -74,14 +76,16 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
74 return; 76 return;
75 } 77 }
76 78
77 phba = ndlp->nlp_phba; 79 phba = ndlp->vport->phba;
80
81 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
82 "rport terminate: sid:x%x did:x%x flg:x%x",
83 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
78 84
79 spin_lock_irq(phba->host->host_lock);
80 if (ndlp->nlp_sid != NLP_NO_SID) { 85 if (ndlp->nlp_sid != NLP_NO_SID) {
81 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 86 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
82 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 87 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
83 } 88 }
84 spin_unlock_irq(phba->host->host_lock);
85 89
86 return; 90 return;
87} 91}
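The lpfc_debugfs_disc_trc() calls added throughout this patch record a fixed format string plus up to three data words into a per-port discovery trace readable via debugfs. A plausible minimal backing store for such a call is sketched below; the entry layout and the names disc_trc and DISC_TRC_DEPTH are illustrative assumptions, not the actual layout in lpfc_debugfs.c.

/* Sketch: per-port ring of discovery trace entries (layout assumed). */
struct disc_trc_entry {
	const char *fmt;		/* e.g. "rport terminate: sid:x%x ..." */
	uint32_t data1, data2, data3;
	unsigned long jif;		/* timestamp in jiffies */
};

#define DISC_TRC_DEPTH 256		/* power of two for cheap wrap */

struct disc_trc {
	struct disc_trc_entry ent[DISC_TRC_DEPTH];
	atomic_t idx;
};

static void disc_trc(struct disc_trc *t, const char *fmt,
		     uint32_t d1, uint32_t d2, uint32_t d3)
{
	unsigned int i = atomic_inc_return(&t->idx) & (DISC_TRC_DEPTH - 1);

	/* Overwrite the oldest slot; readers dump the ring via debugfs. */
	t->ent[i] = (struct disc_trc_entry){ fmt, d1, d2, d3, jiffies };
}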
@@ -94,105 +98,213 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
94{ 98{
95 struct lpfc_rport_data *rdata; 99 struct lpfc_rport_data *rdata;
96 struct lpfc_nodelist * ndlp; 100 struct lpfc_nodelist * ndlp;
97 uint8_t *name; 101 struct lpfc_vport *vport;
98 int warn_on = 0; 102 struct lpfc_hba *phba;
99 struct lpfc_hba *phba; 103 struct completion devloss_compl;
104 struct lpfc_work_evt *evtp;
100 105
101 rdata = rport->dd_data; 106 rdata = rport->dd_data;
102 ndlp = rdata->pnode; 107 ndlp = rdata->pnode;
103 108
104 if (!ndlp) { 109 if (!ndlp) {
105 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 110 if (rport->scsi_target_id != -1) {
106 printk(KERN_ERR "Cannot find remote node" 111 printk(KERN_ERR "Cannot find remote node"
107 " for rport in dev_loss_tmo_callbk x%x\n", 112 " for rport in dev_loss_tmo_callbk x%x\n",
108 rport->port_id); 113 rport->port_id);
114 }
109 return; 115 return;
110 } 116 }
111 117
-112	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
+118	vport = ndlp->vport;
119 phba = vport->phba;
120
121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
122 "rport devlosscb: sid:x%x did:x%x flg:x%x",
123 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
124
125 init_completion(&devloss_compl);
126 evtp = &ndlp->dev_loss_evt;
127
128 if (!list_empty(&evtp->evt_listp))
129 return;
130
131 spin_lock_irq(&phba->hbalock);
132 evtp->evt_arg1 = ndlp;
133 evtp->evt_arg2 = &devloss_compl;
134 evtp->evt = LPFC_EVT_DEV_LOSS;
135 list_add_tail(&evtp->evt_listp, &phba->work_list);
136 if (phba->work_wait)
137 wake_up(phba->work_wait);
138
139 spin_unlock_irq(&phba->hbalock);
140
141 wait_for_completion(&devloss_compl);
142
143 return;
144}
145
146/*
147 * This function is called from the worker thread when dev_loss_tmo
148 * expire.
149 */
150void
151lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
152{
153 struct lpfc_rport_data *rdata;
154 struct fc_rport *rport;
155 struct lpfc_vport *vport;
156 struct lpfc_hba *phba;
157 uint8_t *name;
158 int warn_on = 0;
159
160 rport = ndlp->rport;
161
162 if (!rport)
163 return;
164
165 rdata = rport->dd_data;
166 name = (uint8_t *) &ndlp->nlp_portname;
167 vport = ndlp->vport;
168 phba = vport->phba;
169
170 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
171 "rport devlosstmo:did:x%x type:x%x id:x%x",
172 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
173
174 if (!(vport->load_flag & FC_UNLOADING) &&
175 ndlp->nlp_state == NLP_STE_MAPPED_NODE)
113 return; 176 return;
114 177
-115	name = (uint8_t *)&ndlp->nlp_portname;
-116	phba = ndlp->nlp_phba;
-117
-118	spin_lock_irq(phba->host->host_lock);
-119
+178	if (ndlp->nlp_type & NLP_FABRIC) {
+179		int put_node;
+180		int put_rport;
+181
+182		/* We will clean up these Nodes in linkup */
+183		put_node = rdata->pnode != NULL;
+184		put_rport = ndlp->rport != NULL;
+185		rdata->pnode = NULL;
+186		ndlp->rport = NULL;
+187		if (put_node)
+188			lpfc_nlp_put(ndlp);
+189		if (put_rport)
+190			put_device(&rport->dev);
+191		return;
+192	}
+193
120 if (ndlp->nlp_sid != NLP_NO_SID) { 194 if (ndlp->nlp_sid != NLP_NO_SID) {
121 warn_on = 1; 195 warn_on = 1;
122 /* flush the target */ 196 /* flush the target */
123 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 197 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
124 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 198 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
125 } 199 }
126 if (phba->fc_flag & FC_UNLOADING) 200 if (vport->load_flag & FC_UNLOADING)
127 warn_on = 0; 201 warn_on = 0;
128 202
129 spin_unlock_irq(phba->host->host_lock);
130
131 if (warn_on) { 203 if (warn_on) {
132 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 204 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
133 "%d:0203 Devloss timeout on " 205 "%d (%d):0203 Devloss timeout on "
134 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 206 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
135 "NPort x%x Data: x%x x%x x%x\n", 207 "NPort x%x Data: x%x x%x x%x\n",
136 phba->brd_no, 208 phba->brd_no, vport->vpi,
137 *name, *(name+1), *(name+2), *(name+3), 209 *name, *(name+1), *(name+2), *(name+3),
138 *(name+4), *(name+5), *(name+6), *(name+7), 210 *(name+4), *(name+5), *(name+6), *(name+7),
139 ndlp->nlp_DID, ndlp->nlp_flag, 211 ndlp->nlp_DID, ndlp->nlp_flag,
140 ndlp->nlp_state, ndlp->nlp_rpi); 212 ndlp->nlp_state, ndlp->nlp_rpi);
141 } else { 213 } else {
142 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 214 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
143 "%d:0204 Devloss timeout on " 215 "%d (%d):0204 Devloss timeout on "
144 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 216 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
145 "NPort x%x Data: x%x x%x x%x\n", 217 "NPort x%x Data: x%x x%x x%x\n",
146 phba->brd_no, 218 phba->brd_no, vport->vpi,
147 *name, *(name+1), *(name+2), *(name+3), 219 *name, *(name+1), *(name+2), *(name+3),
148 *(name+4), *(name+5), *(name+6), *(name+7), 220 *(name+4), *(name+5), *(name+6), *(name+7),
149 ndlp->nlp_DID, ndlp->nlp_flag, 221 ndlp->nlp_DID, ndlp->nlp_flag,
150 ndlp->nlp_state, ndlp->nlp_rpi); 222 ndlp->nlp_state, ndlp->nlp_rpi);
151 } 223 }
152 224
153 if (!(phba->fc_flag & FC_UNLOADING) && 225 if (!(vport->load_flag & FC_UNLOADING) &&
154 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 226 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
155 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 227 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
156 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 228 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
157 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM); 229 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
158 else { 230 else {
231 int put_node;
232 int put_rport;
233
234 put_node = rdata->pnode != NULL;
235 put_rport = ndlp->rport != NULL;
159 rdata->pnode = NULL; 236 rdata->pnode = NULL;
160 ndlp->rport = NULL; 237 ndlp->rport = NULL;
161 lpfc_nlp_put(ndlp); 238 if (put_node)
162 put_device(&rport->dev); 239 lpfc_nlp_put(ndlp);
240 if (put_rport)
241 put_device(&rport->dev);
163 } 242 }
243}
244
164 245
246void
247lpfc_worker_wake_up(struct lpfc_hba *phba)
248{
249 wake_up(phba->work_wait);
165 return; 250 return;
166} 251}
167 252
168static void 253static void
169lpfc_work_list_done(struct lpfc_hba * phba) 254lpfc_work_list_done(struct lpfc_hba *phba)
170{ 255{
171 struct lpfc_work_evt *evtp = NULL; 256 struct lpfc_work_evt *evtp = NULL;
172 struct lpfc_nodelist *ndlp; 257 struct lpfc_nodelist *ndlp;
258 struct lpfc_vport *vport;
173 int free_evt; 259 int free_evt;
174 260
175 spin_lock_irq(phba->host->host_lock); 261 spin_lock_irq(&phba->hbalock);
176 while(!list_empty(&phba->work_list)) { 262 while (!list_empty(&phba->work_list)) {
177 list_remove_head((&phba->work_list), evtp, typeof(*evtp), 263 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
178 evt_listp); 264 evt_listp);
179 spin_unlock_irq(phba->host->host_lock); 265 spin_unlock_irq(&phba->hbalock);
180 free_evt = 1; 266 free_evt = 1;
181 switch (evtp->evt) { 267 switch (evtp->evt) {
268 case LPFC_EVT_DEV_LOSS_DELAY:
269 free_evt = 0; /* evt is part of ndlp */
270 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
271 vport = ndlp->vport;
272 if (!vport)
273 break;
274
275 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
276 "rport devlossdly:did:x%x flg:x%x",
277 ndlp->nlp_DID, ndlp->nlp_flag, 0);
278
279 if (!(vport->load_flag & FC_UNLOADING) &&
280 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
281 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
282 lpfc_disc_state_machine(vport, ndlp, NULL,
283 NLP_EVT_DEVICE_RM);
284 }
285 break;
182 case LPFC_EVT_ELS_RETRY: 286 case LPFC_EVT_ELS_RETRY:
183 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 287 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
184 lpfc_els_retry_delay_handler(ndlp); 288 lpfc_els_retry_delay_handler(ndlp);
289 free_evt = 0; /* evt is part of ndlp */
290 break;
291 case LPFC_EVT_DEV_LOSS:
292 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
293 lpfc_nlp_get(ndlp);
294 lpfc_dev_loss_tmo_handler(ndlp);
185 free_evt = 0; 295 free_evt = 0;
296 complete((struct completion *)(evtp->evt_arg2));
297 lpfc_nlp_put(ndlp);
186 break; 298 break;
187 case LPFC_EVT_ONLINE: 299 case LPFC_EVT_ONLINE:
188 if (phba->hba_state < LPFC_LINK_DOWN) 300 if (phba->link_state < LPFC_LINK_DOWN)
189 *(int *)(evtp->evt_arg1) = lpfc_online(phba); 301 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
190 else 302 else
191 *(int *)(evtp->evt_arg1) = 0; 303 *(int *) (evtp->evt_arg1) = 0;
192 complete((struct completion *)(evtp->evt_arg2)); 304 complete((struct completion *)(evtp->evt_arg2));
193 break; 305 break;
194 case LPFC_EVT_OFFLINE_PREP: 306 case LPFC_EVT_OFFLINE_PREP:
195 if (phba->hba_state >= LPFC_LINK_DOWN) 307 if (phba->link_state >= LPFC_LINK_DOWN)
196 lpfc_offline_prep(phba); 308 lpfc_offline_prep(phba);
197 *(int *)(evtp->evt_arg1) = 0; 309 *(int *)(evtp->evt_arg1) = 0;
198 complete((struct completion *)(evtp->evt_arg2)); 310 complete((struct completion *)(evtp->evt_arg2));
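The rewritten dev_loss callback above no longer tears the node down in rport context: it queues an LPFC_EVT_DEV_LOSS work event carrying the ndlp and a completion, then blocks until the worker thread has run lpfc_dev_loss_tmo_handler() and signalled it. The synchronous-handoff shape, reduced to its essentials (post_and_wait is an illustrative name; the fields match the code shown above):

/* Sketch: post an event to the worker thread and wait for it to finish. */
static void post_and_wait(struct lpfc_hba *phba, struct lpfc_work_evt *evtp,
			  void *arg)
{
	struct completion done;

	init_completion(&done);

	spin_lock_irq(&phba->hbalock);
	evtp->evt_arg1 = arg;
	evtp->evt_arg2 = &done;		/* worker calls complete() on this */
	evtp->evt = LPFC_EVT_DEV_LOSS;
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irq(&phba->hbalock);

	wait_for_completion(&done);	/* handler has run on the worker */
}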
@@ -218,33 +330,31 @@ lpfc_work_list_done(struct lpfc_hba * phba)
218 case LPFC_EVT_KILL: 330 case LPFC_EVT_KILL:
219 lpfc_offline(phba); 331 lpfc_offline(phba);
220 *(int *)(evtp->evt_arg1) 332 *(int *)(evtp->evt_arg1)
221 = (phba->stopped) ? 0 : lpfc_sli_brdkill(phba); 333 = (phba->pport->stopped)
334 ? 0 : lpfc_sli_brdkill(phba);
222 lpfc_unblock_mgmt_io(phba); 335 lpfc_unblock_mgmt_io(phba);
223 complete((struct completion *)(evtp->evt_arg2)); 336 complete((struct completion *)(evtp->evt_arg2));
224 break; 337 break;
225 } 338 }
226 if (free_evt) 339 if (free_evt)
227 kfree(evtp); 340 kfree(evtp);
228 spin_lock_irq(phba->host->host_lock); 341 spin_lock_irq(&phba->hbalock);
229 } 342 }
230 spin_unlock_irq(phba->host->host_lock); 343 spin_unlock_irq(&phba->hbalock);
231 344
232} 345}
233 346
234static void 347void
235lpfc_work_done(struct lpfc_hba * phba) 348lpfc_work_done(struct lpfc_hba *phba)
236{ 349{
237 struct lpfc_sli_ring *pring; 350 struct lpfc_sli_ring *pring;
238 int i; 351 uint32_t ha_copy, status, control, work_port_events;
239 uint32_t ha_copy; 352 struct lpfc_vport *vport;
240 uint32_t control;
241 uint32_t work_hba_events;
242 353
243 spin_lock_irq(phba->host->host_lock); 354 spin_lock_irq(&phba->hbalock);
244 ha_copy = phba->work_ha; 355 ha_copy = phba->work_ha;
245 phba->work_ha = 0; 356 phba->work_ha = 0;
246 work_hba_events=phba->work_hba_events; 357 spin_unlock_irq(&phba->hbalock);
247 spin_unlock_irq(phba->host->host_lock);
248 358
249 if (ha_copy & HA_ERATT) 359 if (ha_copy & HA_ERATT)
250 lpfc_handle_eratt(phba); 360 lpfc_handle_eratt(phba);
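lpfc_work_list_done() above drains phba->work_list with the classic drop-the-lock-per-item loop: remove the head under hbalock, release the lock so the handler may sleep, then retake it before testing the list again. A minimal rendering of that loop shape (handle_event is a hypothetical stand-in for the switch over evtp->evt):

/* Sketch: drain a work list, dropping the lock around each handler. */
static void drain_work(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		evtp = list_first_entry(&phba->work_list,
					struct lpfc_work_evt, evt_listp);
		list_del_init(&evtp->evt_listp);
		spin_unlock_irq(&phba->hbalock);

		handle_event(evtp);	/* may sleep; hypothetical helper */

		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}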
@@ -255,66 +365,111 @@ lpfc_work_done(struct lpfc_hba * phba)
255 if (ha_copy & HA_LATT) 365 if (ha_copy & HA_LATT)
256 lpfc_handle_latt(phba); 366 lpfc_handle_latt(phba);
257 367
-258	if (work_hba_events & WORKER_DISC_TMO)
-259		lpfc_disc_timeout_handler(phba);
-260
-261	if (work_hba_events & WORKER_ELS_TMO)
-262		lpfc_els_timeout_handler(phba);
-263
-264	if (work_hba_events & WORKER_MBOX_TMO)
-265		lpfc_mbox_timeout_handler(phba);
-266
-267	if (work_hba_events & WORKER_FDMI_TMO)
-268		lpfc_fdmi_tmo_handler(phba);
-269
-270	spin_lock_irq(phba->host->host_lock);
-271	phba->work_hba_events &= ~work_hba_events;
-272	spin_unlock_irq(phba->host->host_lock);
-273
-274	for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
-275		pring = &phba->sli.ring[i];
-276		if ((ha_copy & HA_RXATT)
-277		    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
-278			if (pring->flag & LPFC_STOP_IOCB_MASK) {
-279				pring->flag |= LPFC_DEFERRED_RING_EVENT;
-280			} else {
-281				lpfc_sli_handle_slow_ring_event(phba, pring,
-282								(ha_copy &
-283								 HA_RXMASK));
-284				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
-285			}
-286			/*
-287			 * Turn on Ring interrupts
-288			 */
-289			spin_lock_irq(phba->host->host_lock);
-290			control = readl(phba->HCregaddr);
-291			control |= (HC_R0INT_ENA << i);
-292			writel(control, phba->HCregaddr);
-293			readl(phba->HCregaddr); /* flush */
-294			spin_unlock_irq(phba->host->host_lock);
-295		}
-296	}
-297
-298	lpfc_work_list_done (phba);
-299
-300}
+368	spin_lock_irq(&phba->hbalock);
+369	list_for_each_entry(vport, &phba->port_list, listentry) {
+370		struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+371
+372		if (!scsi_host_get(shost)) {
+373			continue;
+374		}
+375		spin_unlock_irq(&phba->hbalock);
+376		work_port_events = vport->work_port_events;
+377
+378		if (work_port_events & WORKER_DISC_TMO)
+379			lpfc_disc_timeout_handler(vport);
+380
+381		if (work_port_events & WORKER_ELS_TMO)
+382			lpfc_els_timeout_handler(vport);
+383
+384		if (work_port_events & WORKER_HB_TMO)
+385			lpfc_hb_timeout_handler(phba);
+386
+387		if (work_port_events & WORKER_MBOX_TMO)
+388			lpfc_mbox_timeout_handler(phba);
+389
+390		if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
+391			lpfc_unblock_fabric_iocbs(phba);
+392
+393		if (work_port_events & WORKER_FDMI_TMO)
+394			lpfc_fdmi_timeout_handler(vport);
+395
+396		if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
+397			lpfc_ramp_down_queue_handler(phba);
+398
+399		if (work_port_events & WORKER_RAMP_UP_QUEUE)
+400			lpfc_ramp_up_queue_handler(phba);
+401
+402		spin_lock_irq(&vport->work_port_lock);
+403		vport->work_port_events &= ~work_port_events;
+404		spin_unlock_irq(&vport->work_port_lock);
+405		scsi_host_put(shost);
+406		spin_lock_irq(&phba->hbalock);
+407	}
+408	spin_unlock_irq(&phba->hbalock);
+409
+410	pring = &phba->sli.ring[LPFC_ELS_RING];
+411	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
+412	status >>= (4*LPFC_ELS_RING);
+413	if ((status & HA_RXMASK)
+414	    || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
+415		if (pring->flag & LPFC_STOP_IOCB_MASK) {
+416			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+417		} else {
+418			lpfc_sli_handle_slow_ring_event(phba, pring,
+419							(status &
+420							 HA_RXMASK));
+421			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+422		}
+423		/*
+424		 * Turn on Ring interrupts
+425		 */
+426		spin_lock_irq(&phba->hbalock);
+427		control = readl(phba->HCregaddr);
+428		if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+429			control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+430			writel(control, phba->HCregaddr);
+431			readl(phba->HCregaddr); /* flush */
+432		}
+433		spin_unlock_irq(&phba->hbalock);
+434	}
+435	lpfc_work_list_done(phba);
+436}
301 437
302static int 438static int
303check_work_wait_done(struct lpfc_hba *phba) { 439check_work_wait_done(struct lpfc_hba *phba)
440{
441 struct lpfc_vport *vport;
442 struct lpfc_sli_ring *pring;
443 int rc = 0;
444
445 spin_lock_irq(&phba->hbalock);
446 list_for_each_entry(vport, &phba->port_list, listentry) {
447 if (vport->work_port_events) {
448 rc = 1;
449 goto exit;
450 }
451 }
304 452
-305	spin_lock_irq(phba->host->host_lock);
-306	if (phba->work_ha ||
-307	    phba->work_hba_events ||
-308	    (!list_empty(&phba->work_list)) ||
-309	    kthread_should_stop()) {
-310		spin_unlock_irq(phba->host->host_lock);
-311		return 1;
-312	} else {
-313		spin_unlock_irq(phba->host->host_lock);
-314		return 0;
-315	}
+453	if (phba->work_ha || (!list_empty(&phba->work_list)) ||
+454	    kthread_should_stop()) {
+455		rc = 1;
+456		goto exit;
+457	}
458
459 pring = &phba->sli.ring[LPFC_ELS_RING];
460 if (pring->flag & LPFC_DEFERRED_RING_EVENT)
461 rc = 1;
462exit:
463 if (rc)
464 phba->work_found++;
465 else
466 phba->work_found = 0;
467
468 spin_unlock_irq(&phba->hbalock);
469 return rc;
316} 470}
317 471
472
318int 473int
319lpfc_do_work(void *p) 474lpfc_do_work(void *p)
320{ 475{
@@ -324,11 +479,13 @@ lpfc_do_work(void *p)
324 479
325 set_user_nice(current, -20); 480 set_user_nice(current, -20);
326 phba->work_wait = &work_waitq; 481 phba->work_wait = &work_waitq;
482 phba->work_found = 0;
327 483
328 while (1) { 484 while (1) {
329 485
330 rc = wait_event_interruptible(work_waitq, 486 rc = wait_event_interruptible(work_waitq,
331 check_work_wait_done(phba)); 487 check_work_wait_done(phba));
488
332 BUG_ON(rc); 489 BUG_ON(rc);
333 490
334 if (kthread_should_stop()) 491 if (kthread_should_stop())
@@ -336,6 +493,17 @@ lpfc_do_work(void *p)
336 493
337 lpfc_work_done(phba); 494 lpfc_work_done(phba);
338 495
496 /* If there is a lot of slow ring work, like during link up
497 * check_work_wait_done() may cause this thread to not give
498 * up the CPU for very long periods of time. This may cause
499 * soft lockups or other problems. To avoid these situations
500 * give up the CPU here after LPFC_MAX_WORKER_ITERATION
501 * consecutive iterations.
502 */
503 if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
504 phba->work_found = 0;
505 schedule();
506 }
339 } 507 }
340 phba->work_wait = NULL; 508 phba->work_wait = NULL;
341 return 0; 509 return 0;
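The new work_found counter addresses the problem spelled out in the comment above: if check_work_wait_done() keeps returning true, wait_event_interruptible() never sleeps and the worker can monopolize a CPU. After LPFC_MAX_WORKER_ITERATION consecutive hits the thread voluntarily yields. The loop skeleton (worker_loop is an illustrative name for lpfc_do_work):

/* Sketch: worker main loop with a periodic voluntary yield. */
static int worker_loop(void *p)
{
	struct lpfc_hba *phba = p;
	DECLARE_WAIT_QUEUE_HEAD(work_waitq);

	phba->work_wait = &work_waitq;
	while (!kthread_should_stop()) {
		wait_event_interruptible(work_waitq,
					 check_work_wait_done(phba));
		lpfc_work_done(phba);

		/* Yield after many back-to-back passes (soft-lockup guard). */
		if (phba->work_found >= LPFC_MAX_WORKER_ITERATION) {
			phba->work_found = 0;
			schedule();
		}
	}
	phba->work_wait = NULL;
	return 0;
}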
@@ -347,16 +515,17 @@ lpfc_do_work(void *p)
347 * embedding it in the IOCB. 515 * embedding it in the IOCB.
348 */ 516 */
349int 517int
350lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, 518lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
351 uint32_t evt) 519 uint32_t evt)
352{ 520{
353 struct lpfc_work_evt *evtp; 521 struct lpfc_work_evt *evtp;
522 unsigned long flags;
354 523
355 /* 524 /*
356 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 525 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
357 * be queued to worker thread for processing 526 * be queued to worker thread for processing
358 */ 527 */
359 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL); 528 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
360 if (!evtp) 529 if (!evtp)
361 return 0; 530 return 0;
362 531
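Note the allocation-flag change in the hunk above: lpfc_workq_post_event() can now be reached from contexts that must not sleep, so the event node is allocated with GFP_ATOMIC and the list is manipulated under spin_lock_irqsave() rather than the plain spin_lock_irq(). In miniature (post_event is an illustrative name; the body mirrors the code shown above):

/* Sketch: post a work event from a context that must not sleep. */
static int post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	evtp = kmalloc(sizeof(*evtp), GFP_ATOMIC);	/* no sleeping */
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);	/* IRQ-safe */
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	if (phba->work_wait)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return 1;
}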
@@ -364,136 +533,210 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
364 evtp->evt_arg2 = arg2; 533 evtp->evt_arg2 = arg2;
365 evtp->evt = evt; 534 evtp->evt = evt;
366 535
367 spin_lock_irq(phba->host->host_lock); 536 spin_lock_irqsave(&phba->hbalock, flags);
368 list_add_tail(&evtp->evt_listp, &phba->work_list); 537 list_add_tail(&evtp->evt_listp, &phba->work_list);
369 if (phba->work_wait) 538 if (phba->work_wait)
370 wake_up(phba->work_wait); 539 lpfc_worker_wake_up(phba);
371 spin_unlock_irq(phba->host->host_lock); 540 spin_unlock_irqrestore(&phba->hbalock, flags);
372 541
373 return 1; 542 return 1;
374} 543}
375 544
376int 545void
-377 lpfc_linkdown(struct lpfc_hba *phba)
-378 {
-379	struct lpfc_sli *psli;
-380	struct lpfc_nodelist *ndlp, *next_ndlp;
-381	LPFC_MBOXQ_t *mb;
-382	int rc;
-383
-384	psli = &phba->sli;
-385	/* sysfs or selective reset may call this routine to clean up */
-386	if (phba->hba_state >= LPFC_LINK_DOWN) {
-387		if (phba->hba_state == LPFC_LINK_DOWN)
-388			return 0;
-389
-390		spin_lock_irq(phba->host->host_lock);
-391		phba->hba_state = LPFC_LINK_DOWN;
-392		spin_unlock_irq(phba->host->host_lock);
-393	}
+546 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
+547 {
+548	struct lpfc_hba *phba = vport->phba;
+549	struct lpfc_nodelist *ndlp, *next_ndlp;
+550	int rc;
+551
+552	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
+553		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
+554			continue;
+555
+556		if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN)
+557			lpfc_unreg_rpi(vport, ndlp);
+558
+559		/* Leave Fabric nodes alone on link down */
+560		if (!remove && ndlp->nlp_type & NLP_FABRIC)
+561			continue;
+562		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
+563					     remove
+564					     ? NLP_EVT_DEVICE_RM
+565					     : NLP_EVT_DEVICE_RECOVERY);
+566	}
+567	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+568		lpfc_mbx_unreg_vpi(vport);
+569		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+570	}
+571}
572
573static void
574lpfc_linkdown_port(struct lpfc_vport *vport)
575{
576 struct lpfc_nodelist *ndlp, *next_ndlp;
577 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
394 578
395 fc_host_post_event(phba->host, fc_get_event_number(), 579 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
396 FCH_EVT_LINKDOWN, 0);
397 580
398 /* Clean up any firmware default rpi's */ 581 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
399 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 582 "Link Down: state:x%x rtry:x%x flg:x%x",
400 lpfc_unreg_did(phba, 0xffffffff, mb); 583 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
401 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
402 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
403 == MBX_NOT_FINISHED) {
404 mempool_free( mb, phba->mbox_mem_pool);
405 }
406 }
407 584
408 /* Cleanup any outstanding RSCN activity */ 585 /* Cleanup any outstanding RSCN activity */
409 lpfc_els_flush_rscn(phba); 586 lpfc_els_flush_rscn(vport);
410 587
411 /* Cleanup any outstanding ELS commands */ 588 /* Cleanup any outstanding ELS commands */
412 lpfc_els_flush_cmd(phba); 589 lpfc_els_flush_cmd(vport);
413 590
414 /* 591 lpfc_cleanup_rpis(vport, 0);
415 * Issue a LINK DOWN event to all nodes. 592
416 */ 593 /* free any ndlp's on unused list */
417 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) { 594 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
418 /* free any ndlp's on unused list */ 595 /* free any ndlp's in unused state */
419 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 596 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
420 lpfc_drop_node(phba, ndlp); 597 lpfc_drop_node(vport, ndlp);
421 else /* otherwise, force node recovery. */ 598
422 rc = lpfc_disc_state_machine(phba, ndlp, NULL, 599 /* Turn off discovery timer if its running */
423 NLP_EVT_DEVICE_RECOVERY); 600 lpfc_can_disctmo(vport);
601}
602
603int
604lpfc_linkdown(struct lpfc_hba *phba)
605{
606 struct lpfc_vport *vport = phba->pport;
607 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
608 struct lpfc_vport *port_iterator;
609 LPFC_MBOXQ_t *mb;
610
611 if (phba->link_state == LPFC_LINK_DOWN) {
612 return 0;
613 }
614 spin_lock_irq(&phba->hbalock);
615 if (phba->link_state > LPFC_LINK_DOWN) {
616 phba->link_state = LPFC_LINK_DOWN;
617 phba->pport->fc_flag &= ~FC_LBIT;
618 }
619 spin_unlock_irq(&phba->hbalock);
620
621 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
622
623 /* Issue a LINK DOWN event to all nodes */
624 lpfc_linkdown_port(port_iterator);
625 }
626
627 /* Clean up any firmware default rpi's */
628 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
629 if (mb) {
630 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb);
631 mb->vport = vport;
632 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
633 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
634 == MBX_NOT_FINISHED) {
635 mempool_free(mb, phba->mbox_mem_pool);
636 }
424 } 637 }
425 638
426 /* Setup myDID for link up if we are in pt2pt mode */ 639 /* Setup myDID for link up if we are in pt2pt mode */
427 if (phba->fc_flag & FC_PT2PT) { 640 if (phba->pport->fc_flag & FC_PT2PT) {
428 phba->fc_myDID = 0; 641 phba->pport->fc_myDID = 0;
429 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 642 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
643 if (mb) {
430 lpfc_config_link(phba, mb); 644 lpfc_config_link(phba, mb);
431 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 645 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
432 if (lpfc_sli_issue_mbox 646 mb->vport = vport;
433 (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB)) 647 if (lpfc_sli_issue_mbox(phba, mb,
648 (MBX_NOWAIT | MBX_STOP_IOCB))
434 == MBX_NOT_FINISHED) { 649 == MBX_NOT_FINISHED) {
435 mempool_free( mb, phba->mbox_mem_pool); 650 mempool_free(mb, phba->mbox_mem_pool);
436 } 651 }
437 } 652 }
438 spin_lock_irq(phba->host->host_lock); 653 spin_lock_irq(shost->host_lock);
439 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); 654 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
440 spin_unlock_irq(phba->host->host_lock); 655 spin_unlock_irq(shost->host_lock);
441 } 656 }
442 spin_lock_irq(phba->host->host_lock);
443 phba->fc_flag &= ~FC_LBIT;
444 spin_unlock_irq(phba->host->host_lock);
445
446 /* Turn off discovery timer if its running */
447 lpfc_can_disctmo(phba);
448 657
449 /* Must process IOCBs on all rings to handle ABORTed I/Os */
450 return 0; 658 return 0;
451} 659}
452 660
453static int 661static void
454lpfc_linkup(struct lpfc_hba *phba) 662lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
455{ 663{
456 struct lpfc_nodelist *ndlp, *next_ndlp; 664 struct lpfc_nodelist *ndlp;
457 665
458 fc_host_post_event(phba->host, fc_get_event_number(), 666 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
459 FCH_EVT_LINKUP, 0); 667 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
460 668 continue;
461 spin_lock_irq(phba->host->host_lock); 669
462 phba->hba_state = LPFC_LINK_UP; 670 if (ndlp->nlp_type & NLP_FABRIC) {
463 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 671 /* On Linkup its safe to clean up the ndlp
464 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); 672 * from Fabric connections.
465 phba->fc_flag |= FC_NDISC_ACTIVE; 673 */
466 phba->fc_ns_retry = 0; 674 if (ndlp->nlp_DID != Fabric_DID)
467 spin_unlock_irq(phba->host->host_lock); 675 lpfc_unreg_rpi(vport, ndlp);
468 676 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
469 677 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
470 if (phba->fc_flag & FC_LBIT) { 678 /* Fail outstanding IO now since device is
471 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 679 * marked for PLOGI.
472 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) { 680 */
473 if (ndlp->nlp_type & NLP_FABRIC) { 681 lpfc_unreg_rpi(vport, ndlp);
474 /*
475 * On Linkup its safe to clean up the
476 * ndlp from Fabric connections.
477 */
478 lpfc_nlp_set_state(phba, ndlp,
479 NLP_STE_UNUSED_NODE);
480 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
481 /*
482 * Fail outstanding IO now since
483 * device is marked for PLOGI.
484 */
485 lpfc_unreg_rpi(phba, ndlp);
486 }
487 }
488 } 682 }
489 } 683 }
684}
490 685
491 /* free any ndlp's on unused list */ 686static void
492 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 687lpfc_linkup_port(struct lpfc_vport *vport)
493 nlp_listp) { 688{
689 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
690 struct lpfc_nodelist *ndlp, *next_ndlp;
691 struct lpfc_hba *phba = vport->phba;
692
693 if ((vport->load_flag & FC_UNLOADING) != 0)
694 return;
695
696 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
697 "Link Up: top:x%x speed:x%x flg:x%x",
698 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
699
700 /* If NPIV is not enabled, only bring the physical port up */
701 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
702 (vport != phba->pport))
703 return;
704
705 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
706
707 spin_lock_irq(shost->host_lock);
708 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
709 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
710 vport->fc_flag |= FC_NDISC_ACTIVE;
711 vport->fc_ns_retry = 0;
712 spin_unlock_irq(shost->host_lock);
713
714 if (vport->fc_flag & FC_LBIT)
715 lpfc_linkup_cleanup_nodes(vport);
716
717 /* free any ndlp's in unused state */
718 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
719 nlp_listp)
494 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 720 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
495 lpfc_drop_node(phba, ndlp); 721 lpfc_drop_node(vport, ndlp);
722}
723
724static int
725lpfc_linkup(struct lpfc_hba *phba)
726{
727 struct lpfc_vport *vport;
728
729 phba->link_state = LPFC_LINK_UP;
730
731 /* Unblock fabric iocbs if they are blocked */
732 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
733 del_timer_sync(&phba->fabric_block_timer);
734
735 list_for_each_entry(vport, &phba->port_list, listentry) {
736 lpfc_linkup_port(vport);
496 } 737 }
738 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
739 lpfc_issue_clear_la(phba, phba->pport);
497 740
498 return 0; 741 return 0;
499} 742}
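
The rewrite above splits link-up handling in two: lpfc_linkup() now walks the
HBA's port_list and applies the same bring-up to every virtual port, with the
per-port work factored into lpfc_linkup_port() and lpfc_linkup_cleanup_nodes().
A minimal userspace sketch of that walk follows; the struct layout, flag value,
and function names are illustrative stand-ins, not the lpfc definitions.

#include <stdio.h>

#define FC_LBIT 0x1          /* illustrative: loop "clean address" bit */

struct vport {
    int          vpi;        /* virtual port index */
    unsigned int fc_flag;
};

static void linkup_port(struct vport *v)
{
    /* per-port bring-up: clear discovery flags, then clean up
     * stale nodes only when the fabric granted FC_LBIT */
    printf("vport %d: link up%s\n", v->vpi,
           (v->fc_flag & FC_LBIT) ? " (cleaning nodes)" : "");
}

int main(void)
{
    struct vport ports[] = { { 0, FC_LBIT }, { 1, 0 }, { 2, 0 } };
    size_t i;

    /* lpfc_linkup() iterates phba->port_list the same way */
    for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
        linkup_port(&ports[i]);
    return 0;
}
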
@@ -505,14 +748,14 @@ lpfc_linkup(struct lpfc_hba *phba)
505 * handed off to the SLI layer. 748 * handed off to the SLI layer.
506 */ 749 */
507void 750void
508lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 751lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
509{ 752{
510 struct lpfc_sli *psli; 753 struct lpfc_vport *vport = pmb->vport;
511 MAILBOX_t *mb; 754 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
755 struct lpfc_sli *psli = &phba->sli;
756 MAILBOX_t *mb = &pmb->mb;
512 uint32_t control; 757 uint32_t control;
513 758
514 psli = &phba->sli;
515 mb = &pmb->mb;
516 /* Since we don't do discovery right now, turn these off here */ 759 /* Since we don't do discovery right now, turn these off here */
517 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 760 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
518 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 761 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
@@ -522,69 +765,74 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
522 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 765 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
523 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ 766 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
524 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 767 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
525 "%d:0320 CLEAR_LA mbxStatus error x%x hba " 768 "%d (%d):0320 CLEAR_LA mbxStatus error x%x hba "
526 "state x%x\n", 769 "state x%x\n",
527 phba->brd_no, mb->mbxStatus, phba->hba_state); 770 phba->brd_no, vport->vpi, mb->mbxStatus,
771 vport->port_state);
528 772
529 phba->hba_state = LPFC_HBA_ERROR; 773 phba->link_state = LPFC_HBA_ERROR;
530 goto out; 774 goto out;
531 } 775 }
532 776
533 if (phba->fc_flag & FC_ABORT_DISCOVERY) 777 if (vport->port_type == LPFC_PHYSICAL_PORT)
534 goto out; 778 phba->link_state = LPFC_HBA_READY;
535 779
536 phba->num_disc_nodes = 0; 780 spin_lock_irq(&phba->hbalock);
537 /* go thru NPR list and issue ELS PLOGIs */ 781 psli->sli_flag |= LPFC_PROCESS_LA;
538 if (phba->fc_npr_cnt) { 782 control = readl(phba->HCregaddr);
539 lpfc_els_disc_plogi(phba); 783 control |= HC_LAINT_ENA;
540 } 784 writel(control, phba->HCregaddr);
785 readl(phba->HCregaddr); /* flush */
786 spin_unlock_irq(&phba->hbalock);
787 return;
788
789 vport->num_disc_nodes = 0;
790 /* go thru NPR nodes and issue ELS PLOGIs */
791 if (vport->fc_npr_cnt)
792 lpfc_els_disc_plogi(vport);
541 793
542 if (!phba->num_disc_nodes) { 794 if (!vport->num_disc_nodes) {
543 spin_lock_irq(phba->host->host_lock); 795 spin_lock_irq(shost->host_lock);
544 phba->fc_flag &= ~FC_NDISC_ACTIVE; 796 vport->fc_flag &= ~FC_NDISC_ACTIVE;
545 spin_unlock_irq(phba->host->host_lock); 797 spin_unlock_irq(shost->host_lock);
546 } 798 }
547 799
548 phba->hba_state = LPFC_HBA_READY; 800 vport->port_state = LPFC_VPORT_READY;
549 801
550out: 802out:
551 /* Device Discovery completes */ 803 /* Device Discovery completes */
552 lpfc_printf_log(phba, 804 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
553 KERN_INFO, 805 "%d (%d):0225 Device Discovery completes\n",
554 LOG_DISCOVERY, 806 phba->brd_no, vport->vpi);
555 "%d:0225 Device Discovery completes\n",
556 phba->brd_no);
557 807
558 mempool_free( pmb, phba->mbox_mem_pool); 808 mempool_free(pmb, phba->mbox_mem_pool);
559 809
560 spin_lock_irq(phba->host->host_lock); 810 spin_lock_irq(shost->host_lock);
561 phba->fc_flag &= ~FC_ABORT_DISCOVERY; 811 vport->fc_flag &= ~(FC_ABORT_DISCOVERY | FC_ESTABLISH_LINK);
562 if (phba->fc_flag & FC_ESTABLISH_LINK) { 812 spin_unlock_irq(shost->host_lock);
563 phba->fc_flag &= ~FC_ESTABLISH_LINK;
564 }
565 spin_unlock_irq(phba->host->host_lock);
566 813
567 del_timer_sync(&phba->fc_estabtmo); 814 del_timer_sync(&phba->fc_estabtmo);
568 815
569 lpfc_can_disctmo(phba); 816 lpfc_can_disctmo(vport);
570 817
571 /* turn on Link Attention interrupts */ 818 /* turn on Link Attention interrupts */
572 spin_lock_irq(phba->host->host_lock); 819
820 spin_lock_irq(&phba->hbalock);
573 psli->sli_flag |= LPFC_PROCESS_LA; 821 psli->sli_flag |= LPFC_PROCESS_LA;
574 control = readl(phba->HCregaddr); 822 control = readl(phba->HCregaddr);
575 control |= HC_LAINT_ENA; 823 control |= HC_LAINT_ENA;
576 writel(control, phba->HCregaddr); 824 writel(control, phba->HCregaddr);
577 readl(phba->HCregaddr); /* flush */ 825 readl(phba->HCregaddr); /* flush */
578 spin_unlock_irq(phba->host->host_lock); 826 spin_unlock_irq(&phba->hbalock);
579 827
580 return; 828 return;
581} 829}
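
Each mailbox carries a mbox_cmpl function pointer that the SLI layer invokes
once the command completes; the handler checks mbxStatus and returns the
mailbox to the mempool, as lpfc_mbx_cmpl_clear_la() does above. A userspace
sketch of that callback shape follows; malloc/free stand in for the mempool
and the struct is an illustrative stand-in, not the real LPFC_MBOXQ_t.

#include <stdio.h>
#include <stdlib.h>

struct mboxq;
typedef void (*mbox_cmpl_t)(struct mboxq *);

struct mboxq {
    int         mbxStatus;   /* 0 == success */
    mbox_cmpl_t mbox_cmpl;   /* invoked when the command finishes */
};

static void cmpl_clear_la(struct mboxq *mb)
{
    if (mb->mbxStatus)
        fprintf(stderr, "CLEAR_LA failed: 0x%x\n", mb->mbxStatus);
    else
        puts("CLEAR_LA done; re-enable link-attention interrupts");
    free(mb);                /* mempool_free() in the driver */
}

static void issue_and_complete(mbox_cmpl_t cmpl, int status)
{
    struct mboxq *mb = malloc(sizeof(*mb));  /* mempool_alloc() */

    if (!mb)
        return;
    mb->mbox_cmpl = cmpl;
    mb->mbxStatus = status;  /* pretend the firmware ran it */
    mb->mbox_cmpl(mb);       /* the SLI layer does this on completion */
}

int main(void)
{
    issue_and_complete(cmpl_clear_la, 0);
    issue_and_complete(cmpl_clear_la, 0x1601);
    return 0;
}
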
582 830
831
583static void 832static void
584lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 833lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
585{ 834{
586 struct lpfc_sli *psli = &phba->sli; 835 struct lpfc_vport *vport = pmb->vport;
587 int rc;
588 836
589 if (pmb->mb.mbxStatus) 837 if (pmb->mb.mbxStatus)
590 goto out; 838 goto out;
@@ -592,154 +840,139 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
592 mempool_free(pmb, phba->mbox_mem_pool); 840 mempool_free(pmb, phba->mbox_mem_pool);
593 841
594 if (phba->fc_topology == TOPOLOGY_LOOP && 842 if (phba->fc_topology == TOPOLOGY_LOOP &&
595 phba->fc_flag & FC_PUBLIC_LOOP && 843 vport->fc_flag & FC_PUBLIC_LOOP &&
596 !(phba->fc_flag & FC_LBIT)) { 844 !(vport->fc_flag & FC_LBIT)) {
597 /* Need to wait for FAN - use discovery timer 845 /* Need to wait for FAN - use discovery timer
598 * for timeout. hba_state is identically 846 * for timeout. port_state is identically
599 * LPFC_LOCAL_CFG_LINK while waiting for FAN 847 * LPFC_LOCAL_CFG_LINK while waiting for FAN
600 */ 848 */
601 lpfc_set_disctmo(phba); 849 lpfc_set_disctmo(vport);
602 return; 850 return;
603 } 851 }
604 852
605 /* Start discovery by sending a FLOGI. hba_state is identically 853 /* Start discovery by sending a FLOGI. port_state is identically
606 * LPFC_FLOGI while waiting for FLOGI cmpl 854 * LPFC_FLOGI while waiting for FLOGI cmpl
607 */ 855 */
608 phba->hba_state = LPFC_FLOGI; 856 if (vport->port_state != LPFC_FLOGI) {
609 lpfc_set_disctmo(phba); 857 vport->port_state = LPFC_FLOGI;
610 lpfc_initial_flogi(phba); 858 lpfc_set_disctmo(vport);
859 lpfc_initial_flogi(vport);
860 }
611 return; 861 return;
612 862
613out: 863out:
614 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 864 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
615 "%d:0306 CONFIG_LINK mbxStatus error x%x " 865 "%d (%d):0306 CONFIG_LINK mbxStatus error x%x "
616 "HBA state x%x\n", 866 "HBA state x%x\n",
617 phba->brd_no, pmb->mb.mbxStatus, phba->hba_state); 867 phba->brd_no, vport->vpi, pmb->mb.mbxStatus,
868 vport->port_state);
618 869
619 lpfc_linkdown(phba); 870 mempool_free(pmb, phba->mbox_mem_pool);
620 871
621 phba->hba_state = LPFC_HBA_ERROR; 872 lpfc_linkdown(phba);
622 873
623 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 874 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
624 "%d:0200 CONFIG_LINK bad hba state x%x\n", 875 "%d (%d):0200 CONFIG_LINK bad hba state x%x\n",
625 phba->brd_no, phba->hba_state); 876 phba->brd_no, vport->vpi, vport->port_state);
626 877
627 lpfc_clear_la(phba, pmb); 878 lpfc_issue_clear_la(phba, vport);
628 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
629 rc = lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
630 if (rc == MBX_NOT_FINISHED) {
631 mempool_free(pmb, phba->mbox_mem_pool);
632 lpfc_disc_flush_list(phba);
633 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
634 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
635 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
636 phba->hba_state = LPFC_HBA_READY;
637 }
638 return; 879 return;
639} 880}
640 881
641static void 882static void
642lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 883lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
643{ 884{
644 struct lpfc_sli *psli = &phba->sli;
645 MAILBOX_t *mb = &pmb->mb; 885 MAILBOX_t *mb = &pmb->mb;
646 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 886 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
887 struct lpfc_vport *vport = pmb->vport;
647 888
648 889
649 /* Check for error */ 890 /* Check for error */
650 if (mb->mbxStatus) { 891 if (mb->mbxStatus) {
651 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 892 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
652 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
653 "%d:0319 READ_SPARAM mbxStatus error x%x " 894 "%d (%d):0319 READ_SPARAM mbxStatus error x%x "
654 "hba state x%x>\n", 895 "hba state x%x>\n",
655 phba->brd_no, mb->mbxStatus, phba->hba_state); 896 phba->brd_no, vport->vpi, mb->mbxStatus,
897 vport->port_state);
656 898
657 lpfc_linkdown(phba); 899 lpfc_linkdown(phba);
658 phba->hba_state = LPFC_HBA_ERROR;
659 goto out; 900 goto out;
660 } 901 }
661 902
662 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt, 903 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
663 sizeof (struct serv_parm)); 904 sizeof (struct serv_parm));
664 if (phba->cfg_soft_wwnn) 905 if (phba->cfg_soft_wwnn)
665 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); 906 u64_to_wwn(phba->cfg_soft_wwnn,
907 vport->fc_sparam.nodeName.u.wwn);
666 if (phba->cfg_soft_wwpn) 908 if (phba->cfg_soft_wwpn)
667 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 909 u64_to_wwn(phba->cfg_soft_wwpn,
668 memcpy((uint8_t *) & phba->fc_nodename, 910 vport->fc_sparam.portName.u.wwn);
669 (uint8_t *) & phba->fc_sparam.nodeName, 911 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
670 sizeof (struct lpfc_name)); 912 sizeof(vport->fc_nodename));
671 memcpy((uint8_t *) & phba->fc_portname, 913 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
672 (uint8_t *) & phba->fc_sparam.portName, 914 sizeof(vport->fc_portname));
673 sizeof (struct lpfc_name)); 915 if (vport->port_type == LPFC_PHYSICAL_PORT) {
916 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
917 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
918 }
919
674 lpfc_mbuf_free(phba, mp->virt, mp->phys); 920 lpfc_mbuf_free(phba, mp->virt, mp->phys);
675 kfree(mp); 921 kfree(mp);
676 mempool_free( pmb, phba->mbox_mem_pool); 922 mempool_free(pmb, phba->mbox_mem_pool);
677 return; 923 return;
678 924
679out: 925out:
680 pmb->context1 = NULL; 926 pmb->context1 = NULL;
681 lpfc_mbuf_free(phba, mp->virt, mp->phys); 927 lpfc_mbuf_free(phba, mp->virt, mp->phys);
682 kfree(mp); 928 kfree(mp);
683 if (phba->hba_state != LPFC_CLEAR_LA) { 929 lpfc_issue_clear_la(phba, vport);
684 lpfc_clear_la(phba, pmb); 930 mempool_free(pmb, phba->mbox_mem_pool);
685 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
686 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
687 == MBX_NOT_FINISHED) {
688 mempool_free( pmb, phba->mbox_mem_pool);
689 lpfc_disc_flush_list(phba);
690 psli->ring[(psli->extra_ring)].flag &=
691 ~LPFC_STOP_IOCB_EVENT;
692 psli->ring[(psli->fcp_ring)].flag &=
693 ~LPFC_STOP_IOCB_EVENT;
694 psli->ring[(psli->next_ring)].flag &=
695 ~LPFC_STOP_IOCB_EVENT;
696 phba->hba_state = LPFC_HBA_READY;
697 }
698 } else {
699 mempool_free( pmb, phba->mbox_mem_pool);
700 }
701 return; 931 return;
702} 932}
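
The soft WWNN/WWPN overrides above unpack a 64-bit configured value into the
8-byte, MSB-first wwn array inside the copied service parameters. Below is a
standalone illustration of that conversion; u64_to_wwn is the driver's own
helper, so this re-implementation is only a sketch of what it does.

#include <stdint.h>
#include <stdio.h>

static void u64_to_wwn8(uint64_t in, uint8_t wwn[8])
{
    int i;

    for (i = 0; i < 8; i++)          /* MSB first: wire order */
        wwn[i] = (uint8_t)(in >> (56 - 8 * i));
}

int main(void)
{
    uint8_t wwn[8];
    int i;

    u64_to_wwn8(0x20000000c9abcdefULL, wwn);
    for (i = 0; i < 8; i++)
        printf("%02x%s", wwn[i], i < 7 ? ":" : "\n");
    return 0;   /* prints 20:00:00:00:c9:ab:cd:ef */
}
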
703 933
704static void 934static void
705lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 935lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
706{ 936{
707 int i; 937 struct lpfc_vport *vport = phba->pport;
708 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; 938 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
939 int i;
709 struct lpfc_dmabuf *mp; 940 struct lpfc_dmabuf *mp;
710 int rc; 941 int rc;
711 942
712 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 943 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
713 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 944 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
714 945
715 spin_lock_irq(phba->host->host_lock); 946 spin_lock_irq(&phba->hbalock);
716 switch (la->UlnkSpeed) { 947 switch (la->UlnkSpeed) {
717 case LA_1GHZ_LINK: 948 case LA_1GHZ_LINK:
718 phba->fc_linkspeed = LA_1GHZ_LINK; 949 phba->fc_linkspeed = LA_1GHZ_LINK;
719 break; 950 break;
720 case LA_2GHZ_LINK: 951 case LA_2GHZ_LINK:
721 phba->fc_linkspeed = LA_2GHZ_LINK; 952 phba->fc_linkspeed = LA_2GHZ_LINK;
722 break; 953 break;
723 case LA_4GHZ_LINK: 954 case LA_4GHZ_LINK:
724 phba->fc_linkspeed = LA_4GHZ_LINK; 955 phba->fc_linkspeed = LA_4GHZ_LINK;
725 break; 956 break;
726 case LA_8GHZ_LINK: 957 case LA_8GHZ_LINK:
727 phba->fc_linkspeed = LA_8GHZ_LINK; 958 phba->fc_linkspeed = LA_8GHZ_LINK;
728 break; 959 break;
729 default: 960 default:
730 phba->fc_linkspeed = LA_UNKNW_LINK; 961 phba->fc_linkspeed = LA_UNKNW_LINK;
731 break; 962 break;
732 } 963 }
733 964
734 phba->fc_topology = la->topology; 965 phba->fc_topology = la->topology;
966 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
735 967
736 if (phba->fc_topology == TOPOLOGY_LOOP) { 968 if (phba->fc_topology == TOPOLOGY_LOOP) {
737 /* Get Loop Map information */ 969 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
738 970
971 /* Get Loop Map information */
739 if (la->il) 972 if (la->il)
740 phba->fc_flag |= FC_LBIT; 973 vport->fc_flag |= FC_LBIT;
741 974
742 phba->fc_myDID = la->granted_AL_PA; 975 vport->fc_myDID = la->granted_AL_PA;
743 i = la->un.lilpBde64.tus.f.bdeSize; 976 i = la->un.lilpBde64.tus.f.bdeSize;
744 977
745 if (i == 0) { 978 if (i == 0) {
@@ -769,29 +1002,35 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
769 } 1002 }
770 /* Link Up Event ALPA map */ 1003 /* Link Up Event ALPA map */
771 lpfc_printf_log(phba, 1004 lpfc_printf_log(phba,
772 KERN_WARNING, 1005 KERN_WARNING,
773 LOG_LINK_EVENT, 1006 LOG_LINK_EVENT,
774 "%d:1304 Link Up Event " 1007 "%d:1304 Link Up Event "
775 "ALPA map Data: x%x " 1008 "ALPA map Data: x%x "
776 "x%x x%x x%x\n", 1009 "x%x x%x x%x\n",
777 phba->brd_no, 1010 phba->brd_no,
778 un.pa.wd1, un.pa.wd2, 1011 un.pa.wd1, un.pa.wd2,
779 un.pa.wd3, un.pa.wd4); 1012 un.pa.wd3, un.pa.wd4);
780 } 1013 }
781 } 1014 }
782 } 1015 }
783 } else { 1016 } else {
784 phba->fc_myDID = phba->fc_pref_DID; 1017 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
785 phba->fc_flag |= FC_LBIT; 1018 if (phba->max_vpi && phba->cfg_npiv_enable &&
1019 (phba->sli_rev == 3))
1020 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
1021 }
1022 vport->fc_myDID = phba->fc_pref_DID;
1023 vport->fc_flag |= FC_LBIT;
786 } 1024 }
787 spin_unlock_irq(phba->host->host_lock); 1025 spin_unlock_irq(&phba->hbalock);
788 1026
789 lpfc_linkup(phba); 1027 lpfc_linkup(phba);
790 if (sparam_mbox) { 1028 if (sparam_mbox) {
791 lpfc_read_sparam(phba, sparam_mbox); 1029 lpfc_read_sparam(phba, sparam_mbox, 0);
1030 sparam_mbox->vport = vport;
792 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 1031 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
793 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, 1032 rc = lpfc_sli_issue_mbox(phba, sparam_mbox,
794 (MBX_NOWAIT | MBX_STOP_IOCB)); 1033 (MBX_NOWAIT | MBX_STOP_IOCB));
795 if (rc == MBX_NOT_FINISHED) { 1034 if (rc == MBX_NOT_FINISHED) {
796 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 1035 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
797 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1036 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -799,36 +1038,48 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
799 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1038 mempool_free(sparam_mbox, phba->mbox_mem_pool);
800 if (cfglink_mbox) 1039 if (cfglink_mbox)
801 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1040 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
802 return; 1041 goto out;
803 } 1042 }
804 } 1043 }
805 1044
806 if (cfglink_mbox) { 1045 if (cfglink_mbox) {
807 phba->hba_state = LPFC_LOCAL_CFG_LINK; 1046 vport->port_state = LPFC_LOCAL_CFG_LINK;
808 lpfc_config_link(phba, cfglink_mbox); 1047 lpfc_config_link(phba, cfglink_mbox);
1048 cfglink_mbox->vport = vport;
809 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1049 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
810 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, 1050 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox,
811 (MBX_NOWAIT | MBX_STOP_IOCB)); 1051 (MBX_NOWAIT | MBX_STOP_IOCB));
812 if (rc == MBX_NOT_FINISHED) 1052 if (rc != MBX_NOT_FINISHED)
813 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1053 return;
1054 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
814 } 1055 }
1056out:
1057 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1058 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1059 "%d (%d):0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
1060 phba->brd_no, vport->vpi,
1061 vport->port_state, sparam_mbox, cfglink_mbox);
1062
1063 lpfc_issue_clear_la(phba, vport);
1064 return;
815} 1065}
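
The UlnkSpeed switch at the top of lpfc_mbx_process_link_up() is a whitelist:
recognized speed codes are copied through and everything else collapses to
LA_UNKNW_LINK. A sketch of that idiom follows; the enum values here are made
up for the example and are not the lpfc_hw.h definitions.

#include <stdio.h>

enum la_speed {
    LA_UNKNW = 0,
    LA_1GHZ  = 1, LA_2GHZ = 2, LA_4GHZ = 4, LA_8GHZ = 8
};

static enum la_speed normalize_speed(unsigned int raw)
{
    switch (raw) {
    case LA_1GHZ:
    case LA_2GHZ:
    case LA_4GHZ:
    case LA_8GHZ:
        return (enum la_speed)raw;   /* recognized: keep as-is */
    default:
        return LA_UNKNW;             /* anything else: unknown */
    }
}

int main(void)
{
    printf("%d %d\n", normalize_speed(4), normalize_speed(10));
    return 0;   /* prints "4 0" */
}
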
816 1066
817static void 1067static void
818lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { 1068lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
1069{
819 uint32_t control; 1070 uint32_t control;
820 struct lpfc_sli *psli = &phba->sli; 1071 struct lpfc_sli *psli = &phba->sli;
821 1072
822 lpfc_linkdown(phba); 1073 lpfc_linkdown(phba);
823 1074
824 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1075 /* turn on Link Attention interrupts - no CLEAR_LA needed */
825 spin_lock_irq(phba->host->host_lock); 1076 spin_lock_irq(&phba->hbalock);
826 psli->sli_flag |= LPFC_PROCESS_LA; 1077 psli->sli_flag |= LPFC_PROCESS_LA;
827 control = readl(phba->HCregaddr); 1078 control = readl(phba->HCregaddr);
828 control |= HC_LAINT_ENA; 1079 control |= HC_LAINT_ENA;
829 writel(control, phba->HCregaddr); 1080 writel(control, phba->HCregaddr);
830 readl(phba->HCregaddr); /* flush */ 1081 readl(phba->HCregaddr); /* flush */
831 spin_unlock_irq(phba->host->host_lock); 1082 spin_unlock_irq(&phba->hbalock);
832} 1083}
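
Re-enabling link attention is a read-modify-write of the host-control register
followed by one more read; on PCI that trailing read forces the posted write
out to the device before the lock is dropped. A userspace model of the idiom,
with a plain volatile word standing in for the mapped HC register and
HC_LAINT_ENA given an illustrative value:

#include <stdint.h>
#include <stdio.h>

#define HC_LAINT_ENA 0x20u           /* illustrative bit value */

static volatile uint32_t fake_hcreg; /* stands in for phba->HCregaddr */

static uint32_t readl_model(volatile uint32_t *reg)  { return *reg; }
static void writel_model(uint32_t v, volatile uint32_t *reg) { *reg = v; }

static void enable_link_attention(void)
{
    uint32_t control = readl_model(&fake_hcreg);

    control |= HC_LAINT_ENA;
    writel_model(control, &fake_hcreg);
    (void)readl_model(&fake_hcreg);  /* read back: flush posted write */
}

int main(void)
{
    enable_link_attention();
    printf("HC = 0x%x\n", (unsigned)fake_hcreg);
    return 0;
}
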
833 1084
834/* 1085/*
@@ -838,22 +1089,21 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
838 * handed off to the SLI layer. 1089 * handed off to the SLI layer.
839 */ 1090 */
840void 1091void
841lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1092lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
842{ 1093{
1094 struct lpfc_vport *vport = pmb->vport;
1095 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
843 READ_LA_VAR *la; 1096 READ_LA_VAR *la;
844 MAILBOX_t *mb = &pmb->mb; 1097 MAILBOX_t *mb = &pmb->mb;
845 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1098 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
846 1099
847 /* Check for error */ 1100 /* Check for error */
848 if (mb->mbxStatus) { 1101 if (mb->mbxStatus) {
849 lpfc_printf_log(phba, 1102 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
850 KERN_INFO,
851 LOG_LINK_EVENT,
852 "%d:1307 READ_LA mbox error x%x state x%x\n", 1103 "%d:1307 READ_LA mbox error x%x state x%x\n",
853 phba->brd_no, 1104 phba->brd_no, mb->mbxStatus, vport->port_state);
854 mb->mbxStatus, phba->hba_state);
855 lpfc_mbx_issue_link_down(phba); 1105 lpfc_mbx_issue_link_down(phba);
856 phba->hba_state = LPFC_HBA_ERROR; 1106 phba->link_state = LPFC_HBA_ERROR;
857 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1107 goto lpfc_mbx_cmpl_read_la_free_mbuf;
858 } 1108 }
859 1109
@@ -861,27 +1111,26 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
861 1111
862 memcpy(&phba->alpa_map[0], mp->virt, 128); 1112 memcpy(&phba->alpa_map[0], mp->virt, 128);
863 1113
864 spin_lock_irq(phba->host->host_lock); 1114 spin_lock_irq(shost->host_lock);
865 if (la->pb) 1115 if (la->pb)
866 phba->fc_flag |= FC_BYPASSED_MODE; 1116 vport->fc_flag |= FC_BYPASSED_MODE;
867 else 1117 else
868 phba->fc_flag &= ~FC_BYPASSED_MODE; 1118 vport->fc_flag &= ~FC_BYPASSED_MODE;
869 spin_unlock_irq(phba->host->host_lock); 1119 spin_unlock_irq(shost->host_lock);
870 1120
871 if (((phba->fc_eventTag + 1) < la->eventTag) || 1121 if (((phba->fc_eventTag + 1) < la->eventTag) ||
872 (phba->fc_eventTag == la->eventTag)) { 1122 (phba->fc_eventTag == la->eventTag)) {
873 phba->fc_stat.LinkMultiEvent++; 1123 phba->fc_stat.LinkMultiEvent++;
874 if (la->attType == AT_LINK_UP) { 1124 if (la->attType == AT_LINK_UP)
875 if (phba->fc_eventTag != 0) 1125 if (phba->fc_eventTag != 0)
876 lpfc_linkdown(phba); 1126 lpfc_linkdown(phba);
877 }
878 } 1127 }
879 1128
880 phba->fc_eventTag = la->eventTag; 1129 phba->fc_eventTag = la->eventTag;
881 1130
882 if (la->attType == AT_LINK_UP) { 1131 if (la->attType == AT_LINK_UP) {
883 phba->fc_stat.LinkUp++; 1132 phba->fc_stat.LinkUp++;
884 if (phba->fc_flag & FC_LOOPBACK_MODE) { 1133 if (phba->link_flag & LS_LOOPBACK_MODE) {
885 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1134 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
886 "%d:1306 Link Up Event in loop back mode " 1135 "%d:1306 Link Up Event in loop back mode "
887 "x%x received Data: x%x x%x x%x x%x\n", 1136 "x%x received Data: x%x x%x x%x x%x\n",
@@ -903,7 +1152,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
903 "%d:1305 Link Down Event x%x received " 1152 "%d:1305 Link Down Event x%x received "
904 "Data: x%x x%x x%x\n", 1153 "Data: x%x x%x x%x\n",
905 phba->brd_no, la->eventTag, phba->fc_eventTag, 1154 phba->brd_no, la->eventTag, phba->fc_eventTag,
906 phba->hba_state, phba->fc_flag); 1155 phba->pport->port_state, vport->fc_flag);
907 lpfc_mbx_issue_link_down(phba); 1156 lpfc_mbx_issue_link_down(phba);
908 } 1157 }
909 1158
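
The eventTag comparison above detects out-of-sequence link attention events:
the firmware tags each event, so a jump of more than one means events were
missed, and an equal tag means a duplicate; either case counts as a multi-event
and may force a link-down first. The predicate, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

static int is_multi_event(uint32_t last_tag, uint32_t new_tag)
{
    /* expected next tag is last_tag + 1; anything beyond that means
     * events were skipped, anything equal is a repeat */
    return (last_tag + 1 < new_tag) || (last_tag == new_tag);
}

int main(void)
{
    printf("%d %d %d\n",
           is_multi_event(5, 6),    /* 0: the expected next event */
           is_multi_event(5, 8),    /* 1: events 6 and 7 missed   */
           is_multi_event(5, 5));   /* 1: duplicate notification  */
    return 0;
}
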
@@ -921,31 +1170,115 @@ lpfc_mbx_cmpl_read_la_free_mbuf:
921 * handed off to the SLI layer. 1170 * handed off to the SLI layer.
922 */ 1171 */
923void 1172void
924lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1173lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
925{ 1174{
926 struct lpfc_sli *psli; 1175 struct lpfc_vport *vport = pmb->vport;
927 MAILBOX_t *mb; 1176 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
928 struct lpfc_dmabuf *mp; 1177 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
929 struct lpfc_nodelist *ndlp;
930
931 psli = &phba->sli;
932 mb = &pmb->mb;
933
934 ndlp = (struct lpfc_nodelist *) pmb->context2;
935 mp = (struct lpfc_dmabuf *) (pmb->context1);
936 1178
937 pmb->context1 = NULL; 1179 pmb->context1 = NULL;
938 1180
939 /* Good status, call state machine */ 1181 /* Good status, call state machine */
940 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); 1182 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
941 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1183 lpfc_mbuf_free(phba, mp->virt, mp->phys);
942 kfree(mp); 1184 kfree(mp);
943 mempool_free( pmb, phba->mbox_mem_pool); 1185 mempool_free(pmb, phba->mbox_mem_pool);
944 lpfc_nlp_put(ndlp); 1186 lpfc_nlp_put(ndlp);
945 1187
946 return; 1188 return;
947} 1189}
948 1190
1191static void
1192lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1193{
1194 MAILBOX_t *mb = &pmb->mb;
1195 struct lpfc_vport *vport = pmb->vport;
1196 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1197
1198 switch (mb->mbxStatus) {
1199 case 0x0011:
1200 case 0x0020:
1201 case 0x9700:
1202 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1203 "%d (%d):0911 cmpl_unreg_vpi, "
1204 "mb status = 0x%x\n",
1205 phba->brd_no, vport->vpi, mb->mbxStatus);
1206 break;
1207 }
1208 vport->unreg_vpi_cmpl = VPORT_OK;
1209 mempool_free(pmb, phba->mbox_mem_pool);
1210 /*
1211 * This shost reference might have been taken at the beginning of
1212 * lpfc_vport_delete()
1213 */
1214 if (vport->load_flag & FC_UNLOADING)
1215 scsi_host_put(shost);
1216}
1217
1218void
1219lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
1220{
1221 struct lpfc_hba *phba = vport->phba;
1222 LPFC_MBOXQ_t *mbox;
1223 int rc;
1224
1225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1226 if (!mbox)
1227 return;
1228
1229 lpfc_unreg_vpi(phba, vport->vpi, mbox);
1230 mbox->vport = vport;
1231 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
1232 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1233 if (rc == MBX_NOT_FINISHED) {
1234 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1235 "%d (%d):1800 Could not issue unreg_vpi\n",
1236 phba->brd_no, vport->vpi);
1237 mempool_free(mbox, phba->mbox_mem_pool);
1238 vport->unreg_vpi_cmpl = VPORT_ERROR;
1239 }
1240}
1241
1242static void
1243lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1244{
1245 struct lpfc_vport *vport = pmb->vport;
1246 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1247 MAILBOX_t *mb = &pmb->mb;
1248
1249 switch (mb->mbxStatus) {
1250 case 0x0011:
1251 case 0x9601:
1252 case 0x9602:
1253 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1254 "%d (%d):0912 cmpl_reg_vpi, mb status = 0x%x\n",
1255 phba->brd_no, vport->vpi, mb->mbxStatus);
1256 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1257 spin_lock_irq(shost->host_lock);
1258 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1259 spin_unlock_irq(shost->host_lock);
1260 vport->fc_myDID = 0;
1261 goto out;
1262 }
1263
1264 vport->num_disc_nodes = 0;
1265 /* go thru NPR list and issue ELS PLOGIs */
1266 if (vport->fc_npr_cnt)
1267 lpfc_els_disc_plogi(vport);
1268
1269 if (!vport->num_disc_nodes) {
1270 spin_lock_irq(shost->host_lock);
1271 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1272 spin_unlock_irq(shost->host_lock);
1273 lpfc_can_disctmo(vport);
1274 }
1275 vport->port_state = LPFC_VPORT_READY;
1276
1277out:
1278 mempool_free(pmb, phba->mbox_mem_pool);
1279 return;
1280}
1281
949/* 1282/*
950 * This routine handles processing a Fabric REG_LOGIN mailbox 1283 * This routine handles processing a Fabric REG_LOGIN mailbox
951 * command upon completion. It is setup in the LPFC_MBOXQ 1284 * command upon completion. It is setup in the LPFC_MBOXQ
@@ -953,20 +1286,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
953 * handed off to the SLI layer. 1286 * handed off to the SLI layer.
954 */ 1287 */
955void 1288void
956lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1289lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
957{ 1290{
958 struct lpfc_sli *psli; 1291 struct lpfc_vport *vport = pmb->vport;
959 MAILBOX_t *mb; 1292 struct lpfc_vport *next_vport;
960 struct lpfc_dmabuf *mp; 1293 MAILBOX_t *mb = &pmb->mb;
1294 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
961 struct lpfc_nodelist *ndlp; 1295 struct lpfc_nodelist *ndlp;
962 struct lpfc_nodelist *ndlp_fdmi;
963
964
965 psli = &phba->sli;
966 mb = &pmb->mb;
967
968 ndlp = (struct lpfc_nodelist *) pmb->context2; 1296 ndlp = (struct lpfc_nodelist *) pmb->context2;
969 mp = (struct lpfc_dmabuf *) (pmb->context1);
970 1297
971 pmb->context1 = NULL; 1298 pmb->context1 = NULL;
972 pmb->context2 = NULL; 1299 pmb->context2 = NULL;
@@ -977,60 +1304,46 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
977 mempool_free(pmb, phba->mbox_mem_pool); 1304 mempool_free(pmb, phba->mbox_mem_pool);
978 lpfc_nlp_put(ndlp); 1305 lpfc_nlp_put(ndlp);
979 1306
980 /* FLOGI failed, so just use loop map to make discovery list */ 1307 if (phba->fc_topology == TOPOLOGY_LOOP) {
981 lpfc_disc_list_loopmap(phba); 1308 /* FLOGI failed, use loop map to make discovery list */
1309 lpfc_disc_list_loopmap(vport);
1310
1311 /* Start discovery */
1312 lpfc_disc_start(vport);
1313 return;
1314 }
1315
1316 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1317 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1318 "%d (%d):0258 Register Fabric login error: 0x%x\n",
1319 phba->brd_no, vport->vpi, mb->mbxStatus);
982 1320
983 /* Start discovery */
984 lpfc_disc_start(phba);
985 return; 1321 return;
986 } 1322 }
987 1323
988 ndlp->nlp_rpi = mb->un.varWords[0]; 1324 ndlp->nlp_rpi = mb->un.varWords[0];
989 ndlp->nlp_type |= NLP_FABRIC; 1325 ndlp->nlp_type |= NLP_FABRIC;
990 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1326 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
991 1327
992 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */ 1328 lpfc_nlp_put(ndlp); /* Drop the reference from the mbox */
993 1329
994 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) { 1330 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
995 /* This NPort has been assigned an NPort_ID by the fabric as a 1331 list_for_each_entry(next_vport, &phba->port_list, listentry) {
996 * result of the completed fabric login. Issue a State Change 1332 if (next_vport->port_type == LPFC_PHYSICAL_PORT)
997 * Registration (SCR) ELS request to the fabric controller 1333 continue;
998 * (SCR_DID) so that this NPort gets RSCN events from the 1334
999 * fabric. 1335 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
1000 */ 1336 lpfc_initial_fdisc(next_vport);
1001 lpfc_issue_els_scr(phba, SCR_DID, 0); 1337 else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1002 1338 lpfc_vport_set_state(vport,
1003 ndlp = lpfc_findnode_did(phba, NameServer_DID); 1339 FC_VPORT_NO_FABRIC_SUPP);
1004 if (!ndlp) { 1340 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1005 /* Allocate a new node instance. If the pool is empty, 1341 "%d (%d):0259 No NPIV Fabric "
1006 * start the discovery process and skip the Nameserver 1342 "support\n",
1007 * login process. This is attempted again later on. 1343 phba->brd_no, vport->vpi);
1008 * Otherwise, issue a Port Login (PLOGI) to NameServer.
1009 */
1010 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1011 if (!ndlp) {
1012 lpfc_disc_start(phba);
1013 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1014 kfree(mp);
1015 mempool_free(pmb, phba->mbox_mem_pool);
1016 return;
1017 } else {
1018 lpfc_nlp_init(phba, ndlp, NameServer_DID);
1019 ndlp->nlp_type |= NLP_FABRIC;
1020 }
1021 }
1022 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1023 lpfc_issue_els_plogi(phba, NameServer_DID, 0);
1024 if (phba->cfg_fdmi_on) {
1025 ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
1026 GFP_KERNEL);
1027 if (ndlp_fdmi) {
1028 lpfc_nlp_init(phba, ndlp_fdmi, FDMI_DID);
1029 ndlp_fdmi->nlp_type |= NLP_FABRIC;
1030 ndlp_fdmi->nlp_state = NLP_STE_PLOGI_ISSUE;
1031 lpfc_issue_els_plogi(phba, FDMI_DID, 0);
1032 } 1344 }
1033 } 1345 }
1346 lpfc_do_scr_ns_plogi(phba, vport);
1034 } 1347 }
1035 1348
1036 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1349 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -1046,32 +1359,36 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1046 * handed off to the SLI layer. 1359 * handed off to the SLI layer.
1047 */ 1360 */
1048void 1361void
1049lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1362lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1050{ 1363{
1051 struct lpfc_sli *psli; 1364 MAILBOX_t *mb = &pmb->mb;
1052 MAILBOX_t *mb; 1365 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
1053 struct lpfc_dmabuf *mp; 1366 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
1054 struct lpfc_nodelist *ndlp; 1367 struct lpfc_vport *vport = pmb->vport;
1055
1056 psli = &phba->sli;
1057 mb = &pmb->mb;
1058
1059 ndlp = (struct lpfc_nodelist *) pmb->context2;
1060 mp = (struct lpfc_dmabuf *) (pmb->context1);
1061 1368
1062 if (mb->mbxStatus) { 1369 if (mb->mbxStatus) {
1370out:
1063 lpfc_nlp_put(ndlp); 1371 lpfc_nlp_put(ndlp);
1064 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1372 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1065 kfree(mp); 1373 kfree(mp);
1066 mempool_free(pmb, phba->mbox_mem_pool); 1374 mempool_free(pmb, phba->mbox_mem_pool);
1067 lpfc_drop_node(phba, ndlp); 1375 lpfc_drop_node(vport, ndlp);
1068 1376
1069 /* RegLogin failed, so just use loop map to make discovery 1377 if (phba->fc_topology == TOPOLOGY_LOOP) {
1070 list */ 1378 /*
1071 lpfc_disc_list_loopmap(phba); 1379 * RegLogin failed, use loop map to make discovery
1380 * list
1381 */
1382 lpfc_disc_list_loopmap(vport);
1072 1383
1073 /* Start discovery */ 1384 /* Start discovery */
1074 lpfc_disc_start(phba); 1385 lpfc_disc_start(vport);
1386 return;
1387 }
1388 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1389 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
1390 "%d (%d):0260 Register NameServer error: 0x%x\n",
1391 phba->brd_no, vport->vpi, mb->mbxStatus);
1075 return; 1392 return;
1076 } 1393 }
1077 1394
@@ -1079,37 +1396,43 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1079 1396
1080 ndlp->nlp_rpi = mb->un.varWords[0]; 1397 ndlp->nlp_rpi = mb->un.varWords[0];
1081 ndlp->nlp_type |= NLP_FABRIC; 1398 ndlp->nlp_type |= NLP_FABRIC;
1082 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1083 1400
1084 if (phba->hba_state < LPFC_HBA_READY) { 1401 if (vport->port_state < LPFC_VPORT_READY) {
1085 /* Link up discovery requires Fabrib registration. */ 1402 /* Link up discovery requires Fabric registration. */
1086 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID); 1403 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
1087 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN); 1404 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
1088 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID); 1405 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
1089 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFF_ID); 1406 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
1407 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
1408
1409 /* Issue SCR just before NameServer GID_FT Query */
1410 lpfc_issue_els_scr(vport, SCR_DID, 0);
1090 } 1411 }
1091 1412
1092 phba->fc_ns_retry = 0; 1413 vport->fc_ns_retry = 0;
1093 /* Good status, issue CT Request to NameServer */ 1414 /* Good status, issue CT Request to NameServer */
1094 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) { 1415 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
1095 /* Cannot issue NameServer Query, so finish up discovery */ 1416 /* Cannot issue NameServer Query, so finish up discovery */
1096 lpfc_disc_start(phba); 1417 goto out;
1097 } 1418 }
1098 1419
1099 lpfc_nlp_put(ndlp); 1420 lpfc_nlp_put(ndlp);
1100 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1421 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1101 kfree(mp); 1422 kfree(mp);
1102 mempool_free( pmb, phba->mbox_mem_pool); 1423 mempool_free(pmb, phba->mbox_mem_pool);
1103 1424
1104 return; 1425 return;
1105} 1426}
1106 1427
1107static void 1428static void
1108lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1429lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1109{ 1430{
1110 struct fc_rport *rport; 1431 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1432 struct fc_rport *rport;
1111 struct lpfc_rport_data *rdata; 1433 struct lpfc_rport_data *rdata;
1112 struct fc_rport_identifiers rport_ids; 1434 struct fc_rport_identifiers rport_ids;
1435 struct lpfc_hba *phba = vport->phba;
1113 1436
1114 /* Remote port has reappeared. Re-register w/ FC transport */ 1437 /* Remote port has reappeared. Re-register w/ FC transport */
1115 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 1438 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
@@ -1125,10 +1448,15 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1125 * registered the port. 1448 * registered the port.
1126 */ 1449 */
1127 if (ndlp->rport && ndlp->rport->dd_data && 1450 if (ndlp->rport && ndlp->rport->dd_data &&
1128 *(struct lpfc_rport_data **) ndlp->rport->dd_data) { 1451 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) {
1129 lpfc_nlp_put(ndlp); 1452 lpfc_nlp_put(ndlp);
1130 } 1453 }
1131 ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids); 1454
1455 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
1456 "rport add: did:x%x flg:x%x type x%x",
1457 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1458
1459 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
1132 if (!rport || !get_device(&rport->dev)) { 1460 if (!rport || !get_device(&rport->dev)) {
1133 dev_printk(KERN_WARNING, &phba->pcidev->dev, 1461 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1134 "Warning: fc_remote_port_add failed\n"); 1462 "Warning: fc_remote_port_add failed\n");
@@ -1151,25 +1479,20 @@ lpfc_register_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1151 fc_remote_port_rolechg(rport, rport_ids.roles); 1479 fc_remote_port_rolechg(rport, rport_ids.roles);
1152 1480
1153 if ((rport->scsi_target_id != -1) && 1481 if ((rport->scsi_target_id != -1) &&
1154 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 1482 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
1155 ndlp->nlp_sid = rport->scsi_target_id; 1483 ndlp->nlp_sid = rport->scsi_target_id;
1156 } 1484 }
1157
1158 return; 1485 return;
1159} 1486}
1160 1487
1161static void 1488static void
1162lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1489lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
1163{ 1490{
1164 struct fc_rport *rport = ndlp->rport; 1491 struct fc_rport *rport = ndlp->rport;
1165 struct lpfc_rport_data *rdata = rport->dd_data;
1166 1492
1167 if (rport->scsi_target_id == -1) { 1493 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
1168 ndlp->rport = NULL; 1494 "rport delete: did:x%x flg:x%x type x%x",
1169 rdata->pnode = NULL; 1495 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
1170 lpfc_nlp_put(ndlp);
1171 put_device(&rport->dev);
1172 }
1173 1496
1174 fc_remote_port_delete(rport); 1497 fc_remote_port_delete(rport);
1175 1498
@@ -1177,42 +1500,46 @@ lpfc_unregister_remote_port(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1177} 1500}
1178 1501
1179static void 1502static void
1180lpfc_nlp_counters(struct lpfc_hba *phba, int state, int count) 1503lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
1181{ 1504{
1182 spin_lock_irq(phba->host->host_lock); 1505 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1506
1507 spin_lock_irq(shost->host_lock);
1183 switch (state) { 1508 switch (state) {
1184 case NLP_STE_UNUSED_NODE: 1509 case NLP_STE_UNUSED_NODE:
1185 phba->fc_unused_cnt += count; 1510 vport->fc_unused_cnt += count;
1186 break; 1511 break;
1187 case NLP_STE_PLOGI_ISSUE: 1512 case NLP_STE_PLOGI_ISSUE:
1188 phba->fc_plogi_cnt += count; 1513 vport->fc_plogi_cnt += count;
1189 break; 1514 break;
1190 case NLP_STE_ADISC_ISSUE: 1515 case NLP_STE_ADISC_ISSUE:
1191 phba->fc_adisc_cnt += count; 1516 vport->fc_adisc_cnt += count;
1192 break; 1517 break;
1193 case NLP_STE_REG_LOGIN_ISSUE: 1518 case NLP_STE_REG_LOGIN_ISSUE:
1194 phba->fc_reglogin_cnt += count; 1519 vport->fc_reglogin_cnt += count;
1195 break; 1520 break;
1196 case NLP_STE_PRLI_ISSUE: 1521 case NLP_STE_PRLI_ISSUE:
1197 phba->fc_prli_cnt += count; 1522 vport->fc_prli_cnt += count;
1198 break; 1523 break;
1199 case NLP_STE_UNMAPPED_NODE: 1524 case NLP_STE_UNMAPPED_NODE:
1200 phba->fc_unmap_cnt += count; 1525 vport->fc_unmap_cnt += count;
1201 break; 1526 break;
1202 case NLP_STE_MAPPED_NODE: 1527 case NLP_STE_MAPPED_NODE:
1203 phba->fc_map_cnt += count; 1528 vport->fc_map_cnt += count;
1204 break; 1529 break;
1205 case NLP_STE_NPR_NODE: 1530 case NLP_STE_NPR_NODE:
1206 phba->fc_npr_cnt += count; 1531 vport->fc_npr_cnt += count;
1207 break; 1532 break;
1208 } 1533 }
1209 spin_unlock_irq(phba->host->host_lock); 1534 spin_unlock_irq(shost->host_lock);
1210} 1535}
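
lpfc_nlp_counters() keeps one per-vport counter for every node state and
adjusts it by +1/-1 under the SCSI host lock. A minimal model follows; a
pthread mutex stands in for shost->host_lock and the state names are
abbreviated stand-ins.

#include <pthread.h>
#include <stdio.h>

enum nlp_state { ST_UNUSED, ST_PLOGI, ST_ADISC, ST_REGLOGIN,
                 ST_PRLI, ST_UNMAPPED, ST_MAPPED, ST_NPR, ST_MAX };

static int counters[ST_MAX];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void nlp_counters(enum nlp_state state, int count)
{
    pthread_mutex_lock(&lock);
    counters[state] += count;  /* the driver spells this as a switch */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    nlp_counters(ST_PLOGI, 1);
    nlp_counters(ST_PLOGI, -1);
    nlp_counters(ST_MAPPED, 1);
    printf("plogi=%d mapped=%d\n",
           counters[ST_PLOGI], counters[ST_MAPPED]);
    return 0;
}
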
1211 1536
1212static void 1537static void
1213lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1538lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1214 int old_state, int new_state) 1539 int old_state, int new_state)
1215{ 1540{
1541 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1542
1216 if (new_state == NLP_STE_UNMAPPED_NODE) { 1543 if (new_state == NLP_STE_UNMAPPED_NODE) {
1217 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 1544 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1218 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 1545 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -1226,35 +1553,34 @@ lpfc_nlp_state_cleanup(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1226 /* Transport interface */ 1553 /* Transport interface */
1227 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || 1554 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
1228 old_state == NLP_STE_UNMAPPED_NODE)) { 1555 old_state == NLP_STE_UNMAPPED_NODE)) {
1229 phba->nport_event_cnt++; 1556 vport->phba->nport_event_cnt++;
1230 lpfc_unregister_remote_port(phba, ndlp); 1557 lpfc_unregister_remote_port(ndlp);
1231 } 1558 }
1232 1559
1233 if (new_state == NLP_STE_MAPPED_NODE || 1560 if (new_state == NLP_STE_MAPPED_NODE ||
1234 new_state == NLP_STE_UNMAPPED_NODE) { 1561 new_state == NLP_STE_UNMAPPED_NODE) {
1235 phba->nport_event_cnt++; 1562 vport->phba->nport_event_cnt++;
1236 /* 1563 /*
1237 * Tell the fc transport about the port, if we haven't 1564 * Tell the fc transport about the port, if we haven't
1238 * already. If we have, and it's a scsi entity, be 1565 * already. If we have, and it's a scsi entity, be
1239 * sure to unblock any attached scsi devices 1566 * sure to unblock any attached scsi devices
1240 */ 1567 */
1241 lpfc_register_remote_port(phba, ndlp); 1568 lpfc_register_remote_port(vport, ndlp);
1242 } 1569 }
1243 1570 /*
1244 /* 1571 * if we added to Mapped list, but the remote port
1245 * if we added to Mapped list, but the remote port 1572 * registration failed or assigned a target id outside
1246 * registration failed or assigned a target id outside 1573 * our presentable range - move the node to the
1247 * our presentable range - move the node to the 1574 * Unmapped List
1248 * Unmapped List 1575 */
1249 */
1250 if (new_state == NLP_STE_MAPPED_NODE && 1576 if (new_state == NLP_STE_MAPPED_NODE &&
1251 (!ndlp->rport || 1577 (!ndlp->rport ||
1252 ndlp->rport->scsi_target_id == -1 || 1578 ndlp->rport->scsi_target_id == -1 ||
1253 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { 1579 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
1254 spin_lock_irq(phba->host->host_lock); 1580 spin_lock_irq(shost->host_lock);
1255 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; 1581 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1256 spin_unlock_irq(phba->host->host_lock); 1582 spin_unlock_irq(shost->host_lock);
1257 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1583 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1258 } 1584 }
1259} 1585}
1260 1586
@@ -1280,61 +1606,74 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state)
1280} 1606}
1281 1607
1282void 1608void
1283lpfc_nlp_set_state(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int state) 1609lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1610 int state)
1284{ 1611{
1612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1285 int old_state = ndlp->nlp_state; 1613 int old_state = ndlp->nlp_state;
1286 char name1[16], name2[16]; 1614 char name1[16], name2[16];
1287 1615
1288 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1616 lpfc_printf_log(vport->phba, KERN_INFO, LOG_NODE,
1289 "%d:0904 NPort state transition x%06x, %s -> %s\n", 1617 "%d (%d):0904 NPort state transition x%06x, %s -> %s\n",
1290 phba->brd_no, 1618 vport->phba->brd_no, vport->vpi,
1291 ndlp->nlp_DID, 1619 ndlp->nlp_DID,
1292 lpfc_nlp_state_name(name1, sizeof(name1), old_state), 1620 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
1293 lpfc_nlp_state_name(name2, sizeof(name2), state)); 1621 lpfc_nlp_state_name(name2, sizeof(name2), state));
1622
1623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
1624 "node statechg did:x%x old:%d ste:%d",
1625 ndlp->nlp_DID, old_state, state);
1626
1294 if (old_state == NLP_STE_NPR_NODE && 1627 if (old_state == NLP_STE_NPR_NODE &&
1295 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 && 1628 (ndlp->nlp_flag & NLP_DELAY_TMO) != 0 &&
1296 state != NLP_STE_NPR_NODE) 1629 state != NLP_STE_NPR_NODE)
1297 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1630 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1298 if (old_state == NLP_STE_UNMAPPED_NODE) { 1631 if (old_state == NLP_STE_UNMAPPED_NODE) {
1299 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; 1632 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1300 ndlp->nlp_type &= ~NLP_FC_NODE; 1633 ndlp->nlp_type &= ~NLP_FC_NODE;
1301 } 1634 }
1302 1635
1303 if (list_empty(&ndlp->nlp_listp)) { 1636 if (list_empty(&ndlp->nlp_listp)) {
1304 spin_lock_irq(phba->host->host_lock); 1637 spin_lock_irq(shost->host_lock);
1305 list_add_tail(&ndlp->nlp_listp, &phba->fc_nodes); 1638 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
1306 spin_unlock_irq(phba->host->host_lock); 1639 spin_unlock_irq(shost->host_lock);
1307 } else if (old_state) 1640 } else if (old_state)
1308 lpfc_nlp_counters(phba, old_state, -1); 1641 lpfc_nlp_counters(vport, old_state, -1);
1309 1642
1310 ndlp->nlp_state = state; 1643 ndlp->nlp_state = state;
1311 lpfc_nlp_counters(phba, state, 1); 1644 lpfc_nlp_counters(vport, state, 1);
1312 lpfc_nlp_state_cleanup(phba, ndlp, old_state, state); 1645 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
1313} 1646}
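
A state transition above does three pieces of bookkeeping, in order: decrement
the counter for the state being left (unless the node was never on the list),
record and count the new state, then run cleanup keyed on the (old, new) pair,
which is where remote ports get registered or unregistered. A compact sketch;
the names and the cleanup rules are simplified stand-ins, not the lpfc logic.

#include <stdio.h>

enum nlp_state { ST_UNUSED, ST_NPR, ST_UNMAPPED, ST_MAPPED, ST_MAX };

static int counters[ST_MAX];

struct node { enum nlp_state state; int on_list; };

static void state_cleanup(enum nlp_state old_st, enum nlp_state new_st)
{
    if (old_st == ST_MAPPED || old_st == ST_UNMAPPED)
        puts("  unregister remote port");
    if (new_st == ST_MAPPED || new_st == ST_UNMAPPED)
        puts("  register remote port");
}

static void set_state(struct node *n, enum nlp_state new_st)
{
    enum nlp_state old_st = n->state;

    if (!n->on_list)
        n->on_list = 1;        /* first use: put node on the list */
    else
        counters[old_st]--;    /* leaving the old state */

    n->state = new_st;
    counters[new_st]++;        /* entering the new state */
    state_cleanup(old_st, new_st);
}

int main(void)
{
    struct node n = { ST_UNUSED, 0 };

    set_state(&n, ST_NPR);
    set_state(&n, ST_MAPPED);
    printf("npr=%d mapped=%d\n", counters[ST_NPR], counters[ST_MAPPED]);
    return 0;
}
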
1314 1647
1315void 1648void
1316lpfc_dequeue_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1649lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1317{ 1650{
1651 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1652
1318 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1653 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1319 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1654 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1320 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1655 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1321 lpfc_nlp_counters(phba, ndlp->nlp_state, -1); 1656 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1322 spin_lock_irq(phba->host->host_lock); 1657 spin_lock_irq(shost->host_lock);
1323 list_del_init(&ndlp->nlp_listp); 1658 list_del_init(&ndlp->nlp_listp);
1324 spin_unlock_irq(phba->host->host_lock); 1659 spin_unlock_irq(shost->host_lock);
1325 lpfc_nlp_state_cleanup(phba, ndlp, ndlp->nlp_state, 0); 1660 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
1661 NLP_STE_UNUSED_NODE);
1326} 1662}
1327 1663
1328void 1664void
1329lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 1665lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1330{ 1666{
1667 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1668
1331 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0) 1669 if ((ndlp->nlp_flag & NLP_DELAY_TMO) != 0)
1332 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1670 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1333 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 1671 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
1334 lpfc_nlp_counters(phba, ndlp->nlp_state, -1); 1672 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
1335 spin_lock_irq(phba->host->host_lock); 1673 spin_lock_irq(shost->host_lock);
1336 list_del_init(&ndlp->nlp_listp); 1674 list_del_init(&ndlp->nlp_listp);
1337 spin_unlock_irq(phba->host->host_lock); 1675 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
1676 spin_unlock_irq(shost->host_lock);
1338 lpfc_nlp_put(ndlp); 1677 lpfc_nlp_put(ndlp);
1339} 1678}
1340 1679
@@ -1342,11 +1681,13 @@ lpfc_drop_node(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1342 * Start / ReStart rescue timer for Discovery / RSCN handling 1681 * Start / ReStart rescue timer for Discovery / RSCN handling
1343 */ 1682 */
1344void 1683void
1345lpfc_set_disctmo(struct lpfc_hba * phba) 1684lpfc_set_disctmo(struct lpfc_vport *vport)
1346{ 1685{
1686 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1687 struct lpfc_hba *phba = vport->phba;
1347 uint32_t tmo; 1688 uint32_t tmo;
1348 1689
1349 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) { 1690 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
1350 /* For FAN, timeout should be greater than edtov */ 1691 /* For FAN, timeout should be greater than edtov */
1351 tmo = (((phba->fc_edtov + 999) / 1000) + 1); 1692 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
1352 } else { 1693 } else {
@@ -1356,18 +1697,25 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
1356 tmo = ((phba->fc_ratov * 3) + 3); 1697 tmo = ((phba->fc_ratov * 3) + 3);
1357 } 1698 }
1358 1699
1359 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo); 1700
1360 spin_lock_irq(phba->host->host_lock); 1701 if (!timer_pending(&vport->fc_disctmo)) {
1361 phba->fc_flag |= FC_DISC_TMO; 1702 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1362 spin_unlock_irq(phba->host->host_lock); 1703 "set disc timer: tmo:x%x state:x%x flg:x%x",
1704 tmo, vport->port_state, vport->fc_flag);
1705 }
1706
1707 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
1708 spin_lock_irq(shost->host_lock);
1709 vport->fc_flag |= FC_DISC_TMO;
1710 spin_unlock_irq(shost->host_lock);
1363 1711
1364 /* Start Discovery Timer state <hba_state> */ 1712 /* Start Discovery Timer state <hba_state> */
1365 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1713 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1366 "%d:0247 Start Discovery Timer state x%x " 1714 "%d (%d):0247 Start Discovery Timer state x%x "
1367 "Data: x%x x%lx x%x x%x\n", 1715 "Data: x%x x%lx x%x x%x\n",
1368 phba->brd_no, 1716 phba->brd_no, vport->vpi, vport->port_state, tmo,
1369 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo, 1717 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
1370 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 1718 vport->fc_adisc_cnt);
1371 1719
1372 return; 1720 return;
1373} 1721}
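
The two timeout formulas above are worth spelling out: while waiting for FAN
the timer must exceed E_D_TOV, so the millisecond value is ceiling-divided
into seconds and padded by one; otherwise the timer covers 3 x R_A_TOV for CT
traffic with three seconds of slack. A worked example:

#include <stdio.h>

static unsigned int fan_timeout(unsigned int edtov_ms)
{
    return (edtov_ms + 999) / 1000 + 1;   /* ceil(ms / 1000) + 1 */
}

static unsigned int disc_timeout(unsigned int ratov_s)
{
    return ratov_s * 3 + 3;               /* 3 x R_A_TOV, plus slack */
}

int main(void)
{
    printf("edtov 2000ms -> %u s\n", fan_timeout(2000));   /* 3  */
    printf("edtov 2500ms -> %u s\n", fan_timeout(2500));   /* 4  */
    printf("ratov 10s    -> %u s\n", disc_timeout(10));    /* 33 */
    return 0;
}
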
@@ -1376,23 +1724,34 @@ lpfc_set_disctmo(struct lpfc_hba * phba)
1376 * Cancel rescue timer for Discovery / RSCN handling 1724 * Cancel rescue timer for Discovery / RSCN handling
1377 */ 1725 */
1378int 1726int
1379lpfc_can_disctmo(struct lpfc_hba * phba) 1727lpfc_can_disctmo(struct lpfc_vport *vport)
1380{ 1728{
1729 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1730 struct lpfc_hba *phba = vport->phba;
1731 unsigned long iflags;
1732
1733 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1734 "can disc timer: state:x%x rtry:x%x flg:x%x",
1735 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1736
1381 /* Turn off discovery timer if it's running */ 1737 /* Turn off discovery timer if it's running */
1382 if (phba->fc_flag & FC_DISC_TMO) { 1738 if (vport->fc_flag & FC_DISC_TMO) {
1383 spin_lock_irq(phba->host->host_lock); 1739 spin_lock_irqsave(shost->host_lock, iflags);
1384 phba->fc_flag &= ~FC_DISC_TMO; 1740 vport->fc_flag &= ~FC_DISC_TMO;
1385 spin_unlock_irq(phba->host->host_lock); 1741 spin_unlock_irqrestore(shost->host_lock, iflags);
1386 del_timer_sync(&phba->fc_disctmo); 1742 del_timer_sync(&vport->fc_disctmo);
1387 phba->work_hba_events &= ~WORKER_DISC_TMO; 1743 spin_lock_irqsave(&vport->work_port_lock, iflags);
1744 vport->work_port_events &= ~WORKER_DISC_TMO;
1745 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
1388 } 1746 }
1389 1747
1390 /* Cancel Discovery Timer state <hba_state> */ 1748 /* Cancel Discovery Timer state <hba_state> */
1391 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1749 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1392 "%d:0248 Cancel Discovery Timer state x%x " 1750 "%d (%d):0248 Cancel Discovery Timer state x%x "
1393 "Data: x%x x%x x%x\n", 1751 "Data: x%x x%x x%x\n",
1394 phba->brd_no, phba->hba_state, phba->fc_flag, 1752 phba->brd_no, vport->vpi, vport->port_state,
1395 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 1753 vport->fc_flag, vport->fc_plogi_cnt,
1754 vport->fc_adisc_cnt);
1396 1755
1397 return 0; 1756 return 0;
1398} 1757}
@@ -1402,15 +1761,18 @@ lpfc_can_disctmo(struct lpfc_hba * phba)
1402 * Return true if iocb matches the specified nport 1761 * Return true if iocb matches the specified nport
1403 */ 1762 */
1404int 1763int
1405lpfc_check_sli_ndlp(struct lpfc_hba * phba, 1764lpfc_check_sli_ndlp(struct lpfc_hba *phba,
1406 struct lpfc_sli_ring * pring, 1765 struct lpfc_sli_ring *pring,
1407 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp) 1766 struct lpfc_iocbq *iocb,
1767 struct lpfc_nodelist *ndlp)
1408{ 1768{
1409 struct lpfc_sli *psli; 1769 struct lpfc_sli *psli = &phba->sli;
1410 IOCB_t *icmd; 1770 IOCB_t *icmd = &iocb->iocb;
1771 struct lpfc_vport *vport = ndlp->vport;
1772
1773 if (iocb->vport != vport)
1774 return 0;
1411 1775
1412 psli = &phba->sli;
1413 icmd = &iocb->iocb;
1414 if (pring->ringno == LPFC_ELS_RING) { 1776 if (pring->ringno == LPFC_ELS_RING) {
1415 switch (icmd->ulpCommand) { 1777 switch (icmd->ulpCommand) {
1416 case CMD_GEN_REQUEST64_CR: 1778 case CMD_GEN_REQUEST64_CR:
@@ -1428,7 +1790,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1428 } else if (pring->ringno == psli->fcp_ring) { 1790 } else if (pring->ringno == psli->fcp_ring) {
1429 /* Skip match check if waiting to relogin to FCP target */ 1791 /* Skip match check if waiting to relogin to FCP target */
1430 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 1792 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1431 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 1793 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1432 return 0; 1794 return 0;
1433 } 1795 }
1434 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { 1796 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
@@ -1445,7 +1807,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1445 * associated with nlp_rpi in the LPFC_NODELIST entry. 1807 * associated with nlp_rpi in the LPFC_NODELIST entry.
1446 */ 1808 */
1447static int 1809static int
1448lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1810lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1449{ 1811{
1450 LIST_HEAD(completions); 1812 LIST_HEAD(completions);
1451 struct lpfc_sli *psli; 1813 struct lpfc_sli *psli;
@@ -1454,6 +1816,8 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1454 IOCB_t *icmd; 1816 IOCB_t *icmd;
1455 uint32_t rpi, i; 1817 uint32_t rpi, i;
1456 1818
1819 lpfc_fabric_abort_nport(ndlp);
1820
1457 /* 1821 /*
1458 * Everything that matches on txcmplq will be returned 1822 * Everything that matches on txcmplq will be returned
1459 * by firmware with a no rpi error. 1823 * by firmware with a no rpi error.
@@ -1465,15 +1829,15 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1465 for (i = 0; i < psli->num_rings; i++) { 1829 for (i = 0; i < psli->num_rings; i++) {
1466 pring = &psli->ring[i]; 1830 pring = &psli->ring[i];
1467 1831
1468 spin_lock_irq(phba->host->host_lock); 1832 spin_lock_irq(&phba->hbalock);
1469 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, 1833 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1470 list) { 1834 list) {
1471 /* 1835 /*
1472 * Check to see if iocb matches the nport we are 1836 * Check to see if iocb matches the nport we are
1473 * looking for 1837 * looking for
1474 */ 1838 */
1475 if ((lpfc_check_sli_ndlp 1839 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
1476 (phba, pring, iocb, ndlp))) { 1840 ndlp))) {
1477 /* It matches, so dequeue and call compl 1841 /* It matches, so dequeue and call compl
1478 with an error */ 1842 with an error */
1479 list_move_tail(&iocb->list, 1843 list_move_tail(&iocb->list,
@@ -1481,22 +1845,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1481 pring->txq_cnt--; 1845 pring->txq_cnt--;
1482 } 1846 }
1483 } 1847 }
1484 spin_unlock_irq(phba->host->host_lock); 1848 spin_unlock_irq(&phba->hbalock);
1485
1486 } 1849 }
1487 } 1850 }
1488 1851
1489 while (!list_empty(&completions)) { 1852 while (!list_empty(&completions)) {
1490 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1853 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1491 list_del(&iocb->list); 1854 list_del_init(&iocb->list);
1492 1855
1493 if (iocb->iocb_cmpl) { 1856 if (!iocb->iocb_cmpl)
1857 lpfc_sli_release_iocbq(phba, iocb);
1858 else {
1494 icmd = &iocb->iocb; 1859 icmd = &iocb->iocb;
1495 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1860 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1496 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1861 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1497 (iocb->iocb_cmpl) (phba, iocb, iocb); 1862 (iocb->iocb_cmpl)(phba, iocb, iocb);
1498 } else 1863 }
1499 lpfc_sli_release_iocbq(phba, iocb);
1500 } 1864 }
1501 1865
1502 return 0; 1866 return 0;
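
lpfc_no_rpi() uses the classic two-phase abort: while holding the lock it
moves every iocb that matches the node from the ring's txq onto a private
completions list, then drops the lock and fails each entry with
IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, so completion handlers never run under
the spinlock. A self-contained model of the pattern; a singly linked queue
and a pthread mutex stand in for the ring txq and hbalock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct iocb {
    int          nport;      /* owner, used for matching */
    struct iocb *next;
};

static struct iocb *txq;
static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static void fail_nport_iocbs(int nport)
{
    struct iocb **pp, *done = NULL, *io;

    pthread_mutex_lock(&hbalock);
    for (pp = &txq; (io = *pp) != NULL; ) {
        if (io->nport == nport) {    /* matches: move to done list */
            *pp = io->next;
            io->next = done;
            done = io;
        } else
            pp = &io->next;
    }
    pthread_mutex_unlock(&hbalock);

    while (done) {                   /* complete outside the lock */
        io = done;
        done = io->next;
        printf("iocb for nport %d: LOCAL_REJECT/SLI_ABORTED\n",
               io->nport);
        free(io);
    }
}

int main(void)
{
    int ids[] = { 1, 2, 1 }, i;

    for (i = 0; i < 3; i++) {        /* queue three pending iocbs */
        struct iocb *io = malloc(sizeof(*io));
        io->nport = ids[i];
        io->next = txq;
        txq = io;
    }
    fail_nport_iocbs(1);             /* fails two, leaves nport 2 queued */
    return 0;
}
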
@@ -1512,19 +1876,22 @@ lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1512 * we are waiting to PLOGI back to the remote NPort. 1876 * we are waiting to PLOGI back to the remote NPort.
1513 */ 1877 */
1514int 1878int
1515lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1879lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1516{ 1880{
1517 LPFC_MBOXQ_t *mbox; 1881 struct lpfc_hba *phba = vport->phba;
1882 LPFC_MBOXQ_t *mbox;
1518 int rc; 1883 int rc;
1519 1884
1520 if (ndlp->nlp_rpi) { 1885 if (ndlp->nlp_rpi) {
1521 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 1886 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1522 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox); 1887 if (mbox) {
1523 mbox->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 1888 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
1524 rc = lpfc_sli_issue_mbox 1889 mbox->vport = vport;
1525 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 1890 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1891 rc = lpfc_sli_issue_mbox(phba, mbox,
1892 (MBX_NOWAIT | MBX_STOP_IOCB));
1526 if (rc == MBX_NOT_FINISHED) 1893 if (rc == MBX_NOT_FINISHED)
1527 mempool_free( mbox, phba->mbox_mem_pool); 1894 mempool_free(mbox, phba->mbox_mem_pool);
1528 } 1895 }
1529 lpfc_no_rpi(phba, ndlp); 1896 lpfc_no_rpi(phba, ndlp);
1530 ndlp->nlp_rpi = 0; 1897 ndlp->nlp_rpi = 0;
@@ -1533,25 +1900,70 @@ lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1533 return 0; 1900 return 0;
1534} 1901}
1535 1902
1903void
1904lpfc_unreg_all_rpis(struct lpfc_vport *vport)
1905{
1906 struct lpfc_hba *phba = vport->phba;
1907 LPFC_MBOXQ_t *mbox;
1908 int rc;
1909
1910 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1911 if (mbox) {
1912 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
1913 mbox->vport = vport;
1914 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1915 rc = lpfc_sli_issue_mbox(phba, mbox,
1916 (MBX_NOWAIT | MBX_STOP_IOCB));
1917 if (rc == MBX_NOT_FINISHED) {
1918 mempool_free(mbox, phba->mbox_mem_pool);
1919 }
1920 }
1921}
1922
1923void
1924lpfc_unreg_default_rpis(struct lpfc_vport *vport)
1925{
1926 struct lpfc_hba *phba = vport->phba;
1927 LPFC_MBOXQ_t *mbox;
1928 int rc;
1929
1930 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1931 if (mbox) {
1932 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox);
1933 mbox->vport = vport;
1934 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1935 rc = lpfc_sli_issue_mbox(phba, mbox,
1936 (MBX_NOWAIT | MBX_STOP_IOCB));
1937 if (rc == MBX_NOT_FINISHED) {
1938 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1939 "%d (%d):1815 Could not issue "
1940 "unreg_did (default rpis)\n",
1941 phba->brd_no, vport->vpi);
1942 mempool_free(mbox, phba->mbox_mem_pool);
1943 }
1944 }
1945}
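
lpfc_unreg_rpi() and the two new helpers above all follow one ownership rule for the mailbox buffer: allocate, fill in, set a completion handler, issue with MBX_NOWAIT, and free locally only when the issue returns MBX_NOT_FINISHED; on a successful submit the completion handler owns and frees the buffer. A hedged sketch of that rule, with invented names and plain malloc standing in for the mempool:

#include <stdio.h>
#include <stdlib.h>

enum { MBX_SUCCESS, MBX_NOT_FINISHED };

struct mbox {
	int rpi;			/* payload: which login to tear down */
	void (*cmpl)(struct mbox *);
};

static void def_cmpl(struct mbox *m)
{
	printf("unreg_login for rpi %d completed\n", m->rpi);
	free(m);			/* success path: the handler owns the buffer */
}

/* Pretend to queue to the adapter; fail when the "queue" is full. */
static int issue_mbox_nowait(struct mbox *m, int queue_full)
{
	if (queue_full)
		return MBX_NOT_FINISHED;	/* caller keeps ownership */
	m->cmpl(m);			/* really fires later, from IRQ context */
	return MBX_SUCCESS;
}

static void unreg_login(int rpi, int queue_full)
{
	struct mbox *m = malloc(sizeof(*m));
	if (!m)
		return;			/* like a mempool_alloc failure: give up quietly */
	m->rpi = rpi;
	m->cmpl = def_cmpl;
	if (issue_mbox_nowait(m, queue_full) == MBX_NOT_FINISHED)
		free(m);		/* failure path: we still own the buffer */
}

int main(void)
{
	unreg_login(3, 0);		/* queued: freed by the completion */
	unreg_login(4, 1);		/* not queued: freed right here */
	return 0;
}
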
1946
1536/* 1947/*
1537 * Free resources associated with LPFC_NODELIST entry 1948 * Free resources associated with LPFC_NODELIST entry
1538 * so it can be freed. 1949 * so it can be freed.
1539 */ 1950 */
1540static int 1951static int
1541lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 1952lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1542{ 1953{
1543 LPFC_MBOXQ_t *mb; 1954 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1544 LPFC_MBOXQ_t *nextmb; 1955 struct lpfc_hba *phba = vport->phba;
1956 LPFC_MBOXQ_t *mb, *nextmb;
1545 struct lpfc_dmabuf *mp; 1957 struct lpfc_dmabuf *mp;
1546 1958
1547 /* Cleanup node for NPort <nlp_DID> */ 1959 /* Cleanup node for NPort <nlp_DID> */
1548 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 1960 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1549 "%d:0900 Cleanup node for NPort x%x " 1961 "%d (%d):0900 Cleanup node for NPort x%x "
1550 "Data: x%x x%x x%x\n", 1962 "Data: x%x x%x x%x\n",
1551 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 1963 phba->brd_no, vport->vpi, ndlp->nlp_DID, ndlp->nlp_flag,
1552 ndlp->nlp_state, ndlp->nlp_rpi); 1964 ndlp->nlp_state, ndlp->nlp_rpi);
1553 1965
1554 lpfc_dequeue_node(phba, ndlp); 1966 lpfc_dequeue_node(vport, ndlp);
1555 1967
1556 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 1968 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1557 if ((mb = phba->sli.mbox_active)) { 1969 if ((mb = phba->sli.mbox_active)) {
@@ -1562,13 +1974,13 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1562 } 1974 }
1563 } 1975 }
1564 1976
1565 spin_lock_irq(phba->host->host_lock); 1977 spin_lock_irq(&phba->hbalock);
1566 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1978 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1567 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1979 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1568 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1980 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1569 mp = (struct lpfc_dmabuf *) (mb->context1); 1981 mp = (struct lpfc_dmabuf *) (mb->context1);
1570 if (mp) { 1982 if (mp) {
1571 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1983 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
1572 kfree(mp); 1984 kfree(mp);
1573 } 1985 }
1574 list_del(&mb->list); 1986 list_del(&mb->list);
@@ -1576,20 +1988,27 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1576 lpfc_nlp_put(ndlp); 1988 lpfc_nlp_put(ndlp);
1577 } 1989 }
1578 } 1990 }
1579 spin_unlock_irq(phba->host->host_lock); 1991 spin_unlock_irq(&phba->hbalock);
1580 1992
 1581 lpfc_els_abort(phba, ndlp); 1993 lpfc_els_abort(phba, ndlp);
1582 spin_lock_irq(phba->host->host_lock); 1994 spin_lock_irq(shost->host_lock);
1583 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 1995 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1584 spin_unlock_irq(phba->host->host_lock); 1996 spin_unlock_irq(shost->host_lock);
1585 1997
1586 ndlp->nlp_last_elscmd = 0; 1998 ndlp->nlp_last_elscmd = 0;
1587 del_timer_sync(&ndlp->nlp_delayfunc); 1999 del_timer_sync(&ndlp->nlp_delayfunc);
1588 2000
1589 if (!list_empty(&ndlp->els_retry_evt.evt_listp)) 2001 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1590 list_del_init(&ndlp->els_retry_evt.evt_listp); 2002 list_del_init(&ndlp->els_retry_evt.evt_listp);
2003 if (!list_empty(&ndlp->dev_loss_evt.evt_listp))
2004 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2005
2006 if (!list_empty(&ndlp->dev_loss_evt.evt_listp)) {
2007 list_del_init(&ndlp->dev_loss_evt.evt_listp);
2008 complete((struct completion *)(ndlp->dev_loss_evt.evt_arg2));
2009 }
1591 2010
1592 lpfc_unreg_rpi(phba, ndlp); 2011 lpfc_unreg_rpi(vport, ndlp);
1593 2012
1594 return 0; 2013 return 0;
1595} 2014}
@@ -1600,18 +2019,22 @@ lpfc_cleanup_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1600 * machine, defer the free till we reach the end of the state machine. 2019 * machine, defer the free till we reach the end of the state machine.
1601 */ 2020 */
1602static void 2021static void
1603lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 2022lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1604{ 2023{
1605 struct lpfc_rport_data *rdata; 2024 struct lpfc_rport_data *rdata;
1606 2025
1607 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 2026 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1608 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2027 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1609 } 2028 }
1610 2029
1611 lpfc_cleanup_node(phba, ndlp); 2030 lpfc_cleanup_node(vport, ndlp);
1612 2031
1613 if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) { 2032 /*
1614 put_device(&ndlp->rport->dev); 2033 * We can get here with a non-NULL ndlp->rport because when we
2034 * unregister a rport we don't break the rport/node linkage. So if we
 2035 * do, make sure we don't leave any dangling pointers behind.
2036 */
2037 if (ndlp->rport) {
1615 rdata = ndlp->rport->dd_data; 2038 rdata = ndlp->rport->dd_data;
1616 rdata->pnode = NULL; 2039 rdata->pnode = NULL;
1617 ndlp->rport = NULL; 2040 ndlp->rport = NULL;
@@ -1619,11 +2042,10 @@ lpfc_nlp_remove(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1619} 2042}
1620 2043
1621static int 2044static int
1622lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) 2045lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2046 uint32_t did)
1623{ 2047{
1624 D_ID mydid; 2048 D_ID mydid, ndlpdid, matchdid;
1625 D_ID ndlpdid;
1626 D_ID matchdid;
1627 2049
1628 if (did == Bcast_DID) 2050 if (did == Bcast_DID)
1629 return 0; 2051 return 0;
@@ -1637,7 +2059,7 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
1637 return 1; 2059 return 1;
1638 2060
1639 /* Next check for area/domain identically equals 0 match */ 2061 /* Next check for area/domain identically equals 0 match */
1640 mydid.un.word = phba->fc_myDID; 2062 mydid.un.word = vport->fc_myDID;
1641 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { 2063 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1642 return 0; 2064 return 0;
1643 } 2065 }
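
lpfc_matchdid() leans on the D_ID union, which splits the 24-bit Fibre Channel address into domain, area and port/ALPA bytes; when our own domain and area are zero (private loop) only the low byte is significant. A simplified sketch of two of its checks, assuming a little-endian host as in the driver's __LITTLE_ENDIAN_BITFIELD branch; the union and helper names are illustrative and the real routine handles more cases:

#include <stdint.h>
#include <stdio.h>

typedef union {
	uint32_t word;
	struct {
		uint8_t id;		/* bits 0-7: port / ALPA */
		uint8_t area;		/* bits 8-15 */
		uint8_t domain;		/* bits 16-23 */
		uint8_t rsvd;
	} b;
} fc_did;

static int matchdid(uint32_t mydid_w, uint32_t nodedid, uint32_t did)
{
	fc_did mydid = { .word = mydid_w };

	if (nodedid == did)		/* exact 24-bit match */
		return 1;
	/* Private loop: our domain/area are 0, compare ALPA bytes only. */
	if (mydid.b.domain == 0 && mydid.b.area == 0 &&
	    (nodedid & 0xff) == (did & 0xff))
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", matchdid(0x000001, 0x0000e8, 0x0000e8));  /* 1: exact */
	printf("%d\n", matchdid(0x000001, 0x0102e8, 0x0000e8));  /* 1: ALPA only */
	printf("%d\n", matchdid(0x010200, 0x0102e8, 0x0102e4));  /* 0: no match */
	return 0;
}
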
@@ -1669,101 +2091,116 @@ lpfc_matchdid(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did)
1669} 2091}
1670 2092
1671/* Search for a nodelist entry */ 2093/* Search for a nodelist entry */
1672struct lpfc_nodelist * 2094static struct lpfc_nodelist *
1673lpfc_findnode_did(struct lpfc_hba *phba, uint32_t did) 2095__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1674{ 2096{
2097 struct lpfc_hba *phba = vport->phba;
1675 struct lpfc_nodelist *ndlp; 2098 struct lpfc_nodelist *ndlp;
1676 uint32_t data1; 2099 uint32_t data1;
1677 2100
1678 spin_lock_irq(phba->host->host_lock); 2101 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1679 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2102 if (lpfc_matchdid(vport, ndlp, did)) {
1680 if (lpfc_matchdid(phba, ndlp, did)) {
1681 data1 = (((uint32_t) ndlp->nlp_state << 24) | 2103 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1682 ((uint32_t) ndlp->nlp_xri << 16) | 2104 ((uint32_t) ndlp->nlp_xri << 16) |
1683 ((uint32_t) ndlp->nlp_type << 8) | 2105 ((uint32_t) ndlp->nlp_type << 8) |
1684 ((uint32_t) ndlp->nlp_rpi & 0xff)); 2106 ((uint32_t) ndlp->nlp_rpi & 0xff));
1685 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2107 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1686 "%d:0929 FIND node DID " 2108 "%d (%d):0929 FIND node DID "
1687 " Data: x%p x%x x%x x%x\n", 2109 " Data: x%p x%x x%x x%x\n",
1688 phba->brd_no, 2110 phba->brd_no, vport->vpi,
1689 ndlp, ndlp->nlp_DID, 2111 ndlp, ndlp->nlp_DID,
1690 ndlp->nlp_flag, data1); 2112 ndlp->nlp_flag, data1);
1691 spin_unlock_irq(phba->host->host_lock);
1692 return ndlp; 2113 return ndlp;
1693 } 2114 }
1694 } 2115 }
1695 spin_unlock_irq(phba->host->host_lock);
1696 2116
1697 /* FIND node did <did> NOT FOUND */ 2117 /* FIND node did <did> NOT FOUND */
1698 lpfc_printf_log(phba, KERN_INFO, LOG_NODE, 2118 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1699 "%d:0932 FIND node did x%x NOT FOUND.\n", 2119 "%d (%d):0932 FIND node did x%x NOT FOUND.\n",
1700 phba->brd_no, did); 2120 phba->brd_no, vport->vpi, did);
1701 return NULL; 2121 return NULL;
1702} 2122}
1703 2123
1704struct lpfc_nodelist * 2124struct lpfc_nodelist *
1705lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) 2125lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
1706{ 2126{
2127 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1707 struct lpfc_nodelist *ndlp; 2128 struct lpfc_nodelist *ndlp;
1708 2129
1709 ndlp = lpfc_findnode_did(phba, did); 2130 spin_lock_irq(shost->host_lock);
2131 ndlp = __lpfc_findnode_did(vport, did);
2132 spin_unlock_irq(shost->host_lock);
2133 return ndlp;
2134}
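
The __lpfc_findnode_did()/lpfc_findnode_did() split introduced above is the usual locked/unlocked pairing: the double-underscore variant assumes the caller already holds the lock, while the plain variant is the public entry point that takes and releases it. A tiny sketch of the convention with invented names:

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[16];

/* Double-underscore variant: caller must already hold table_lock. */
static int __find_slot(int key)
{
	for (int i = 0; i < 16; i++)
		if (table[i] == key)
			return i;
	return -1;
}

/* Public entry point: takes the lock, delegates, drops it. */
static int find_slot(int key)
{
	pthread_mutex_lock(&table_lock);
	int i = __find_slot(key);
	pthread_mutex_unlock(&table_lock);
	return i;
}

int main(void)
{
	table[3] = 42;
	return find_slot(42) == 3 ? 0 : 1;
}

Internal callers that already hold the lock call the __ variant directly and so avoid recursive locking.
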
2135
2136struct lpfc_nodelist *
2137lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
2138{
2139 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2140 struct lpfc_nodelist *ndlp;
2141
2142 ndlp = lpfc_findnode_did(vport, did);
1710 if (!ndlp) { 2143 if (!ndlp) {
1711 if ((phba->fc_flag & FC_RSCN_MODE) && 2144 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
1712 ((lpfc_rscn_payload_check(phba, did) == 0))) 2145 lpfc_rscn_payload_check(vport, did) == 0)
1713 return NULL; 2146 return NULL;
1714 ndlp = (struct lpfc_nodelist *) 2147 ndlp = (struct lpfc_nodelist *)
1715 mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 2148 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
1716 if (!ndlp) 2149 if (!ndlp)
1717 return NULL; 2150 return NULL;
1718 lpfc_nlp_init(phba, ndlp, did); 2151 lpfc_nlp_init(vport, ndlp, did);
1719 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 2152 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2153 spin_lock_irq(shost->host_lock);
1720 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2154 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2155 spin_unlock_irq(shost->host_lock);
1721 return ndlp; 2156 return ndlp;
1722 } 2157 }
1723 if (phba->fc_flag & FC_RSCN_MODE) { 2158 if (vport->fc_flag & FC_RSCN_MODE) {
1724 if (lpfc_rscn_payload_check(phba, did)) { 2159 if (lpfc_rscn_payload_check(vport, did)) {
2160 spin_lock_irq(shost->host_lock);
1725 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2161 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2162 spin_unlock_irq(shost->host_lock);
1726 2163
1727 /* Since this node is marked for discovery, 2164 /* Since this node is marked for discovery,
1728 * delay timeout is not needed. 2165 * delay timeout is not needed.
1729 */ 2166 */
1730 if (ndlp->nlp_flag & NLP_DELAY_TMO) 2167 if (ndlp->nlp_flag & NLP_DELAY_TMO)
1731 lpfc_cancel_retry_delay_tmo(phba, ndlp); 2168 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1732 } else 2169 } else
1733 ndlp = NULL; 2170 ndlp = NULL;
1734 } else { 2171 } else {
1735 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || 2172 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
1736 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) 2173 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE)
1737 return NULL; 2174 return NULL;
1738 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 2175 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2176 spin_lock_irq(shost->host_lock);
1739 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2177 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2178 spin_unlock_irq(shost->host_lock);
1740 } 2179 }
1741 return ndlp; 2180 return ndlp;
1742} 2181}
1743 2182
1744/* Build a list of nodes to discover based on the loopmap */ 2183/* Build a list of nodes to discover based on the loopmap */
1745void 2184void
1746lpfc_disc_list_loopmap(struct lpfc_hba * phba) 2185lpfc_disc_list_loopmap(struct lpfc_vport *vport)
1747{ 2186{
2187 struct lpfc_hba *phba = vport->phba;
1748 int j; 2188 int j;
1749 uint32_t alpa, index; 2189 uint32_t alpa, index;
1750 2190
1751 if (phba->hba_state <= LPFC_LINK_DOWN) { 2191 if (!lpfc_is_link_up(phba))
1752 return; 2192 return;
1753 } 2193
1754 if (phba->fc_topology != TOPOLOGY_LOOP) { 2194 if (phba->fc_topology != TOPOLOGY_LOOP)
1755 return; 2195 return;
1756 }
1757 2196
1758 /* Check for loop map present or not */ 2197 /* Check for loop map present or not */
1759 if (phba->alpa_map[0]) { 2198 if (phba->alpa_map[0]) {
1760 for (j = 1; j <= phba->alpa_map[0]; j++) { 2199 for (j = 1; j <= phba->alpa_map[0]; j++) {
1761 alpa = phba->alpa_map[j]; 2200 alpa = phba->alpa_map[j];
1762 2201 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
1763 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1764 continue; 2202 continue;
1765 } 2203 lpfc_setup_disc_node(vport, alpa);
1766 lpfc_setup_disc_node(phba, alpa);
1767 } 2204 }
1768 } else { 2205 } else {
1769 /* No alpamap, so try all alpa's */ 2206 /* No alpamap, so try all alpa's */
@@ -1776,113 +2213,167 @@ lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1776 else 2213 else
1777 index = FC_MAXLOOP - j - 1; 2214 index = FC_MAXLOOP - j - 1;
1778 alpa = lpfcAlpaArray[index]; 2215 alpa = lpfcAlpaArray[index];
1779 if ((phba->fc_myDID & 0xff) == alpa) { 2216 if ((vport->fc_myDID & 0xff) == alpa)
1780 continue; 2217 continue;
1781 } 2218 lpfc_setup_disc_node(vport, alpa);
1782
1783 lpfc_setup_disc_node(phba, alpa);
1784 } 2219 }
1785 } 2220 }
1786 return; 2221 return;
1787} 2222}
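
The loopmap walk above reads a count-prefixed byte array: alpa_map[0] is the number of valid entries, alpa_map[1..alpa_map[0]] are the addresses, and our own ALPA plus the reserved value 0 are skipped; with no map at all the driver instead tries every entry of lpfcAlpaArray in priority order. A sketch of the count-prefixed walk, with made-up map contents:

#include <stdio.h>
#include <stdint.h>

static void walk_alpa_map(const uint8_t *map, uint8_t my_alpa)
{
	if (map[0] == 0)
		return;			/* no map: caller falls back to all ALPAs */
	for (int j = 1; j <= map[0]; j++) {
		uint8_t alpa = map[j];
		if (alpa == my_alpa || alpa == 0)
			continue;	/* skip ourselves and the reserved 0 */
		printf("would set up a discovery node for ALPA 0x%02x\n", alpa);
	}
}

int main(void)
{
	const uint8_t map[] = { 3, 0x01, 0x02, 0xe8 };	/* 3 entries follow */
	walk_alpa_map(map, 0x02);
	return 0;
}
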
1788 2223
1789/* Start Link up / RSCN discovery on NPR list */
1790void 2224void
1791lpfc_disc_start(struct lpfc_hba * phba) 2225lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
1792{ 2226{
1793 struct lpfc_sli *psli;
1794 LPFC_MBOXQ_t *mbox; 2227 LPFC_MBOXQ_t *mbox;
1795 struct lpfc_nodelist *ndlp, *next_ndlp; 2228 struct lpfc_sli *psli = &phba->sli;
2229 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
2230 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
2231 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
2232 int rc;
2233
2234 /*
 2235 * if it's not a physical port or if we already sent
 2236 * clear_la, then don't send it.
2237 */
2238 if ((phba->link_state >= LPFC_CLEAR_LA) ||
2239 (vport->port_type != LPFC_PHYSICAL_PORT))
2240 return;
2241
2242 /* Link up discovery */
2243 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
2244 phba->link_state = LPFC_CLEAR_LA;
2245 lpfc_clear_la(phba, mbox);
2246 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2247 mbox->vport = vport;
2248 rc = lpfc_sli_issue_mbox(phba, mbox, (MBX_NOWAIT |
2249 MBX_STOP_IOCB));
2250 if (rc == MBX_NOT_FINISHED) {
2251 mempool_free(mbox, phba->mbox_mem_pool);
2252 lpfc_disc_flush_list(vport);
2253 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2254 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2255 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
2256 phba->link_state = LPFC_HBA_ERROR;
2257 }
2258 }
2259}
2260
2261/* Reg_vpi to tell firmware to resume normal operations */
2262void
2263lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
2264{
2265 LPFC_MBOXQ_t *regvpimbox;
2266
2267 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2268 if (regvpimbox) {
2269 lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
2270 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
2271 regvpimbox->vport = vport;
2272 if (lpfc_sli_issue_mbox(phba, regvpimbox,
2273 (MBX_NOWAIT | MBX_STOP_IOCB))
2274 == MBX_NOT_FINISHED) {
2275 mempool_free(regvpimbox, phba->mbox_mem_pool);
2276 }
2277 }
2278}
2279
2280/* Start Link up / RSCN discovery on NPR nodes */
2281void
2282lpfc_disc_start(struct lpfc_vport *vport)
2283{
2284 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2285 struct lpfc_hba *phba = vport->phba;
1796 uint32_t num_sent; 2286 uint32_t num_sent;
1797 uint32_t clear_la_pending; 2287 uint32_t clear_la_pending;
1798 int did_changed; 2288 int did_changed;
1799 int rc;
1800 2289
1801 psli = &phba->sli; 2290 if (!lpfc_is_link_up(phba))
1802
1803 if (phba->hba_state <= LPFC_LINK_DOWN) {
1804 return; 2291 return;
1805 } 2292
1806 if (phba->hba_state == LPFC_CLEAR_LA) 2293 if (phba->link_state == LPFC_CLEAR_LA)
1807 clear_la_pending = 1; 2294 clear_la_pending = 1;
1808 else 2295 else
1809 clear_la_pending = 0; 2296 clear_la_pending = 0;
1810 2297
1811 if (phba->hba_state < LPFC_HBA_READY) { 2298 if (vport->port_state < LPFC_VPORT_READY)
1812 phba->hba_state = LPFC_DISC_AUTH; 2299 vport->port_state = LPFC_DISC_AUTH;
1813 }
1814 lpfc_set_disctmo(phba);
1815 2300
1816 if (phba->fc_prevDID == phba->fc_myDID) { 2301 lpfc_set_disctmo(vport);
2302
2303 if (vport->fc_prevDID == vport->fc_myDID)
1817 did_changed = 0; 2304 did_changed = 0;
1818 } else { 2305 else
1819 did_changed = 1; 2306 did_changed = 1;
1820 } 2307
1821 phba->fc_prevDID = phba->fc_myDID; 2308 vport->fc_prevDID = vport->fc_myDID;
1822 phba->num_disc_nodes = 0; 2309 vport->num_disc_nodes = 0;
1823 2310
1824 /* Start Discovery state <hba_state> */ 2311 /* Start Discovery state <hba_state> */
1825 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 2312 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1826 "%d:0202 Start Discovery hba state x%x " 2313 "%d (%d):0202 Start Discovery hba state x%x "
1827 "Data: x%x x%x x%x\n", 2314 "Data: x%x x%x x%x\n",
1828 phba->brd_no, phba->hba_state, phba->fc_flag, 2315 phba->brd_no, vport->vpi, vport->port_state,
1829 phba->fc_plogi_cnt, phba->fc_adisc_cnt); 2316 vport->fc_flag, vport->fc_plogi_cnt,
1830 2317 vport->fc_adisc_cnt);
1831 /* If our did changed, we MUST do PLOGI */
1832 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) {
1833 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
1834 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
1835 did_changed) {
1836 spin_lock_irq(phba->host->host_lock);
1837 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1838 spin_unlock_irq(phba->host->host_lock);
1839 }
1840 }
1841 2318
1842 /* First do ADISCs - if any */ 2319 /* First do ADISCs - if any */
1843 num_sent = lpfc_els_disc_adisc(phba); 2320 num_sent = lpfc_els_disc_adisc(vport);
1844 2321
1845 if (num_sent) 2322 if (num_sent)
1846 return; 2323 return;
1847 2324
1848 if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) { 2325 /*
2326 * For SLI3, cmpl_reg_vpi will set port_state to READY, and
2327 * continue discovery.
2328 */
2329 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2330 !(vport->fc_flag & FC_RSCN_MODE)) {
2331 lpfc_issue_reg_vpi(phba, vport);
2332 return;
2333 }
2334
2335 /*
2336 * For SLI2, we need to set port_state to READY and continue
2337 * discovery.
2338 */
2339 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
1849 /* If we get here, there is nothing to ADISC */ 2340 /* If we get here, there is nothing to ADISC */
1850 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) { 2341 if (vport->port_type == LPFC_PHYSICAL_PORT)
1851 phba->hba_state = LPFC_CLEAR_LA; 2342 lpfc_issue_clear_la(phba, vport);
1852 lpfc_clear_la(phba, mbox); 2343
1853 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2344 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1854 rc = lpfc_sli_issue_mbox(phba, mbox, 2345 vport->num_disc_nodes = 0;
1855 (MBX_NOWAIT | MBX_STOP_IOCB)); 2346 /* go thru NPR nodes and issue ELS PLOGIs */
1856 if (rc == MBX_NOT_FINISHED) { 2347 if (vport->fc_npr_cnt)
1857 mempool_free( mbox, phba->mbox_mem_pool); 2348 lpfc_els_disc_plogi(vport);
1858 lpfc_disc_flush_list(phba); 2349
1859 psli->ring[(psli->extra_ring)].flag &= 2350 if (!vport->num_disc_nodes) {
1860 ~LPFC_STOP_IOCB_EVENT; 2351 spin_lock_irq(shost->host_lock);
1861 psli->ring[(psli->fcp_ring)].flag &= 2352 vport->fc_flag &= ~FC_NDISC_ACTIVE;
1862 ~LPFC_STOP_IOCB_EVENT; 2353 spin_unlock_irq(shost->host_lock);
1863 psli->ring[(psli->next_ring)].flag &= 2354 lpfc_can_disctmo(vport);
1864 ~LPFC_STOP_IOCB_EVENT;
1865 phba->hba_state = LPFC_HBA_READY;
1866 } 2355 }
1867 } 2356 }
2357 vport->port_state = LPFC_VPORT_READY;
1868 } else { 2358 } else {
1869 /* Next do PLOGIs - if any */ 2359 /* Next do PLOGIs - if any */
1870 num_sent = lpfc_els_disc_plogi(phba); 2360 num_sent = lpfc_els_disc_plogi(vport);
1871 2361
1872 if (num_sent) 2362 if (num_sent)
1873 return; 2363 return;
1874 2364
1875 if (phba->fc_flag & FC_RSCN_MODE) { 2365 if (vport->fc_flag & FC_RSCN_MODE) {
1876 /* Check to see if more RSCNs came in while we 2366 /* Check to see if more RSCNs came in while we
1877 * were processing this one. 2367 * were processing this one.
1878 */ 2368 */
1879 if ((phba->fc_rscn_id_cnt == 0) && 2369 if ((vport->fc_rscn_id_cnt == 0) &&
1880 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { 2370 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
1881 spin_lock_irq(phba->host->host_lock); 2371 spin_lock_irq(shost->host_lock);
1882 phba->fc_flag &= ~FC_RSCN_MODE; 2372 vport->fc_flag &= ~FC_RSCN_MODE;
1883 spin_unlock_irq(phba->host->host_lock); 2373 spin_unlock_irq(shost->host_lock);
2374 lpfc_can_disctmo(vport);
1884 } else 2375 } else
1885 lpfc_els_handle_rscn(phba); 2376 lpfc_els_handle_rscn(vport);
1886 } 2377 }
1887 } 2378 }
1888 return; 2379 return;
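
lpfc_disc_start() is re-entrant by design: it issues one wave (ADISC), returns early if anything went out, and is re-driven from the completion path until a wave finishes with nothing left to send, at which point it falls through to PLOGIs. The shape, reduced to a sketch with invented helpers and counters:

#include <stdio.h>

static int adisc_pending = 2, plogi_pending = 1;

static int send_adisc_wave(void) { int n = adisc_pending; adisc_pending = 0; return n; }
static int send_plogi_wave(void) { int n = plogi_pending; plogi_pending = 0; return n; }

static void disc_start(void)
{
	if (send_adisc_wave()) {
		printf("ADISC wave in flight, waiting for completions\n");
		return;		/* the completion path calls disc_start() again */
	}
	if (send_plogi_wave()) {
		printf("PLOGI wave in flight\n");
		return;
	}
	printf("discovery complete\n");
}

int main(void)
{
	disc_start();	/* sends ADISCs */
	disc_start();	/* re-driven: sends PLOGIs */
	disc_start();	/* re-driven: done */
	return 0;
}
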
@@ -1893,7 +2384,7 @@ lpfc_disc_start(struct lpfc_hba * phba)
 1893 * ring that match the specified nodelist. 2384
1894 */ 2385 */
1895static void 2386static void
1896lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 2387lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
1897{ 2388{
1898 LIST_HEAD(completions); 2389 LIST_HEAD(completions);
1899 struct lpfc_sli *psli; 2390 struct lpfc_sli *psli;
@@ -1907,7 +2398,7 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1907 /* Error matching iocb on txq or txcmplq 2398 /* Error matching iocb on txq or txcmplq
1908 * First check the txq. 2399 * First check the txq.
1909 */ 2400 */
1910 spin_lock_irq(phba->host->host_lock); 2401 spin_lock_irq(&phba->hbalock);
1911 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 2402 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
1912 if (iocb->context1 != ndlp) { 2403 if (iocb->context1 != ndlp) {
1913 continue; 2404 continue;
@@ -1927,36 +2418,36 @@ lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1927 continue; 2418 continue;
1928 } 2419 }
1929 icmd = &iocb->iocb; 2420 icmd = &iocb->iocb;
1930 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || 2421 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
1931 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 2422 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
1932 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 2423 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1933 } 2424 }
1934 } 2425 }
1935 spin_unlock_irq(phba->host->host_lock); 2426 spin_unlock_irq(&phba->hbalock);
1936 2427
1937 while (!list_empty(&completions)) { 2428 while (!list_empty(&completions)) {
1938 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 2429 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1939 list_del(&iocb->list); 2430 list_del_init(&iocb->list);
1940 2431
1941 if (iocb->iocb_cmpl) { 2432 if (!iocb->iocb_cmpl)
2433 lpfc_sli_release_iocbq(phba, iocb);
2434 else {
1942 icmd = &iocb->iocb; 2435 icmd = &iocb->iocb;
1943 icmd->ulpStatus = IOSTAT_LOCAL_REJECT; 2436 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1944 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 2437 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1945 (iocb->iocb_cmpl) (phba, iocb, iocb); 2438 (iocb->iocb_cmpl) (phba, iocb, iocb);
1946 } else 2439 }
1947 lpfc_sli_release_iocbq(phba, iocb);
1948 } 2440 }
1949
1950 return;
1951} 2441}
1952 2442
1953void 2443void
1954lpfc_disc_flush_list(struct lpfc_hba * phba) 2444lpfc_disc_flush_list(struct lpfc_vport *vport)
1955{ 2445{
1956 struct lpfc_nodelist *ndlp, *next_ndlp; 2446 struct lpfc_nodelist *ndlp, *next_ndlp;
2447 struct lpfc_hba *phba = vport->phba;
1957 2448
1958 if (phba->fc_plogi_cnt || phba->fc_adisc_cnt) { 2449 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
1959 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 2450 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
1960 nlp_listp) { 2451 nlp_listp) {
1961 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 2452 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
1962 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 2453 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
@@ -1967,6 +2458,14 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
1967 } 2458 }
1968} 2459}
1969 2460
2461void
2462lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
2463{
2464 lpfc_els_flush_rscn(vport);
2465 lpfc_els_flush_cmd(vport);
2466 lpfc_disc_flush_list(vport);
2467}
2468
1970/*****************************************************************************/ 2469/*****************************************************************************/
1971/* 2470/*
1972 * NAME: lpfc_disc_timeout 2471 * NAME: lpfc_disc_timeout
@@ -1985,158 +2484,154 @@ lpfc_disc_flush_list(struct lpfc_hba * phba)
1985void 2484void
1986lpfc_disc_timeout(unsigned long ptr) 2485lpfc_disc_timeout(unsigned long ptr)
1987{ 2486{
1988 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 2487 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
2488 struct lpfc_hba *phba = vport->phba;
1989 unsigned long flags = 0; 2489 unsigned long flags = 0;
1990 2490
1991 if (unlikely(!phba)) 2491 if (unlikely(!phba))
1992 return; 2492 return;
1993 2493
1994 spin_lock_irqsave(phba->host->host_lock, flags); 2494 if ((vport->work_port_events & WORKER_DISC_TMO) == 0) {
1995 if (!(phba->work_hba_events & WORKER_DISC_TMO)) { 2495 spin_lock_irqsave(&vport->work_port_lock, flags);
1996 phba->work_hba_events |= WORKER_DISC_TMO; 2496 vport->work_port_events |= WORKER_DISC_TMO;
2497 spin_unlock_irqrestore(&vport->work_port_lock, flags);
2498
2499 spin_lock_irqsave(&phba->hbalock, flags);
1997 if (phba->work_wait) 2500 if (phba->work_wait)
1998 wake_up(phba->work_wait); 2501 lpfc_worker_wake_up(phba);
2502 spin_unlock_irqrestore(&phba->hbalock, flags);
1999 } 2503 }
2000 spin_unlock_irqrestore(phba->host->host_lock, flags);
2001 return; 2504 return;
2002} 2505}
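
lpfc_disc_timeout() shows the standard timer-to-worker handoff: the timer callback only sets WORKER_DISC_TMO under the lock and wakes the worker, and lpfc_disc_timeout_handler() then does the slow work in process context. A userspace sketch of the handoff (build with -pthread), with a condition variable standing in for lpfc_worker_wake_up(); names are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static unsigned events;
#define EVT_DISC_TMO 0x1

static void timer_fired(void)		/* "interrupt" context: keep it short */
{
	pthread_mutex_lock(&lock);
	if (!(events & EVT_DISC_TMO)) {	/* don't re-post a pending event */
		events |= EVT_DISC_TMO;
		pthread_cond_signal(&wake);
	}
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!(events & EVT_DISC_TMO))
		pthread_cond_wait(&wake, &lock);
	events &= ~EVT_DISC_TMO;
	pthread_mutex_unlock(&lock);
	printf("handling discovery timeout in process context\n");
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	timer_fired();
	pthread_join(t, NULL);
	return 0;
}
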
2003 2506
2004static void 2507static void
2005lpfc_disc_timeout_handler(struct lpfc_hba *phba) 2508lpfc_disc_timeout_handler(struct lpfc_vport *vport)
2006{ 2509{
2007 struct lpfc_sli *psli; 2510 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2511 struct lpfc_hba *phba = vport->phba;
2512 struct lpfc_sli *psli = &phba->sli;
2008 struct lpfc_nodelist *ndlp, *next_ndlp; 2513 struct lpfc_nodelist *ndlp, *next_ndlp;
2009 LPFC_MBOXQ_t *clearlambox, *initlinkmbox; 2514 LPFC_MBOXQ_t *initlinkmbox;
2010 int rc, clrlaerr = 0; 2515 int rc, clrlaerr = 0;
2011 2516
2012 if (unlikely(!phba)) 2517 if (!(vport->fc_flag & FC_DISC_TMO))
2013 return; 2518 return;
2014 2519
2015 if (!(phba->fc_flag & FC_DISC_TMO)) 2520 spin_lock_irq(shost->host_lock);
2016 return; 2521 vport->fc_flag &= ~FC_DISC_TMO;
2522 spin_unlock_irq(shost->host_lock);
2017 2523
2018 psli = &phba->sli; 2524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2525 "disc timeout: state:x%x rtry:x%x flg:x%x",
2526 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
2019 2527
2020 spin_lock_irq(phba->host->host_lock); 2528 switch (vport->port_state) {
2021 phba->fc_flag &= ~FC_DISC_TMO;
2022 spin_unlock_irq(phba->host->host_lock);
2023
2024 switch (phba->hba_state) {
2025 2529
2026 case LPFC_LOCAL_CFG_LINK: 2530 case LPFC_LOCAL_CFG_LINK:
2027 /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */ 2531 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
2028 /* FAN timeout */ 2532 * FAN
2029 lpfc_printf_log(phba, 2533 */
2030 KERN_WARNING, 2534 /* FAN timeout */
2031 LOG_DISCOVERY, 2535 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
2032 "%d:0221 FAN timeout\n", 2536 "%d (%d):0221 FAN timeout\n",
2033 phba->brd_no); 2537 phba->brd_no, vport->vpi);
2034 2538
2035 /* Start discovery by sending FLOGI, clean up old rpis */ 2539 /* Start discovery by sending FLOGI, clean up old rpis */
2036 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, 2540 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
2037 nlp_listp) { 2541 nlp_listp) {
2038 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 2542 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
2039 continue; 2543 continue;
2040 if (ndlp->nlp_type & NLP_FABRIC) { 2544 if (ndlp->nlp_type & NLP_FABRIC) {
2041 /* Clean up the ndlp on Fabric connections */ 2545 /* Clean up the ndlp on Fabric connections */
2042 lpfc_drop_node(phba, ndlp); 2546 lpfc_drop_node(vport, ndlp);
2043 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 2547 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2044 /* Fail outstanding IO now since device 2548 /* Fail outstanding IO now since device
2045 * is marked for PLOGI. 2549 * is marked for PLOGI.
2046 */ 2550 */
2047 lpfc_unreg_rpi(phba, ndlp); 2551 lpfc_unreg_rpi(vport, ndlp);
2048 } 2552 }
2049 } 2553 }
2050 phba->hba_state = LPFC_FLOGI; 2554 if (vport->port_state != LPFC_FLOGI) {
2051 lpfc_set_disctmo(phba); 2555 vport->port_state = LPFC_FLOGI;
2052 lpfc_initial_flogi(phba); 2556 lpfc_set_disctmo(vport);
2557 lpfc_initial_flogi(vport);
2558 }
2053 break; 2559 break;
2054 2560
2561 case LPFC_FDISC:
2055 case LPFC_FLOGI: 2562 case LPFC_FLOGI:
2056 /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 2563 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2057 /* Initial FLOGI timeout */ 2564 /* Initial FLOGI timeout */
2058 lpfc_printf_log(phba, 2565 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2059 KERN_ERR, 2566 "%d (%d):0222 Initial %s timeout\n",
2060 LOG_DISCOVERY, 2567 phba->brd_no, vport->vpi,
2061 "%d:0222 Initial FLOGI timeout\n", 2568 vport->vpi ? "FLOGI" : "FDISC");
2062 phba->brd_no);
2063 2569
2064 /* Assume no Fabric and go on with discovery. 2570 /* Assume no Fabric and go on with discovery.
2065 * Check for outstanding ELS FLOGI to abort. 2571 * Check for outstanding ELS FLOGI to abort.
2066 */ 2572 */
2067 2573
2068 /* FLOGI failed, so just use loop map to make discovery list */ 2574 /* FLOGI failed, so just use loop map to make discovery list */
2069 lpfc_disc_list_loopmap(phba); 2575 lpfc_disc_list_loopmap(vport);
2070 2576
2071 /* Start discovery */ 2577 /* Start discovery */
2072 lpfc_disc_start(phba); 2578 lpfc_disc_start(vport);
2073 break; 2579 break;
2074 2580
2075 case LPFC_FABRIC_CFG_LINK: 2581 case LPFC_FABRIC_CFG_LINK:
2076 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 2582 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2077 NameServer login */ 2583 NameServer login */
2078 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2584 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2079 "%d:0223 Timeout while waiting for NameServer " 2585 "%d (%d):0223 Timeout while waiting for "
2080 "login\n", phba->brd_no); 2586 "NameServer login\n",
2587 phba->brd_no, vport->vpi);
2081 2588
2082 /* Next look for NameServer ndlp */ 2589 /* Next look for NameServer ndlp */
2083 ndlp = lpfc_findnode_did(phba, NameServer_DID); 2590 ndlp = lpfc_findnode_did(vport, NameServer_DID);
2084 if (ndlp) 2591 if (ndlp)
2085 lpfc_nlp_put(ndlp); 2592 lpfc_nlp_put(ndlp);
2086 /* Start discovery */ 2593 /* Start discovery */
2087 lpfc_disc_start(phba); 2594 lpfc_disc_start(vport);
2088 break; 2595 break;
2089 2596
2090 case LPFC_NS_QRY: 2597 case LPFC_NS_QRY:
2091 /* Check for wait for NameServer Rsp timeout */ 2598 /* Check for wait for NameServer Rsp timeout */
2092 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2599 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2093 "%d:0224 NameServer Query timeout " 2600 "%d (%d):0224 NameServer Query timeout "
2094 "Data: x%x x%x\n", 2601 "Data: x%x x%x\n",
2095 phba->brd_no, 2602 phba->brd_no, vport->vpi,
2096 phba->fc_ns_retry, LPFC_MAX_NS_RETRY); 2603 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2097 2604
2098 ndlp = lpfc_findnode_did(phba, NameServer_DID); 2605 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2099 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 2606 /* Try it one more time */
2100 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) { 2607 vport->fc_ns_retry++;
2101 /* Try it one more time */ 2608 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
2102 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT); 2609 vport->fc_ns_retry, 0);
2103 if (rc == 0) 2610 if (rc == 0)
2104 break; 2611 break;
2105 }
2106 phba->fc_ns_retry = 0;
2107 } 2612 }
2613 vport->fc_ns_retry = 0;
2108 2614
2109 /* Nothing to authenticate, so CLEAR_LA right now */ 2615 /*
2110 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2616 * Discovery is over.
2111 if (!clearlambox) { 2617 * set port_state to PORT_READY if SLI2.
2112 clrlaerr = 1; 2618 * cmpl_reg_vpi will set port_state to READY for SLI3.
2113 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2619 */
2114 "%d:0226 Device Discovery " 2620 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2115 "completion error\n", 2621 lpfc_issue_reg_vpi(phba, vport);
2116 phba->brd_no); 2622 else { /* NPIV Not enabled */
2117 phba->hba_state = LPFC_HBA_ERROR; 2623 lpfc_issue_clear_la(phba, vport);
2118 break; 2624 vport->port_state = LPFC_VPORT_READY;
2119 }
2120
2121 phba->hba_state = LPFC_CLEAR_LA;
2122 lpfc_clear_la(phba, clearlambox);
2123 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2124 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2125 (MBX_NOWAIT | MBX_STOP_IOCB));
2126 if (rc == MBX_NOT_FINISHED) {
2127 mempool_free(clearlambox, phba->mbox_mem_pool);
2128 clrlaerr = 1;
2129 break;
2130 } 2625 }
2131 2626
2132 /* Setup and issue mailbox INITIALIZE LINK command */ 2627 /* Setup and issue mailbox INITIALIZE LINK command */
2133 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2628 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2134 if (!initlinkmbox) { 2629 if (!initlinkmbox) {
2135 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2630 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2136 "%d:0206 Device Discovery " 2631 "%d (%d):0206 Device Discovery "
2137 "completion error\n", 2632 "completion error\n",
2138 phba->brd_no); 2633 phba->brd_no, vport->vpi);
2139 phba->hba_state = LPFC_HBA_ERROR; 2634 phba->link_state = LPFC_HBA_ERROR;
2140 break; 2635 break;
2141 } 2636 }
2142 2637
@@ -2144,6 +2639,8 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2144 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 2639 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2145 phba->cfg_link_speed); 2640 phba->cfg_link_speed);
2146 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; 2641 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2642 initlinkmbox->vport = vport;
2643 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2147 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, 2644 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2148 (MBX_NOWAIT | MBX_STOP_IOCB)); 2645 (MBX_NOWAIT | MBX_STOP_IOCB));
2149 lpfc_set_loopback_flag(phba); 2646 lpfc_set_loopback_flag(phba);
@@ -2154,67 +2651,81 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2154 2651
2155 case LPFC_DISC_AUTH: 2652 case LPFC_DISC_AUTH:
2156 /* Node Authentication timeout */ 2653 /* Node Authentication timeout */
2157 lpfc_printf_log(phba, 2654 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2158 KERN_ERR, 2655 "%d (%d):0227 Node Authentication timeout\n",
2159 LOG_DISCOVERY, 2656 phba->brd_no, vport->vpi);
2160 "%d:0227 Node Authentication timeout\n", 2657 lpfc_disc_flush_list(vport);
2161 phba->brd_no); 2658
2162 lpfc_disc_flush_list(phba); 2659 /*
2163 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2660 * set port_state to PORT_READY if SLI2.
2164 if (!clearlambox) { 2661 * cmpl_reg_vpi will set port_state to READY for SLI3.
2165 clrlaerr = 1; 2662 */
2166 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 2663 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
2167 "%d:0207 Device Discovery " 2664 lpfc_issue_reg_vpi(phba, vport);
2168 "completion error\n", 2665 else { /* NPIV Not enabled */
2169 phba->brd_no); 2666 lpfc_issue_clear_la(phba, vport);
2170 phba->hba_state = LPFC_HBA_ERROR; 2667 vport->port_state = LPFC_VPORT_READY;
2171 break;
2172 } 2668 }
2173 phba->hba_state = LPFC_CLEAR_LA; 2669 break;
2174 lpfc_clear_la(phba, clearlambox); 2670
2175 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 2671 case LPFC_VPORT_READY:
2176 rc = lpfc_sli_issue_mbox(phba, clearlambox, 2672 if (vport->fc_flag & FC_RSCN_MODE) {
2177 (MBX_NOWAIT | MBX_STOP_IOCB)); 2673 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2178 if (rc == MBX_NOT_FINISHED) { 2674 "%d (%d):0231 RSCN timeout Data: x%x "
2179 mempool_free(clearlambox, phba->mbox_mem_pool); 2675 "x%x\n",
2180 clrlaerr = 1; 2676 phba->brd_no, vport->vpi,
2677 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
2678
2679 /* Cleanup any outstanding ELS commands */
2680 lpfc_els_flush_cmd(vport);
2681
2682 lpfc_els_flush_rscn(vport);
2683 lpfc_disc_flush_list(vport);
2181 } 2684 }
2182 break; 2685 break;
2183 2686
2687 default:
2688 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2689 "%d (%d):0229 Unexpected discovery timeout, "
2690 "vport State x%x\n",
2691 phba->brd_no, vport->vpi, vport->port_state);
2692
2693 break;
2694 }
2695
2696 switch (phba->link_state) {
2184 case LPFC_CLEAR_LA: 2697 case LPFC_CLEAR_LA:
2185 /* CLEAR LA timeout */ 2698 /* CLEAR LA timeout */
2186 lpfc_printf_log(phba, 2699 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2187 KERN_ERR, 2700 "%d (%d):0228 CLEAR LA timeout\n",
2188 LOG_DISCOVERY, 2701 phba->brd_no, vport->vpi);
2189 "%d:0228 CLEAR LA timeout\n",
2190 phba->brd_no);
2191 clrlaerr = 1; 2702 clrlaerr = 1;
2192 break; 2703 break;
2193 2704
2194 case LPFC_HBA_READY: 2705 case LPFC_LINK_UNKNOWN:
2195 if (phba->fc_flag & FC_RSCN_MODE) { 2706 case LPFC_WARM_START:
2196 lpfc_printf_log(phba, 2707 case LPFC_INIT_START:
2197 KERN_ERR, 2708 case LPFC_INIT_MBX_CMDS:
2198 LOG_DISCOVERY, 2709 case LPFC_LINK_DOWN:
2199 "%d:0231 RSCN timeout Data: x%x x%x\n", 2710 case LPFC_LINK_UP:
2200 phba->brd_no, 2711 case LPFC_HBA_ERROR:
2201 phba->fc_ns_retry, LPFC_MAX_NS_RETRY); 2712 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2202 2713 "%d (%d):0230 Unexpected timeout, hba link "
2203 /* Cleanup any outstanding ELS commands */ 2714 "state x%x\n",
2204 lpfc_els_flush_cmd(phba); 2715 phba->brd_no, vport->vpi, phba->link_state);
2716 clrlaerr = 1;
2717 break;
2205 2718
2206 lpfc_els_flush_rscn(phba); 2719 case LPFC_HBA_READY:
2207 lpfc_disc_flush_list(phba);
2208 }
2209 break; 2720 break;
2210 } 2721 }
2211 2722
2212 if (clrlaerr) { 2723 if (clrlaerr) {
2213 lpfc_disc_flush_list(phba); 2724 lpfc_disc_flush_list(vport);
2214 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2725 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2215 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2726 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2216 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 2727 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2217 phba->hba_state = LPFC_HBA_READY; 2728 vport->port_state = LPFC_VPORT_READY;
2218 } 2729 }
2219 2730
2220 return; 2731 return;
@@ -2227,37 +2738,29 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2227 * handed off to the SLI layer. 2738 * handed off to the SLI layer.
2228 */ 2739 */
2229void 2740void
2230lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 2741lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2231{ 2742{
2232 struct lpfc_sli *psli; 2743 MAILBOX_t *mb = &pmb->mb;
2233 MAILBOX_t *mb; 2744 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
2234 struct lpfc_dmabuf *mp; 2745 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2235 struct lpfc_nodelist *ndlp; 2746 struct lpfc_vport *vport = pmb->vport;
2236
2237 psli = &phba->sli;
2238 mb = &pmb->mb;
2239
2240 ndlp = (struct lpfc_nodelist *) pmb->context2;
2241 mp = (struct lpfc_dmabuf *) (pmb->context1);
2242 2747
2243 pmb->context1 = NULL; 2748 pmb->context1 = NULL;
2244 2749
2245 ndlp->nlp_rpi = mb->un.varWords[0]; 2750 ndlp->nlp_rpi = mb->un.varWords[0];
2246 ndlp->nlp_type |= NLP_FABRIC; 2751 ndlp->nlp_type |= NLP_FABRIC;
2247 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 2752 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
2248 2753
2249 /* Start issuing Fabric-Device Management Interface (FDMI) 2754 /*
2250 * command to 0xfffffa (FDMI well known port) 2755 * Start issuing Fabric-Device Management Interface (FDMI) command to
 2756 * 0xfffffa (FDMI well known port) or delay issuing the FDMI command if
 2757 * fdmi-on=2 (supporting RPA/hostname)
2251 */ 2758 */
2252 if (phba->cfg_fdmi_on == 1) { 2759
2253 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA); 2760 if (phba->cfg_fdmi_on == 1)
2254 } else { 2761 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
2255 /* 2762 else
2256 * Delay issuing FDMI command if fdmi-on=2 2763 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
2257 * (supporting RPA/hostnmae)
2258 */
2259 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2260 }
2261 2764
2262 /* Mailbox took a reference to the node */ 2765 /* Mailbox took a reference to the node */
2263 lpfc_nlp_put(ndlp); 2766 lpfc_nlp_put(ndlp);
@@ -2283,16 +2786,12 @@ lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
2283 sizeof(ndlp->nlp_portname)) == 0; 2786 sizeof(ndlp->nlp_portname)) == 0;
2284} 2787}
2285 2788
2286/*
2287 * Search node lists for a remote port matching filter criteria
2288 * Caller needs to hold host_lock before calling this routine.
2289 */
2290struct lpfc_nodelist * 2789struct lpfc_nodelist *
2291__lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) 2790__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2292{ 2791{
2293 struct lpfc_nodelist *ndlp; 2792 struct lpfc_nodelist *ndlp;
2294 2793
2295 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 2794 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2296 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE && 2795 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE &&
2297 filter(ndlp, param)) 2796 filter(ndlp, param))
2298 return ndlp; 2797 return ndlp;
@@ -2302,68 +2801,104 @@ __lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param)
2302 2801
2303/* 2802/*
2304 * Search node lists for a remote port matching filter criteria 2803 * Search node lists for a remote port matching filter criteria
 2305 * This routine is used when the caller does NOT have host_lock. 2804 * This routine is used when the caller does NOT hold the host_lock.
2306 */ 2805 */
2307struct lpfc_nodelist * 2806struct lpfc_nodelist *
2308lpfc_find_node(struct lpfc_hba *phba, node_filter filter, void *param) 2807lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
2309{ 2808{
2809 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2310 struct lpfc_nodelist *ndlp; 2810 struct lpfc_nodelist *ndlp;
2311 2811
2312 spin_lock_irq(phba->host->host_lock); 2812 spin_lock_irq(shost->host_lock);
2313 ndlp = __lpfc_find_node(phba, filter, param); 2813 ndlp = __lpfc_find_node(vport, filter, param);
2314 spin_unlock_irq(phba->host->host_lock); 2814 spin_unlock_irq(shost->host_lock);
2315 return ndlp; 2815 return ndlp;
2316} 2816}
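
__lpfc_find_node() generalizes these lookups: one list walker takes a node_filter callback plus an opaque parameter, and lpfc_filter_by_rpi()/lpfc_filter_by_wwpn() supply the predicates. The same shape in a standalone sketch; the types and sample data are illustrative:

#include <stdio.h>
#include <string.h>

struct node { int rpi; char wwpn[8]; };
typedef int (*node_filter)(const struct node *, const void *param);

static int filter_by_rpi(const struct node *n, const void *param)
{
	return n->rpi == *(const int *)param;
}

static int filter_by_wwpn(const struct node *n, const void *param)
{
	return memcmp(n->wwpn, param, sizeof(n->wwpn)) == 0;
}

/* One generic walker, many predicates. */
static const struct node *find_node(const struct node *v, int cnt,
				    node_filter filter, const void *param)
{
	for (int i = 0; i < cnt; i++)
		if (filter(&v[i], param))
			return &v[i];
	return NULL;
}

int main(void)
{
	struct node nodes[] = { { 7, "AAAAAAA" }, { 9, "BBBBBBB" } };
	int rpi = 9;
	const struct node *n = find_node(nodes, 2, filter_by_rpi, &rpi);
	printf("by rpi:  %d\n", n ? n->rpi : -1);
	n = find_node(nodes, 2, filter_by_wwpn, "AAAAAAA");
	printf("by wwpn: %d\n", n ? n->rpi : -1);
	return 0;
}
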
2317 2817
2318/* 2818/*
2319 * This routine looks up the ndlp lists for the given RPI. If rpi found it 2819 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 2320 * returns the node list pointer else return NULL. 2820 * returns the node list element pointer, else returns NULL.
2321 */ 2821 */
2322struct lpfc_nodelist * 2822struct lpfc_nodelist *
2323__lpfc_findnode_rpi(struct lpfc_hba *phba, uint16_t rpi) 2823__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2324{ 2824{
2325 return __lpfc_find_node(phba, lpfc_filter_by_rpi, &rpi); 2825 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
2326} 2826}
2327 2827
2328struct lpfc_nodelist * 2828struct lpfc_nodelist *
2329lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi) 2829lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
2330{ 2830{
2831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2331 struct lpfc_nodelist *ndlp; 2832 struct lpfc_nodelist *ndlp;
2332 2833
2333 spin_lock_irq(phba->host->host_lock); 2834 spin_lock_irq(shost->host_lock);
2334 ndlp = __lpfc_findnode_rpi(phba, rpi); 2835 ndlp = __lpfc_findnode_rpi(vport, rpi);
2335 spin_unlock_irq(phba->host->host_lock); 2836 spin_unlock_irq(shost->host_lock);
2336 return ndlp; 2837 return ndlp;
2337} 2838}
2338 2839
2339/* 2840/*
2340 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it 2841 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 2341 * returns the node list pointer else return NULL. 2842 * returns the node list element pointer, else returns NULL.
2342 */ 2843 */
2343struct lpfc_nodelist * 2844struct lpfc_nodelist *
2344lpfc_findnode_wwpn(struct lpfc_hba *phba, struct lpfc_name *wwpn) 2845lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
2345{ 2846{
2847 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2346 struct lpfc_nodelist *ndlp; 2848 struct lpfc_nodelist *ndlp;
2347 2849
2348 spin_lock_irq(phba->host->host_lock); 2850 spin_lock_irq(shost->host_lock);
2349 ndlp = __lpfc_find_node(phba, lpfc_filter_by_wwpn, wwpn); 2851 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
2350 spin_unlock_irq(phba->host->host_lock); 2852 spin_unlock_irq(shost->host_lock);
2351 return NULL; 2853 return ndlp;
2352} 2854}
2353 2855
2354void 2856void
2355lpfc_nlp_init(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint32_t did) 2857lpfc_dev_loss_delay(unsigned long ptr)
2858{
2859 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2860 struct lpfc_vport *vport = ndlp->vport;
2861 struct lpfc_hba *phba = vport->phba;
2862 struct lpfc_work_evt *evtp = &ndlp->dev_loss_evt;
2863 unsigned long flags;
2864
2865 evtp = &ndlp->dev_loss_evt;
2866
2867 spin_lock_irqsave(&phba->hbalock, flags);
2868 if (!list_empty(&evtp->evt_listp)) {
2869 spin_unlock_irqrestore(&phba->hbalock, flags);
2870 return;
2871 }
2872
2873 evtp->evt_arg1 = ndlp;
2874 evtp->evt = LPFC_EVT_DEV_LOSS_DELAY;
2875 list_add_tail(&evtp->evt_listp, &phba->work_list);
2876 if (phba->work_wait)
2877 lpfc_worker_wake_up(phba);
2878 spin_unlock_irqrestore(&phba->hbalock, flags);
2879 return;
2880}
2881
2882void
2883lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2884 uint32_t did)
2356{ 2885{
2357 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 2886 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2358 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2887 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2888 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
2359 init_timer(&ndlp->nlp_delayfunc); 2889 init_timer(&ndlp->nlp_delayfunc);
2360 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2890 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2361 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2891 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2362 ndlp->nlp_DID = did; 2892 ndlp->nlp_DID = did;
2363 ndlp->nlp_phba = phba; 2893 ndlp->vport = vport;
2364 ndlp->nlp_sid = NLP_NO_SID; 2894 ndlp->nlp_sid = NLP_NO_SID;
2365 INIT_LIST_HEAD(&ndlp->nlp_listp); 2895 INIT_LIST_HEAD(&ndlp->nlp_listp);
2366 kref_init(&ndlp->kref); 2896 kref_init(&ndlp->kref);
2897
2898 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2899 "node init: did:x%x",
2900 ndlp->nlp_DID, 0, 0);
2901
2367 return; 2902 return;
2368} 2903}
2369 2904
@@ -2372,8 +2907,13 @@ lpfc_nlp_release(struct kref *kref)
2372{ 2907{
2373 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 2908 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
2374 kref); 2909 kref);
2375 lpfc_nlp_remove(ndlp->nlp_phba, ndlp); 2910
2376 mempool_free(ndlp, ndlp->nlp_phba->nlp_mem_pool); 2911 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
2912 "node release: did:x%x flg:x%x type:x%x",
2913 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
2914
2915 lpfc_nlp_remove(ndlp->vport, ndlp);
2916 mempool_free(ndlp, ndlp->vport->phba->nlp_mem_pool);
2377} 2917}
2378 2918
2379struct lpfc_nodelist * 2919struct lpfc_nodelist *
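
lpfc_nlp_release() is the kref idiom: the refcount is embedded in the nodelist entry, and the release callback uses container_of() to recover the enclosing structure before freeing it. A self-contained sketch with a toy, non-atomic kref; only the shape matches the kernel API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };		/* toy stand-in, not the kernel's atomic kref */

struct node {
	int did;
	struct kref kref;		/* embedded refcount */
};

static void node_release(struct kref *kref)
{
	struct node *n = container_of(kref, struct node, kref);
	printf("releasing node did=0x%x\n", n->did);
	free(n);
}

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0)
		release(k);		/* last reference dropped */
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));
	n->did = 0xfffffa;
	n->kref.refcount = 1;		/* like kref_init() */
	kref_put(&n->kref, node_release);
	return 0;
}
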
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 2623a9bc7775..c2fb59f595f3 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -59,6 +59,12 @@
59#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24 59#define SLI2_IOCB_CMD_R3XTRA_ENTRIES 24
60#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32 60#define SLI2_IOCB_RSP_R3XTRA_ENTRIES 32
61 61
62#define SLI2_IOCB_CMD_SIZE 32
63#define SLI2_IOCB_RSP_SIZE 32
64#define SLI3_IOCB_CMD_SIZE 128
65#define SLI3_IOCB_RSP_SIZE 64
66
67
62/* Common Transport structures and definitions */ 68/* Common Transport structures and definitions */
63 69
64union CtRevisionId { 70union CtRevisionId {
@@ -79,6 +85,9 @@ union CtCommandResponse {
79 uint32_t word; 85 uint32_t word;
80}; 86};
81 87
88#define FC4_FEATURE_INIT 0x2
89#define FC4_FEATURE_TARGET 0x1
90
82struct lpfc_sli_ct_request { 91struct lpfc_sli_ct_request {
83 /* Structure is in Big Endian format */ 92 /* Structure is in Big Endian format */
84 union CtRevisionId RevisionId; 93 union CtRevisionId RevisionId;
@@ -121,20 +130,6 @@ struct lpfc_sli_ct_request {
121 130
122 uint32_t rsvd[7]; 131 uint32_t rsvd[7];
123 } rft; 132 } rft;
124 struct rff {
125 uint32_t PortId;
126 uint8_t reserved[2];
127#ifdef __BIG_ENDIAN_BITFIELD
128 uint8_t feature_res:6;
129 uint8_t feature_init:1;
130 uint8_t feature_tgt:1;
131#else /* __LITTLE_ENDIAN_BITFIELD */
132 uint8_t feature_tgt:1;
133 uint8_t feature_init:1;
134 uint8_t feature_res:6;
135#endif
136 uint8_t type_code; /* type=8 for FCP */
137 } rff;
138 struct rnn { 133 struct rnn {
139 uint32_t PortId; /* For RNN_ID requests */ 134 uint32_t PortId; /* For RNN_ID requests */
140 uint8_t wwnn[8]; 135 uint8_t wwnn[8];
@@ -144,15 +139,42 @@ struct lpfc_sli_ct_request {
144 uint8_t len; 139 uint8_t len;
145 uint8_t symbname[255]; 140 uint8_t symbname[255];
146 } rsnn; 141 } rsnn;
142 struct rspn { /* For RSPN_ID requests */
143 uint32_t PortId;
144 uint8_t len;
145 uint8_t symbname[255];
146 } rspn;
147 struct gff {
148 uint32_t PortId;
149 } gff;
150 struct gff_acc {
151 uint8_t fbits[128];
152 } gff_acc;
153#define FCP_TYPE_FEATURE_OFFSET 4
154 struct rff {
155 uint32_t PortId;
156 uint8_t reserved[2];
157 uint8_t fbits;
158 uint8_t type_code; /* type=8 for FCP */
159 } rff;
147 } un; 160 } un;
148}; 161};
149 162
150#define SLI_CT_REVISION 1 163#define SLI_CT_REVISION 1
151#define GID_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 260) 164#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
152#define RFT_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 228) 165 sizeof(struct gid))
153#define RFF_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 235) 166#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
154#define RNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request) - 252) 167 sizeof(struct gff))
155#define RSNN_REQUEST_SZ (sizeof(struct lpfc_sli_ct_request)) 168#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
169 sizeof(struct rft))
170#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
171 sizeof(struct rff))
172#define RNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
173 sizeof(struct rnn))
174#define RSNN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
175 sizeof(struct rsnn))
176#define RSPN_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
177 sizeof(struct rspn))
156 178
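
The *_REQUEST_SZ rewrite above replaces "sizeof(struct) - magic constant" with offsetof() plus the size of the named union member, so each request length tracks its own member and cannot drift when another member (such as the new rspn or gff) grows the union. A small illustrative check with invented structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gid  { uint32_t port_id; };
struct rsnn { uint32_t port_id; uint8_t len; uint8_t name[255]; };

struct ct_req {
	uint32_t preamble[4];		/* stand-in for the CT header fields */
	union {
		struct gid gid;
		struct rsnn rsnn;	/* large member: dominates sizeof(un) */
	} un;
};

/* Tracks struct gid itself, no matter how big the union becomes. */
#define GID_REQUEST_SZ (offsetof(struct ct_req, un) + sizeof(struct gid))

int main(void)
{
	printf("sizeof(ct_req) = %zu\n", sizeof(struct ct_req));
	printf("GID_REQUEST_SZ = %zu\n", GID_REQUEST_SZ);
	return 0;
}
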
157/* 179/*
158 * FsType Definitions 180 * FsType Definitions
@@ -227,6 +249,7 @@ struct lpfc_sli_ct_request {
227#define SLI_CTNS_GFT_ID 0x0117 249#define SLI_CTNS_GFT_ID 0x0117
228#define SLI_CTNS_GSPN_ID 0x0118 250#define SLI_CTNS_GSPN_ID 0x0118
229#define SLI_CTNS_GPT_ID 0x011A 251#define SLI_CTNS_GPT_ID 0x011A
252#define SLI_CTNS_GFF_ID 0x011F
230#define SLI_CTNS_GID_PN 0x0121 253#define SLI_CTNS_GID_PN 0x0121
231#define SLI_CTNS_GID_NN 0x0131 254#define SLI_CTNS_GID_NN 0x0131
232#define SLI_CTNS_GIP_NN 0x0135 255#define SLI_CTNS_GIP_NN 0x0135
@@ -240,9 +263,9 @@ struct lpfc_sli_ct_request {
240#define SLI_CTNS_RNN_ID 0x0213 263#define SLI_CTNS_RNN_ID 0x0213
241#define SLI_CTNS_RCS_ID 0x0214 264#define SLI_CTNS_RCS_ID 0x0214
242#define SLI_CTNS_RFT_ID 0x0217 265#define SLI_CTNS_RFT_ID 0x0217
243#define SLI_CTNS_RFF_ID 0x021F
244#define SLI_CTNS_RSPN_ID 0x0218 266#define SLI_CTNS_RSPN_ID 0x0218
245#define SLI_CTNS_RPT_ID 0x021A 267#define SLI_CTNS_RPT_ID 0x021A
268#define SLI_CTNS_RFF_ID 0x021F
246#define SLI_CTNS_RIP_NN 0x0235 269#define SLI_CTNS_RIP_NN 0x0235
247#define SLI_CTNS_RIPA_NN 0x0236 270#define SLI_CTNS_RIPA_NN 0x0236
248#define SLI_CTNS_RSNN_NN 0x0239 271#define SLI_CTNS_RSNN_NN 0x0239
@@ -311,9 +334,9 @@ struct csp {
311 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ 334 uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
312 335
313#ifdef __BIG_ENDIAN_BITFIELD 336#ifdef __BIG_ENDIAN_BITFIELD
314 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ 337 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
315 uint16_t randomOffset:1; /* FC Word 1, bit 30 */ 338 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
316 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ 339 uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
317 uint16_t fPort:1; /* FC Word 1, bit 28 */ 340 uint16_t fPort:1; /* FC Word 1, bit 28 */
318 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ 341 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
319 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ 342 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
@@ -332,9 +355,9 @@ struct csp {
332 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ 355 uint16_t edtovResolution:1; /* FC Word 1, bit 26 */
333 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ 356 uint16_t altBbCredit:1; /* FC Word 1, bit 27 */
334 uint16_t fPort:1; /* FC Word 1, bit 28 */ 357 uint16_t fPort:1; /* FC Word 1, bit 28 */
335 uint16_t word1Reserved2:1; /* FC Word 1, bit 29 */ 358 uint16_t response_multiple_NPort:1; /* FC Word 1, bit 29 */
336 uint16_t randomOffset:1; /* FC Word 1, bit 30 */ 359 uint16_t randomOffset:1; /* FC Word 1, bit 30 */
337 uint16_t increasingOffset:1; /* FC Word 1, bit 31 */ 360 uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
338 361
339 uint16_t payloadlength:1; /* FC Word 1, bit 16 */ 362 uint16_t payloadlength:1; /* FC Word 1, bit 16 */
340 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ 363 uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */
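
The csp word-1 change renames bits 31 and 29 in both halves of the usual endian-mirrored bitfield declaration: the field order is reversed between the __BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN_BITFIELD branches so the same wire bit lands in the same field on either host. A sketch of the idiom, assuming GCC-style bitfield allocation; the field names are illustrative:

#include <stdint.h>
#include <stdio.h>

struct word1 {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	uint16_t request_multiple_nport:1;	/* bit 15 of the halfword */
	uint16_t random_offset:1;
	uint16_t reserved:13;
	uint16_t alt_bb_credit:1;		/* bit 0 */
#else
	uint16_t alt_bb_credit:1;		/* bit 0 */
	uint16_t reserved:13;
	uint16_t random_offset:1;
	uint16_t request_multiple_nport:1;	/* bit 15 of the halfword */
#endif
};

int main(void)
{
	union { struct word1 w; uint16_t raw; } u = { .raw = 0 };
	u.w.request_multiple_nport = 1;
	/* 0x8000 on either endianness, thanks to the mirrored declaration. */
	printf("raw halfword: 0x%04x\n", u.raw);
	return 0;
}
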
@@ -1255,7 +1278,9 @@ typedef struct { /* FireFly BIU registers */
1255#define MBX_KILL_BOARD 0x24 1278#define MBX_KILL_BOARD 0x24
1256#define MBX_CONFIG_FARP 0x25 1279#define MBX_CONFIG_FARP 0x25
1257#define MBX_BEACON 0x2A 1280#define MBX_BEACON 0x2A
1281#define MBX_HEARTBEAT 0x31
1258 1282
1283#define MBX_CONFIG_HBQ 0x7C
1259#define MBX_LOAD_AREA 0x81 1284#define MBX_LOAD_AREA 0x81
1260#define MBX_RUN_BIU_DIAG64 0x84 1285#define MBX_RUN_BIU_DIAG64 0x84
1261#define MBX_CONFIG_PORT 0x88 1286#define MBX_CONFIG_PORT 0x88
@@ -1263,6 +1288,10 @@ typedef struct { /* FireFly BIU registers */
1263#define MBX_READ_RPI64 0x8F 1288#define MBX_READ_RPI64 0x8F
1264#define MBX_REG_LOGIN64 0x93 1289#define MBX_REG_LOGIN64 0x93
1265#define MBX_READ_LA64 0x95 1290#define MBX_READ_LA64 0x95
1291#define MBX_REG_VPI 0x96
1292#define MBX_UNREG_VPI 0x97
1293#define MBX_REG_VNPID 0x96
1294#define MBX_UNREG_VNPID 0x97
1266 1295
1267#define MBX_FLASH_WR_ULA 0x98 1296#define MBX_FLASH_WR_ULA 0x98
1268#define MBX_SET_DEBUG 0x99 1297#define MBX_SET_DEBUG 0x99
@@ -1335,6 +1364,10 @@ typedef struct { /* FireFly BIU registers */
1335#define CMD_FCP_TRECEIVE64_CX 0xA1 1364#define CMD_FCP_TRECEIVE64_CX 0xA1
1336#define CMD_FCP_TRSP64_CX 0xA3 1365#define CMD_FCP_TRSP64_CX 0xA3
1337 1366
1367#define CMD_IOCB_RCV_SEQ64_CX 0xB5
1368#define CMD_IOCB_RCV_ELS64_CX 0xB7
1369#define CMD_IOCB_RCV_CONT64_CX 0xBB
1370
1338#define CMD_GEN_REQUEST64_CR 0xC2 1371#define CMD_GEN_REQUEST64_CR 0xC2
1339#define CMD_GEN_REQUEST64_CX 0xC3 1372#define CMD_GEN_REQUEST64_CX 0xC3
1340 1373
@@ -1561,6 +1594,7 @@ typedef struct {
1561#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */ 1594#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
1562#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */ 1595#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
1563#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */ 1596#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
1597#define FLAGS_UNREG_LOGIN_ALL 0x08 /* UNREG_LOGIN all on link down */
1564#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */ 1598#define FLAGS_LIRP_LILP 0x80 /* LIRP / LILP is disabled */
1565 1599
1566#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ 1600#define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */
@@ -1744,8 +1778,6 @@ typedef struct {
1744#define LMT_4Gb 0x040 1778#define LMT_4Gb 0x040
1745#define LMT_8Gb 0x080 1779#define LMT_8Gb 0x080
1746#define LMT_10Gb 0x100 1780#define LMT_10Gb 0x100
1747
1748
1749 uint32_t rsvd2; 1781 uint32_t rsvd2;
1750 uint32_t rsvd3; 1782 uint32_t rsvd3;
1751 uint32_t max_xri; 1783 uint32_t max_xri;
@@ -1754,7 +1786,10 @@ typedef struct {
1754 uint32_t avail_xri; 1786 uint32_t avail_xri;
1755 uint32_t avail_iocb; 1787 uint32_t avail_iocb;
1756 uint32_t avail_rpi; 1788 uint32_t avail_rpi;
1757 uint32_t default_rpi; 1789 uint32_t max_vpi;
1790 uint32_t rsvd4;
1791 uint32_t rsvd5;
1792 uint32_t avail_vpi;
1758} READ_CONFIG_VAR; 1793} READ_CONFIG_VAR;
1759 1794
1760/* Structure for MB Command READ_RCONFIG (12) */ 1795/* Structure for MB Command READ_RCONFIG (12) */
@@ -1818,6 +1853,13 @@ typedef struct {
1818 structure */ 1853 structure */
1819 struct ulp_bde64 sp64; 1854 struct ulp_bde64 sp64;
1820 } un; 1855 } un;
1856#ifdef __BIG_ENDIAN_BITFIELD
1857 uint16_t rsvd3;
1858 uint16_t vpi;
1859#else /* __LITTLE_ENDIAN_BITFIELD */
1860 uint16_t vpi;
1861 uint16_t rsvd3;
1862#endif
1821} READ_SPARM_VAR; 1863} READ_SPARM_VAR;
1822 1864
1823/* Structure for MB Command READ_STATUS (14) */ 1865/* Structure for MB Command READ_STATUS (14) */
@@ -1918,11 +1960,17 @@ typedef struct {
1918#ifdef __BIG_ENDIAN_BITFIELD 1960#ifdef __BIG_ENDIAN_BITFIELD
1919 uint32_t cv:1; 1961 uint32_t cv:1;
1920 uint32_t rr:1; 1962 uint32_t rr:1;
1921 uint32_t rsvd1:29; 1963 uint32_t rsvd2:2;
1964 uint32_t v3req:1;
1965 uint32_t v3rsp:1;
1966 uint32_t rsvd1:25;
1922 uint32_t rv:1; 1967 uint32_t rv:1;
1923#else /* __LITTLE_ENDIAN_BITFIELD */ 1968#else /* __LITTLE_ENDIAN_BITFIELD */
1924 uint32_t rv:1; 1969 uint32_t rv:1;
1925 uint32_t rsvd1:29; 1970 uint32_t rsvd1:25;
1971 uint32_t v3rsp:1;
1972 uint32_t v3req:1;
1973 uint32_t rsvd2:2;
1926 uint32_t rr:1; 1974 uint32_t rr:1;
1927 uint32_t cv:1; 1975 uint32_t cv:1;
1928#endif 1976#endif
@@ -1972,8 +2020,8 @@ typedef struct {
1972 uint8_t sli1FwName[16]; 2020 uint8_t sli1FwName[16];
1973 uint32_t sli2FwRev; 2021 uint32_t sli2FwRev;
1974 uint8_t sli2FwName[16]; 2022 uint8_t sli2FwName[16];
1975 uint32_t rsvd2; 2023 uint32_t sli3Feat;
1976 uint32_t RandomData[7]; 2024 uint32_t RandomData[6];
1977} READ_REV_VAR; 2025} READ_REV_VAR;
1978 2026
1979/* Structure for MB Command READ_LINK_STAT (18) */ 2027/* Structure for MB Command READ_LINK_STAT (18) */
@@ -2013,6 +2061,14 @@ typedef struct {
2013 struct ulp_bde64 sp64; 2061 struct ulp_bde64 sp64;
2014 } un; 2062 } un;
2015 2063
2064#ifdef __BIG_ENDIAN_BITFIELD
2065 uint16_t rsvd6;
2066 uint16_t vpi;
2067#else /* __LITTLE_ENDIAN_BITFIELD */
2068 uint16_t vpi;
2069 uint16_t rsvd6;
2070#endif
2071
2016} REG_LOGIN_VAR; 2072} REG_LOGIN_VAR;
2017 2073
2018/* Word 30 contents for REG_LOGIN */ 2074/* Word 30 contents for REG_LOGIN */
@@ -2037,16 +2093,78 @@ typedef struct {
2037#ifdef __BIG_ENDIAN_BITFIELD 2093#ifdef __BIG_ENDIAN_BITFIELD
2038 uint16_t rsvd1; 2094 uint16_t rsvd1;
2039 uint16_t rpi; 2095 uint16_t rpi;
2096 uint32_t rsvd2;
2097 uint32_t rsvd3;
2098 uint32_t rsvd4;
2099 uint32_t rsvd5;
2100 uint16_t rsvd6;
2101 uint16_t vpi;
2040#else /* __LITTLE_ENDIAN_BITFIELD */ 2102#else /* __LITTLE_ENDIAN_BITFIELD */
2041 uint16_t rpi; 2103 uint16_t rpi;
2042 uint16_t rsvd1; 2104 uint16_t rsvd1;
2105 uint32_t rsvd2;
2106 uint32_t rsvd3;
2107 uint32_t rsvd4;
2108 uint32_t rsvd5;
2109 uint16_t vpi;
2110 uint16_t rsvd6;
2043#endif 2111#endif
2044} UNREG_LOGIN_VAR; 2112} UNREG_LOGIN_VAR;
2045 2113
2114/* Structure for MB Command REG_VPI (0x96) */
2115typedef struct {
2116#ifdef __BIG_ENDIAN_BITFIELD
2117 uint32_t rsvd1;
2118 uint32_t rsvd2:8;
2119 uint32_t sid:24;
2120 uint32_t rsvd3;
2121 uint32_t rsvd4;
2122 uint32_t rsvd5;
2123 uint16_t rsvd6;
2124 uint16_t vpi;
2125#else /* __LITTLE_ENDIAN */
2126 uint32_t rsvd1;
2127 uint32_t sid:24;
2128 uint32_t rsvd2:8;
2129 uint32_t rsvd3;
2130 uint32_t rsvd4;
2131 uint32_t rsvd5;
2132 uint16_t vpi;
2133 uint16_t rsvd6;
2134#endif
2135} REG_VPI_VAR;
2136
2137/* Structure for MB Command UNREG_VPI (0x97) */
2138typedef struct {
2139 uint32_t rsvd1;
2140 uint32_t rsvd2;
2141 uint32_t rsvd3;
2142 uint32_t rsvd4;
2143 uint32_t rsvd5;
2144#ifdef __BIG_ENDIAN_BITFIELD
2145 uint16_t rsvd6;
2146 uint16_t vpi;
2147#else /* __LITTLE_ENDIAN */
2148 uint16_t vpi;
2149 uint16_t rsvd6;
2150#endif
2151} UNREG_VPI_VAR;
2152
2046/* Structure for MB Command UNREG_D_ID (0x23) */ 2153/* Structure for MB Command UNREG_D_ID (0x23) */
2047 2154
2048typedef struct { 2155typedef struct {
2049 uint32_t did; 2156 uint32_t did;
2157 uint32_t rsvd2;
2158 uint32_t rsvd3;
2159 uint32_t rsvd4;
2160 uint32_t rsvd5;
2161#ifdef __BIG_ENDIAN_BITFIELD
2162 uint16_t rsvd6;
2163 uint16_t vpi;
2164#else
2165 uint16_t vpi;
2166 uint16_t rsvd6;
2167#endif
2050} UNREG_D_ID_VAR; 2168} UNREG_D_ID_VAR;
2051 2169
2052/* Structure for MB Command READ_LA (21) */ 2170/* Structure for MB Command READ_LA (21) */
@@ -2178,13 +2296,240 @@ typedef struct {
2178#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ 2296#define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */
2179#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ 2297#define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */
2180 2298
2181/* Structure for MB Command CONFIG_PORT (0x88) */ 2299struct hbq_mask {
2300#ifdef __BIG_ENDIAN_BITFIELD
2301 uint8_t tmatch;
2302 uint8_t tmask;
2303 uint8_t rctlmatch;
2304 uint8_t rctlmask;
2305#else /* __LITTLE_ENDIAN */
2306 uint8_t rctlmask;
2307 uint8_t rctlmatch;
2308 uint8_t tmask;
2309 uint8_t tmatch;
2310#endif
2311};
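
/*
 * Illustrative sketch (not from the patch) -- why each word above is
 * declared twice: C leaves bitfield and byte order to the ABI, so the
 * driver mirrors every layout under __BIG_ENDIAN_BITFIELD /
 * __LITTLE_ENDIAN_BITFIELD so each field lands on the same bits of the
 * 32-bit word the HBA parses.  A stand-alone user-space analogue
 * (little-endian host assumed; demo_mask mimics struct hbq_mask):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_mask {		/* little-endian layout of struct hbq_mask */
	uint8_t rctlmask;	/* lowest address = least significant byte */
	uint8_t rctlmatch;
	uint8_t tmask;
	uint8_t tmatch;		/* highest address = most significant byte */
};

int main(void)
{
	struct demo_mask m = { .rctlmask = 0xff, .rctlmatch = 0x22,
			       .tmask = 0xff, .tmatch = 0x01 };
	uint32_t w;

	memcpy(&w, &m, sizeof(w));
	/* prints 0x01ff22ff: tmatch sits in bits 31:24, exactly where the
	 * big-endian variant declares it first */
	printf("word = 0x%08x\n", w);
	return 0;
}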
2312
2313
 2314/* Structure for MB Command CONFIG_HBQ (0x7C) */
2315
2316struct config_hbq_var {
2317#ifdef __BIG_ENDIAN_BITFIELD
2318 uint32_t rsvd1 :7;
2319 uint32_t recvNotify :1; /* Receive Notification */
2320 uint32_t numMask :8; /* # Mask Entries */
2321 uint32_t profile :8; /* Selection Profile */
2322 uint32_t rsvd2 :8;
2323#else /* __LITTLE_ENDIAN */
2324 uint32_t rsvd2 :8;
2325 uint32_t profile :8; /* Selection Profile */
2326 uint32_t numMask :8; /* # Mask Entries */
2327 uint32_t recvNotify :1; /* Receive Notification */
2328 uint32_t rsvd1 :7;
2329#endif
2330
2331#ifdef __BIG_ENDIAN_BITFIELD
2332 uint32_t hbqId :16;
2333 uint32_t rsvd3 :12;
2334 uint32_t ringMask :4;
2335#else /* __LITTLE_ENDIAN */
2336 uint32_t ringMask :4;
2337 uint32_t rsvd3 :12;
2338 uint32_t hbqId :16;
2339#endif
2340
2341#ifdef __BIG_ENDIAN_BITFIELD
2342 uint32_t entry_count :16;
2343 uint32_t rsvd4 :8;
2344 uint32_t headerLen :8;
2345#else /* __LITTLE_ENDIAN */
2346 uint32_t headerLen :8;
2347 uint32_t rsvd4 :8;
2348 uint32_t entry_count :16;
2349#endif
2350
2351 uint32_t hbqaddrLow;
2352 uint32_t hbqaddrHigh;
2353
2354#ifdef __BIG_ENDIAN_BITFIELD
2355 uint32_t rsvd5 :31;
2356 uint32_t logEntry :1;
2357#else /* __LITTLE_ENDIAN */
2358 uint32_t logEntry :1;
2359 uint32_t rsvd5 :31;
2360#endif
2361
2362 uint32_t rsvd6; /* w7 */
2363 uint32_t rsvd7; /* w8 */
2364 uint32_t rsvd8; /* w9 */
2365
2366 struct hbq_mask hbqMasks[6];
2367
2368
2369 union {
2370 uint32_t allprofiles[12];
2371
2372 struct {
2373 #ifdef __BIG_ENDIAN_BITFIELD
2374 uint32_t seqlenoff :16;
2375 uint32_t maxlen :16;
2376 #else /* __LITTLE_ENDIAN */
2377 uint32_t maxlen :16;
2378 uint32_t seqlenoff :16;
2379 #endif
2380 #ifdef __BIG_ENDIAN_BITFIELD
2381 uint32_t rsvd1 :28;
2382 uint32_t seqlenbcnt :4;
2383 #else /* __LITTLE_ENDIAN */
2384 uint32_t seqlenbcnt :4;
2385 uint32_t rsvd1 :28;
2386 #endif
2387 uint32_t rsvd[10];
2388 } profile2;
2389
2390 struct {
2391 #ifdef __BIG_ENDIAN_BITFIELD
2392 uint32_t seqlenoff :16;
2393 uint32_t maxlen :16;
2394 #else /* __LITTLE_ENDIAN */
2395 uint32_t maxlen :16;
2396 uint32_t seqlenoff :16;
2397 #endif
2398 #ifdef __BIG_ENDIAN_BITFIELD
2399 uint32_t cmdcodeoff :28;
2400 uint32_t rsvd1 :12;
2401 uint32_t seqlenbcnt :4;
2402 #else /* __LITTLE_ENDIAN */
2403 uint32_t seqlenbcnt :4;
2404 uint32_t rsvd1 :12;
2405 uint32_t cmdcodeoff :28;
2406 #endif
2407 uint32_t cmdmatch[8];
2408
2409 uint32_t rsvd[2];
2410 } profile3;
2411
2412 struct {
2413 #ifdef __BIG_ENDIAN_BITFIELD
2414 uint32_t seqlenoff :16;
2415 uint32_t maxlen :16;
2416 #else /* __LITTLE_ENDIAN */
2417 uint32_t maxlen :16;
2418 uint32_t seqlenoff :16;
2419 #endif
2420 #ifdef __BIG_ENDIAN_BITFIELD
2421 uint32_t cmdcodeoff :28;
2422 uint32_t rsvd1 :12;
2423 uint32_t seqlenbcnt :4;
2424 #else /* __LITTLE_ENDIAN */
2425 uint32_t seqlenbcnt :4;
2426 uint32_t rsvd1 :12;
2427 uint32_t cmdcodeoff :28;
2428 #endif
2429 uint32_t cmdmatch[8];
2430
2431 uint32_t rsvd[2];
2432 } profile5;
2433
2434 } profiles;
2182 2435
2436};
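
/*
 * Illustrative sketch (not from the patch): how a CONFIG_HBQ payload of
 * the shape above might be filled for a single-mask receive queue.  The
 * kernel build environment is assumed; LPFC_ELS_RING and the match/mask
 * values are made-up example values.
 */
static void demo_config_hbq(struct config_hbq_var *hbqmb,
			    uint32_t entry_count, uint64_t dma)
{
	memset(hbqmb, 0, sizeof(*hbqmb));
	hbqmb->entry_count = entry_count;	/* # of 4-word HBQ slots */
	hbqmb->recvNotify  = 1;			/* interrupt on receive */
	hbqmb->numMask     = 1;			/* one hbqMasks[] entry */
	hbqmb->profile     = 0;			/* generic buffer profile */
	hbqmb->ringMask    = 1 << LPFC_ELS_RING; /* deliver on ELS ring */
	hbqmb->hbqaddrLow  = (uint32_t)(dma & 0xffffffff);
	hbqmb->hbqaddrHigh = (uint32_t)(dma >> 32);
	hbqmb->hbqMasks[0].tmatch    = 0x01;	/* frame type to match */
	hbqmb->hbqMasks[0].tmask     = 0xff;	/* compare all type bits */
	hbqmb->hbqMasks[0].rctlmatch = 0x22;	/* R_CTL to match */
	hbqmb->hbqMasks[0].rctlmask  = 0xff;	/* compare all R_CTL bits */
}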
2437
2438
2439
2440/* Structure for MB Command CONFIG_PORT (0x88) */
2183typedef struct { 2441typedef struct {
2184 uint32_t pcbLen; 2442#ifdef __BIG_ENDIAN_BITFIELD
2443 uint32_t cBE : 1;
2444 uint32_t cET : 1;
2445 uint32_t cHpcb : 1;
2446 uint32_t cMA : 1;
2447 uint32_t sli_mode : 4;
2448 uint32_t pcbLen : 24; /* bit 23:0 of memory based port
2449 * config block */
2450#else /* __LITTLE_ENDIAN */
2451 uint32_t pcbLen : 24; /* bit 23:0 of memory based port
2452 * config block */
2453 uint32_t sli_mode : 4;
2454 uint32_t cMA : 1;
2455 uint32_t cHpcb : 1;
2456 uint32_t cET : 1;
2457 uint32_t cBE : 1;
2458#endif
2459
2185 uint32_t pcbLow; /* bit 31:0 of memory based port config block */ 2460 uint32_t pcbLow; /* bit 31:0 of memory based port config block */
2186 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */ 2461 uint32_t pcbHigh; /* bit 63:32 of memory based port config block */
2187 uint32_t hbainit[5]; 2462 uint32_t hbainit[6];
2463
2464#ifdef __BIG_ENDIAN_BITFIELD
2465 uint32_t rsvd : 24; /* Reserved */
2466 uint32_t cmv : 1; /* Configure Max VPIs */
2467 uint32_t ccrp : 1; /* Config Command Ring Polling */
2468 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
 2469 uint32_t chbs : 1; /* Configure Host Backing Store */
2470 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
2471 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
2472 uint32_t cmx : 1; /* Configure Max XRIs */
2473 uint32_t cmr : 1; /* Configure Max RPIs */
2474#else /* __LITTLE_ENDIAN */
2475 uint32_t cmr : 1; /* Configure Max RPIs */
2476 uint32_t cmx : 1; /* Configure Max XRIs */
2477 uint32_t cerbm : 1; /* Configure Enhanced Receive Buf Mgmt */
2478 uint32_t cinb : 1; /* Enable Interrupt Notification Block */
 2479 uint32_t chbs : 1; /* Configure Host Backing Store */
2480 uint32_t csah : 1; /* Configure Synchronous Abort Handling */
2481 uint32_t ccrp : 1; /* Config Command Ring Polling */
2482 uint32_t cmv : 1; /* Configure Max VPIs */
2483 uint32_t rsvd : 24; /* Reserved */
2484#endif
2485#ifdef __BIG_ENDIAN_BITFIELD
2486 uint32_t rsvd2 : 24; /* Reserved */
2487 uint32_t gmv : 1; /* Grant Max VPIs */
2488 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2489 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
2490 uint32_t ghbs : 1; /* Grant Host Backing Store */
2491 uint32_t ginb : 1; /* Grant Interrupt Notification Block */
2492 uint32_t gerbm : 1; /* Grant ERBM Request */
2493 uint32_t gmx : 1; /* Grant Max XRIs */
2494 uint32_t gmr : 1; /* Grant Max RPIs */
2495#else /* __LITTLE_ENDIAN */
2496 uint32_t gmr : 1; /* Grant Max RPIs */
2497 uint32_t gmx : 1; /* Grant Max XRIs */
2498 uint32_t gerbm : 1; /* Grant ERBM Request */
2499 uint32_t ginb : 1; /* Grant Interrupt Notification Block */
2500 uint32_t ghbs : 1; /* Grant Host Backing Store */
2501 uint32_t gsah : 1; /* Grant Synchronous Abort Handling */
2502 uint32_t gcrp : 1; /* Grant Command Ring Polling */
2503 uint32_t gmv : 1; /* Grant Max VPIs */
2504 uint32_t rsvd2 : 24; /* Reserved */
2505#endif
2506
2507#ifdef __BIG_ENDIAN_BITFIELD
2508 uint32_t max_rpi : 16; /* Max RPIs Port should configure */
2509 uint32_t max_xri : 16; /* Max XRIs Port should configure */
2510#else /* __LITTLE_ENDIAN */
2511 uint32_t max_xri : 16; /* Max XRIs Port should configure */
2512 uint32_t max_rpi : 16; /* Max RPIs Port should configure */
2513#endif
2514
2515#ifdef __BIG_ENDIAN_BITFIELD
 2516 uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
 2517 uint32_t rsvd3 : 16; /* Reserved */
 2518#else /* __LITTLE_ENDIAN */
 2519 uint32_t rsvd3 : 16; /* Reserved */
 2520 uint32_t max_hbq : 16; /* Max HBQs Host expects to configure */
2521#endif
2522
2523 uint32_t rsvd4; /* Reserved */
2524
2525#ifdef __BIG_ENDIAN_BITFIELD
2526 uint32_t rsvd5 : 16; /* Reserved */
2527 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2528#else /* __LITTLE_ENDIAN */
2529 uint32_t max_vpi : 16; /* Max number of virt N-Ports */
2530 uint32_t rsvd5 : 16; /* Reserved */
2531#endif
2532
2188} CONFIG_PORT_VAR; 2533} CONFIG_PORT_VAR;
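
/*
 * Sketch of the request/grant handshake CONFIG_PORT encodes: the host
 * sets the c* bits (cmv, cmx, cerbm, ...) when it issues the command,
 * and the firmware answers in the matching g* bits.  Hypothetical
 * helper, assuming the mailbox already completed with MBX_SUCCESS:
 */
static int demo_sli_mode_granted(CONFIG_PORT_VAR *cp)
{
	if (cp->sli_mode == 3 && cp->gerbm && cp->gmv)
		return 3;	/* HBQs and multiple VPIs granted: SLI-3 */
	return 2;		/* otherwise fall back to SLI-2 semantics */
}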
2189 2534
2190/* SLI-2 Port Control Block */ 2535/* SLI-2 Port Control Block */
@@ -2262,33 +2607,40 @@ typedef struct {
2262#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) 2607#define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t))
2263 2608
2264typedef union { 2609typedef union {
2265 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; 2610 uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/
2266 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */ 2611 * feature/max ring number
2267 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */ 2612 */
2268 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */ 2613 LOAD_SM_VAR varLdSM; /* cmd = 1 (LOAD_SM) */
2269 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */ 2614 READ_NV_VAR varRDnvp; /* cmd = 2 (READ_NVPARMS) */
2270 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */ 2615 WRITE_NV_VAR varWTnvp; /* cmd = 3 (WRITE_NVPARMS) */
2616 BIU_DIAG_VAR varBIUdiag; /* cmd = 4 (RUN_BIU_DIAG) */
2617 INIT_LINK_VAR varInitLnk; /* cmd = 5 (INIT_LINK) */
2271 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */ 2618 DOWN_LINK_VAR varDwnLnk; /* cmd = 6 (DOWN_LINK) */
2272 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */ 2619 CONFIG_LINK varCfgLnk; /* cmd = 7 (CONFIG_LINK) */
2273 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */ 2620 PART_SLIM_VAR varSlim; /* cmd = 8 (PART_SLIM) */
2274 CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */ 2621 CONFIG_RING_VAR varCfgRing; /* cmd = 9 (CONFIG_RING) */
2275 RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */ 2622 RESET_RING_VAR varRstRing; /* cmd = 10 (RESET_RING) */
2276 READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */ 2623 READ_CONFIG_VAR varRdConfig; /* cmd = 11 (READ_CONFIG) */
2277 READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */ 2624 READ_RCONF_VAR varRdRConfig; /* cmd = 12 (READ_RCONFIG) */
2278 READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */ 2625 READ_SPARM_VAR varRdSparm; /* cmd = 13 (READ_SPARM(64)) */
2279 READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */ 2626 READ_STATUS_VAR varRdStatus; /* cmd = 14 (READ_STATUS) */
2280 READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */ 2627 READ_RPI_VAR varRdRPI; /* cmd = 15 (READ_RPI(64)) */
2281 READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */ 2628 READ_XRI_VAR varRdXRI; /* cmd = 16 (READ_XRI) */
2282 READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */ 2629 READ_REV_VAR varRdRev; /* cmd = 17 (READ_REV) */
2283 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */ 2630 READ_LNK_VAR varRdLnk; /* cmd = 18 (READ_LNK_STAT) */
2284 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */ 2631 REG_LOGIN_VAR varRegLogin; /* cmd = 19 (REG_LOGIN(64)) */
2285 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */ 2632 UNREG_LOGIN_VAR varUnregLogin; /* cmd = 20 (UNREG_LOGIN) */
2286 READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */ 2633 READ_LA_VAR varReadLA; /* cmd = 21 (READ_LA(64)) */
2287 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */ 2634 CLEAR_LA_VAR varClearLA; /* cmd = 22 (CLEAR_LA) */
2288 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */ 2635 DUMP_VAR varDmp; /* Warm Start DUMP mbx cmd */
2289 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */ 2636 UNREG_D_ID_VAR varUnregDID; /* cmd = 0x23 (UNREG_D_ID) */
2290 CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP) NEW_FEATURE */ 2637 CONFIG_FARP_VAR varCfgFarp; /* cmd = 0x25 (CONFIG_FARP)
2291 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */ 2638 * NEW_FEATURE
2639 */
2640 struct config_hbq_var varCfgHbq;/* cmd = 0x7c (CONFIG_HBQ) */
2641 CONFIG_PORT_VAR varCfgPort; /* cmd = 0x88 (CONFIG_PORT) */
2642 REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */
2643 UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */
2292} MAILVARIANTS; 2644} MAILVARIANTS;
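
/*
 * Sketch: every mailbox payload overlays the same words through the
 * union above, and mbxCommand selects the live view.  MAILBOX_t (with
 * its mbxCommand and un members) is assumed from elsewhere in this
 * header; the helper itself is illustrative:
 */
static void demo_prep_unreg_vpi(MAILBOX_t *mb, uint16_t vpi)
{
	memset(mb, 0, sizeof(*mb));
	mb->mbxCommand = MBX_UNREG_VPI;		/* 0x97, defined above */
	mb->un.varUnregVpi.vpi = vpi;		/* payload via the union */
}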
2293 2645
2294/* 2646/*
@@ -2305,14 +2657,27 @@ struct lpfc_pgp {
2305 __le32 rspPutInx; 2657 __le32 rspPutInx;
2306}; 2658};
2307 2659
2308typedef struct _SLI2_DESC { 2660struct sli2_desc {
2309 struct lpfc_hgp host[MAX_RINGS];
2310 uint32_t unused1[16]; 2661 uint32_t unused1[16];
2662 struct lpfc_hgp host[MAX_RINGS];
2663 struct lpfc_pgp port[MAX_RINGS];
2664};
2665
2666struct sli3_desc {
2667 struct lpfc_hgp host[MAX_RINGS];
2668 uint32_t reserved[8];
2669 uint32_t hbq_put[16];
2670};
2671
2672struct sli3_pgp {
2311 struct lpfc_pgp port[MAX_RINGS]; 2673 struct lpfc_pgp port[MAX_RINGS];
2312} SLI2_DESC; 2674 uint32_t hbq_get[16];
2675};
2313 2676
2314typedef union { 2677typedef union {
2315 SLI2_DESC s2; 2678 struct sli2_desc s2;
2679 struct sli3_desc s3;
2680 struct sli3_pgp s3_pgp;
2316} SLI_VAR; 2681} SLI_VAR;
2317 2682
2318typedef struct { 2683typedef struct {
@@ -2618,6 +2983,25 @@ typedef struct {
2618 uint32_t fcpt_Length; /* transfer ready for IWRITE */ 2983 uint32_t fcpt_Length; /* transfer ready for IWRITE */
2619} FCPT_FIELDS64; 2984} FCPT_FIELDS64;
2620 2985
2986/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
2987 or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
2988
2989struct rcv_sli3 {
2990 uint32_t word8Rsvd;
2991#ifdef __BIG_ENDIAN_BITFIELD
2992 uint16_t vpi;
2993 uint16_t word9Rsvd;
2994#else /* __LITTLE_ENDIAN */
2995 uint16_t word9Rsvd;
2996 uint16_t vpi;
2997#endif
2998 uint32_t word10Rsvd;
2999 uint32_t acc_len; /* accumulated length */
3000 struct ulp_bde64 bde2;
3001};
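
/*
 * Sketch: with NPIV, a receive path can demultiplex an unsolicited
 * RCV_*64_CX frame to its virtual port through the vpi carried in the
 * SLI-3 extension words above (IOCB_t and its unsli3 member are defined
 * further down in this header; the helper itself is hypothetical):
 */
static uint16_t demo_rcv_frame_vpi(IOCB_t *iocb)
{
	return iocb->unsli3.rcvsli3.vpi;	/* from IOCB words 8-15 */
}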
3002
3003
3004
2621typedef struct _IOCB { /* IOCB structure */ 3005typedef struct _IOCB { /* IOCB structure */
2622 union { 3006 union {
2623 GENERIC_RSP grsp; /* Generic response */ 3007 GENERIC_RSP grsp; /* Generic response */
@@ -2632,8 +3016,8 @@ typedef struct _IOCB { /* IOCB structure */
2632 3016
2633 /* SLI-2 structures */ 3017 /* SLI-2 structures */
2634 3018
2635 struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation 3019 struct ulp_bde64 cont64[2]; /* up to 2 64 bit continuation
2636 bde_64s */ 3020 * bde_64s */
2637 ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */ 3021 ELS_REQUEST64 elsreq64; /* ELS_REQUEST template */
2638 GEN_REQUEST64 genreq64; /* GEN_REQUEST template */ 3022 GEN_REQUEST64 genreq64; /* GEN_REQUEST template */
2639 RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */ 3023 RCV_ELS_REQ64 rcvels64; /* RCV_ELS_REQ template */
@@ -2695,9 +3079,20 @@ typedef struct _IOCB { /* IOCB structure */
2695 uint32_t ulpTimeout:8; 3079 uint32_t ulpTimeout:8;
2696#endif 3080#endif
2697 3081
3082 union {
3083 struct rcv_sli3 rcvsli3; /* words 8 - 15 */
3084 uint32_t sli3Words[24]; /* 96 extra bytes for SLI-3 */
3085 } unsli3;
3086
3087#define ulpCt_h ulpXS
3088#define ulpCt_l ulpFCP2Rcvy
3089
3090#define IOCB_FCP 1 /* IOCB is used for FCP ELS cmds-ulpRsvByte */
3091#define IOCB_IP 2 /* IOCB is used for IP ELS cmds */
2698#define PARM_UNUSED 0 /* PU field (Word 4) not used */ 3092#define PARM_UNUSED 0 /* PU field (Word 4) not used */
2699#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */ 3093#define PARM_REL_OFF 1 /* PU field (Word 4) = R. O. */
2700#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */ 3094#define PARM_READ_CHECK 2 /* PU field (Word 4) = Data Transfer Length */
3095#define PARM_NPIV_DID 3
2701#define CLASS1 0 /* Class 1 */ 3096#define CLASS1 0 /* Class 1 */
2702#define CLASS2 1 /* Class 2 */ 3097#define CLASS2 1 /* Class 2 */
2703#define CLASS3 2 /* Class 3 */ 3098#define CLASS3 2 /* Class 3 */
@@ -2718,39 +3113,51 @@ typedef struct _IOCB { /* IOCB structure */
2718#define IOSTAT_RSVD2 0xC 3113#define IOSTAT_RSVD2 0xC
2719#define IOSTAT_RSVD3 0xD 3114#define IOSTAT_RSVD3 0xD
2720#define IOSTAT_RSVD4 0xE 3115#define IOSTAT_RSVD4 0xE
2721#define IOSTAT_RSVD5 0xF 3116#define IOSTAT_NEED_BUFFER 0xF
2722#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */ 3117#define IOSTAT_DRIVER_REJECT 0x10 /* ulpStatus - Driver defined */
2723#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */ 3118#define IOSTAT_DEFAULT 0xF /* Same as rsvd5 for now */
2724#define IOSTAT_CNT 0x11 3119#define IOSTAT_CNT 0x11
2725 3120
2726} IOCB_t; 3121} IOCB_t;
2727 3122
3123/* Structure used for a single HBQ entry */
3124struct lpfc_hbq_entry {
3125 struct ulp_bde64 bde;
3126 uint32_t buffer_tag;
3127};
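
/*
 * Sketch: what one HBQ slot holds when the driver posts a receive
 * buffer into host memory.  dma/len/tag are illustrative parameters;
 * ulp_bde64's tus.f.bdeSize field is assumed from this header.
 */
static void demo_fill_hbq_entry(struct lpfc_hbq_entry *hbqe,
				uint64_t dma, uint32_t len, uint32_t tag)
{
	hbqe->bde.addrLow  = (uint32_t)(dma & 0xffffffff);
	hbqe->bde.addrHigh = (uint32_t)(dma >> 32);
	hbqe->bde.tus.f.bdeSize = len;	/* buffer length in bytes */
	hbqe->buffer_tag = tag;	/* echoed back in the completion IOCB */
}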
3128
2728 3129
2729#define SLI1_SLIM_SIZE (4 * 1024) 3130#define SLI1_SLIM_SIZE (4 * 1024)
2730 3131
2731/* Up to 498 IOCBs will fit into 16k 3132/* Up to 498 IOCBs will fit into 16k
2732 * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384 3133 * 256 (MAILBOX_t) + 140 (PCB_t) + ( 32 (IOCB_t) * 498 ) = < 16384
2733 */ 3134 */
2734#define SLI2_SLIM_SIZE (16 * 1024) 3135#define SLI2_SLIM_SIZE (64 * 1024)
2735 3136
2736/* Maximum IOCBs that will fit in SLI2 slim */ 3137/* Maximum IOCBs that will fit in SLI2 slim */
2737#define MAX_SLI2_IOCB 498 3138#define MAX_SLI2_IOCB 498
3139#define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \
3140 (sizeof(MAILBOX_t) + sizeof(PCB_t)))
3141
3142/* HBQ entries are 4 words each = 4k */
3143#define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \
3144 lpfc_sli_hbq_count())
2738 3145
2739struct lpfc_sli2_slim { 3146struct lpfc_sli2_slim {
2740 MAILBOX_t mbx; 3147 MAILBOX_t mbx;
2741 PCB_t pcb; 3148 PCB_t pcb;
2742 IOCB_t IOCBs[MAX_SLI2_IOCB]; 3149 IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE];
2743}; 3150};
2744 3151
2745/******************************************************************* 3152/*
2746This macro check PCI device to allow special handling for LC HBAs. 3153 * This function checks PCI device to allow special handling for LC HBAs.
2747 3154 *
2748Parameters: 3155 * Parameters:
2749device : struct pci_dev 's device field 3156 * device : struct pci_dev 's device field
2750 3157 *
2751return 1 => TRUE 3158 * return 1 => TRUE
2752 0 => FALSE 3159 * 0 => FALSE
2753 *******************************************************************/ 3160 */
2754static inline int 3161static inline int
2755lpfc_is_LC_HBA(unsigned short device) 3162lpfc_is_LC_HBA(unsigned short device)
2756{ 3163{
@@ -2766,3 +3173,16 @@ lpfc_is_LC_HBA(unsigned short device)
2766 else 3173 else
2767 return 0; 3174 return 0;
2768} 3175}
3176
3177/*
3178 * Determine if an IOCB failed because of a link event or firmware reset.
3179 */
3180
3181static inline int
3182lpfc_error_lost_link(IOCB_t *iocbp)
3183{
3184 return (iocbp->ulpStatus == IOSTAT_LOCAL_REJECT &&
3185 (iocbp->un.ulpWord[4] == IOERR_SLI_ABORTED ||
3186 iocbp->un.ulpWord[4] == IOERR_LINK_DOWN ||
3187 iocbp->un.ulpWord[4] == IOERR_SLI_DOWN));
3188}
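
/*
 * Sketch: typical use of lpfc_error_lost_link() in a completion path --
 * skip retries for commands that failed only because the link or the
 * SLI went away.  The handler shape is hypothetical, not from the patch:
 */
static void demo_iocb_cmpl(struct lpfc_hba *phba, IOCB_t *rsp)
{
	if (lpfc_error_lost_link(rsp))
		return;	/* wait for the next link-up; retrying is futile */
	/* ... normal completion / retry handling ... */
}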
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 955b2e48d041..f81f85ee190f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -27,6 +27,7 @@
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/ctype.h>
30 31
31#include <scsi/scsi.h> 32#include <scsi/scsi.h>
32#include <scsi/scsi_device.h> 33#include <scsi/scsi_device.h>
@@ -40,15 +41,20 @@
40#include "lpfc.h" 41#include "lpfc.h"
41#include "lpfc_logmsg.h" 42#include "lpfc_logmsg.h"
42#include "lpfc_crtn.h" 43#include "lpfc_crtn.h"
44#include "lpfc_vport.h"
43#include "lpfc_version.h" 45#include "lpfc_version.h"
46#include "lpfc_vport.h"
44 47
45static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); 48static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
46static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 49static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
47static int lpfc_post_rcv_buf(struct lpfc_hba *); 50static int lpfc_post_rcv_buf(struct lpfc_hba *);
48 51
49static struct scsi_transport_template *lpfc_transport_template = NULL; 52static struct scsi_transport_template *lpfc_transport_template = NULL;
53static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
50static DEFINE_IDR(lpfc_hba_index); 54static DEFINE_IDR(lpfc_hba_index);
51 55
56
57
52/************************************************************************/ 58/************************************************************************/
53/* */ 59/* */
54/* lpfc_config_port_prep */ 60/* lpfc_config_port_prep */
@@ -61,7 +67,7 @@ static DEFINE_IDR(lpfc_hba_index);
61/* */ 67/* */
62/************************************************************************/ 68/************************************************************************/
63int 69int
64lpfc_config_port_prep(struct lpfc_hba * phba) 70lpfc_config_port_prep(struct lpfc_hba *phba)
65{ 71{
66 lpfc_vpd_t *vp = &phba->vpd; 72 lpfc_vpd_t *vp = &phba->vpd;
67 int i = 0, rc; 73 int i = 0, rc;
@@ -75,12 +81,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
75 81
76 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 82 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
77 if (!pmb) { 83 if (!pmb) {
78 phba->hba_state = LPFC_HBA_ERROR; 84 phba->link_state = LPFC_HBA_ERROR;
79 return -ENOMEM; 85 return -ENOMEM;
80 } 86 }
81 87
82 mb = &pmb->mb; 88 mb = &pmb->mb;
83 phba->hba_state = LPFC_INIT_MBX_CMDS; 89 phba->link_state = LPFC_INIT_MBX_CMDS;
84 90
85 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 91 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
86 if (init_key) { 92 if (init_key) {
@@ -100,9 +106,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
100 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 106 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
101 107
102 if (rc != MBX_SUCCESS) { 108 if (rc != MBX_SUCCESS) {
103 lpfc_printf_log(phba, 109 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
104 KERN_ERR,
105 LOG_MBOX,
106 "%d:0324 Config Port initialization " 110 "%d:0324 Config Port initialization "
107 "error, mbxCmd x%x READ_NVPARM, " 111 "error, mbxCmd x%x READ_NVPARM, "
108 "mbxStatus x%x\n", 112 "mbxStatus x%x\n",
@@ -112,16 +116,18 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
112 return -ERESTART; 116 return -ERESTART;
113 } 117 }
114 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename, 118 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
115 sizeof (mb->un.varRDnvp.nodename)); 119 sizeof(phba->wwnn));
120 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
121 sizeof(phba->wwpn));
116 } 122 }
117 123
124 phba->sli3_options = 0x0;
125
118 /* Setup and issue mailbox READ REV command */ 126 /* Setup and issue mailbox READ REV command */
119 lpfc_read_rev(phba, pmb); 127 lpfc_read_rev(phba, pmb);
120 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 128 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
121 if (rc != MBX_SUCCESS) { 129 if (rc != MBX_SUCCESS) {
122 lpfc_printf_log(phba, 130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
123 KERN_ERR,
124 LOG_INIT,
125 "%d:0439 Adapter failed to init, mbxCmd x%x " 131 "%d:0439 Adapter failed to init, mbxCmd x%x "
126 "READ_REV, mbxStatus x%x\n", 132 "READ_REV, mbxStatus x%x\n",
127 phba->brd_no, 133 phba->brd_no,
@@ -130,6 +136,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
130 return -ERESTART; 136 return -ERESTART;
131 } 137 }
132 138
139
133 /* 140 /*
134 * The value of rr must be 1 since the driver set the cv field to 1. 141 * The value of rr must be 1 since the driver set the cv field to 1.
135 * This setting requires the FW to set all revision fields. 142 * This setting requires the FW to set all revision fields.
@@ -144,8 +151,12 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
144 return -ERESTART; 151 return -ERESTART;
145 } 152 }
146 153
154 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp)
155 return -EINVAL;
156
147 /* Save information as VPD data */ 157 /* Save information as VPD data */
148 vp->rev.rBit = 1; 158 vp->rev.rBit = 1;
159 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
149 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev; 160 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
150 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16); 161 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
151 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev; 162 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
@@ -161,6 +172,13 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
161 vp->rev.postKernRev = mb->un.varRdRev.postKernRev; 172 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
162 vp->rev.opFwRev = mb->un.varRdRev.opFwRev; 173 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
163 174
 175 /* If the sli feature level is less than 9, we must
176 * tear down all RPIs and VPIs on link down if NPIV
177 * is enabled.
178 */
179 if (vp->rev.feaLevelHigh < 9)
180 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
181
164 if (lpfc_is_LC_HBA(phba->pcidev->device)) 182 if (lpfc_is_LC_HBA(phba->pcidev->device))
165 memcpy(phba->RandomData, (char *)&mb->un.varWords[24], 183 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
166 sizeof (phba->RandomData)); 184 sizeof (phba->RandomData));
@@ -188,7 +206,7 @@ lpfc_config_port_prep(struct lpfc_hba * phba)
188 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) 206 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
189 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; 207 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
190 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset, 208 lpfc_sli_pcimem_bcopy(pmb->context2, lpfc_vpd_data + offset,
191 mb->un.varDmp.word_cnt); 209 mb->un.varDmp.word_cnt);
192 offset += mb->un.varDmp.word_cnt; 210 offset += mb->un.varDmp.word_cnt;
193 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE); 211 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
194 lpfc_parse_vpd(phba, lpfc_vpd_data, offset); 212 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
@@ -212,48 +230,34 @@ out_free_mbox:
212/* */ 230/* */
213/************************************************************************/ 231/************************************************************************/
214int 232int
215lpfc_config_port_post(struct lpfc_hba * phba) 233lpfc_config_port_post(struct lpfc_hba *phba)
216{ 234{
235 struct lpfc_vport *vport = phba->pport;
217 LPFC_MBOXQ_t *pmb; 236 LPFC_MBOXQ_t *pmb;
218 MAILBOX_t *mb; 237 MAILBOX_t *mb;
219 struct lpfc_dmabuf *mp; 238 struct lpfc_dmabuf *mp;
220 struct lpfc_sli *psli = &phba->sli; 239 struct lpfc_sli *psli = &phba->sli;
221 uint32_t status, timeout; 240 uint32_t status, timeout;
222 int i, j, rc; 241 int i, j;
242 int rc;
223 243
224 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 244 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225 if (!pmb) { 245 if (!pmb) {
226 phba->hba_state = LPFC_HBA_ERROR; 246 phba->link_state = LPFC_HBA_ERROR;
227 return -ENOMEM; 247 return -ENOMEM;
228 } 248 }
229 mb = &pmb->mb; 249 mb = &pmb->mb;
230 250
231 lpfc_config_link(phba, pmb);
232 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
233 if (rc != MBX_SUCCESS) {
234 lpfc_printf_log(phba,
235 KERN_ERR,
236 LOG_INIT,
237 "%d:0447 Adapter failed init, mbxCmd x%x "
238 "CONFIG_LINK mbxStatus x%x\n",
239 phba->brd_no,
240 mb->mbxCommand, mb->mbxStatus);
241 phba->hba_state = LPFC_HBA_ERROR;
242 mempool_free( pmb, phba->mbox_mem_pool);
243 return -EIO;
244 }
245
246 /* Get login parameters for NID. */ 251 /* Get login parameters for NID. */
247 lpfc_read_sparam(phba, pmb); 252 lpfc_read_sparam(phba, pmb, 0);
253 pmb->vport = vport;
248 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 254 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
249 lpfc_printf_log(phba, 255 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
250 KERN_ERR,
251 LOG_INIT,
252 "%d:0448 Adapter failed init, mbxCmd x%x " 256 "%d:0448 Adapter failed init, mbxCmd x%x "
253 "READ_SPARM mbxStatus x%x\n", 257 "READ_SPARM mbxStatus x%x\n",
254 phba->brd_no, 258 phba->brd_no,
255 mb->mbxCommand, mb->mbxStatus); 259 mb->mbxCommand, mb->mbxStatus);
256 phba->hba_state = LPFC_HBA_ERROR; 260 phba->link_state = LPFC_HBA_ERROR;
257 mp = (struct lpfc_dmabuf *) pmb->context1; 261 mp = (struct lpfc_dmabuf *) pmb->context1;
258 mempool_free( pmb, phba->mbox_mem_pool); 262 mempool_free( pmb, phba->mbox_mem_pool);
259 lpfc_mbuf_free(phba, mp->virt, mp->phys); 263 lpfc_mbuf_free(phba, mp->virt, mp->phys);
@@ -263,25 +267,27 @@ lpfc_config_port_post(struct lpfc_hba * phba)
263 267
264 mp = (struct lpfc_dmabuf *) pmb->context1; 268 mp = (struct lpfc_dmabuf *) pmb->context1;
265 269
266 memcpy(&phba->fc_sparam, mp->virt, sizeof (struct serv_parm)); 270 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
267 lpfc_mbuf_free(phba, mp->virt, mp->phys); 271 lpfc_mbuf_free(phba, mp->virt, mp->phys);
268 kfree(mp); 272 kfree(mp);
269 pmb->context1 = NULL; 273 pmb->context1 = NULL;
270 274
271 if (phba->cfg_soft_wwnn) 275 if (phba->cfg_soft_wwnn)
272 u64_to_wwn(phba->cfg_soft_wwnn, phba->fc_sparam.nodeName.u.wwn); 276 u64_to_wwn(phba->cfg_soft_wwnn,
277 vport->fc_sparam.nodeName.u.wwn);
273 if (phba->cfg_soft_wwpn) 278 if (phba->cfg_soft_wwpn)
274 u64_to_wwn(phba->cfg_soft_wwpn, phba->fc_sparam.portName.u.wwn); 279 u64_to_wwn(phba->cfg_soft_wwpn,
275 memcpy(&phba->fc_nodename, &phba->fc_sparam.nodeName, 280 vport->fc_sparam.portName.u.wwn);
281 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
276 sizeof (struct lpfc_name)); 282 sizeof (struct lpfc_name));
277 memcpy(&phba->fc_portname, &phba->fc_sparam.portName, 283 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
278 sizeof (struct lpfc_name)); 284 sizeof (struct lpfc_name));
279 /* If no serial number in VPD data, use low 6 bytes of WWNN */ 285 /* If no serial number in VPD data, use low 6 bytes of WWNN */
280 /* This should be consolidated into parse_vpd ? - mr */ 286 /* This should be consolidated into parse_vpd ? - mr */
281 if (phba->SerialNumber[0] == 0) { 287 if (phba->SerialNumber[0] == 0) {
282 uint8_t *outptr; 288 uint8_t *outptr;
283 289
284 outptr = &phba->fc_nodename.u.s.IEEE[0]; 290 outptr = &vport->fc_nodename.u.s.IEEE[0];
285 for (i = 0; i < 12; i++) { 291 for (i = 0; i < 12; i++) {
286 status = *outptr++; 292 status = *outptr++;
287 j = ((status & 0xf0) >> 4); 293 j = ((status & 0xf0) >> 4);
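
/*
 * Sketch of what this serial-number fallback computes: each of the six
 * IEEE bytes of the WWNN becomes two hex digits, high nibble first
 * (digit case per the driver's convention is an assumption here):
 */
static char demo_nibble_to_hex(uint32_t j)
{
	return (j <= 9) ? (char)('0' + j) : (char)('a' + (j - 10));
}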
@@ -303,15 +309,14 @@ lpfc_config_port_post(struct lpfc_hba * phba)
303 } 309 }
304 310
305 lpfc_read_config(phba, pmb); 311 lpfc_read_config(phba, pmb);
312 pmb->vport = vport;
306 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 313 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
307 lpfc_printf_log(phba, 314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
308 KERN_ERR,
309 LOG_INIT,
310 "%d:0453 Adapter failed to init, mbxCmd x%x " 315 "%d:0453 Adapter failed to init, mbxCmd x%x "
311 "READ_CONFIG, mbxStatus x%x\n", 316 "READ_CONFIG, mbxStatus x%x\n",
312 phba->brd_no, 317 phba->brd_no,
313 mb->mbxCommand, mb->mbxStatus); 318 mb->mbxCommand, mb->mbxStatus);
314 phba->hba_state = LPFC_HBA_ERROR; 319 phba->link_state = LPFC_HBA_ERROR;
315 mempool_free( pmb, phba->mbox_mem_pool); 320 mempool_free( pmb, phba->mbox_mem_pool);
316 return -EIO; 321 return -EIO;
317 } 322 }
@@ -338,9 +343,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
338 || ((phba->cfg_link_speed == LINK_SPEED_10G) 343 || ((phba->cfg_link_speed == LINK_SPEED_10G)
339 && !(phba->lmt & LMT_10Gb))) { 344 && !(phba->lmt & LMT_10Gb))) {
340 /* Reset link speed to auto */ 345 /* Reset link speed to auto */
341 lpfc_printf_log(phba, 346 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
342 KERN_WARNING,
343 LOG_LINK_EVENT,
344 "%d:1302 Invalid speed for this board: " 347 "%d:1302 Invalid speed for this board: "
345 "Reset link speed to auto: x%x\n", 348 "Reset link speed to auto: x%x\n",
346 phba->brd_no, 349 phba->brd_no,
@@ -348,7 +351,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
348 phba->cfg_link_speed = LINK_SPEED_AUTO; 351 phba->cfg_link_speed = LINK_SPEED_AUTO;
349 } 352 }
350 353
351 phba->hba_state = LPFC_LINK_DOWN; 354 phba->link_state = LPFC_LINK_DOWN;
352 355
353 /* Only process IOCBs on ring 0 till hba_state is READY */ 356 /* Only process IOCBs on ring 0 till hba_state is READY */
354 if (psli->ring[psli->extra_ring].cmdringaddr) 357 if (psli->ring[psli->extra_ring].cmdringaddr)
@@ -359,10 +362,11 @@ lpfc_config_port_post(struct lpfc_hba * phba)
359 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT; 362 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
360 363
361 /* Post receive buffers for desired rings */ 364 /* Post receive buffers for desired rings */
362 lpfc_post_rcv_buf(phba); 365 if (phba->sli_rev != 3)
366 lpfc_post_rcv_buf(phba);
363 367
364 /* Enable appropriate host interrupts */ 368 /* Enable appropriate host interrupts */
365 spin_lock_irq(phba->host->host_lock); 369 spin_lock_irq(&phba->hbalock);
366 status = readl(phba->HCregaddr); 370 status = readl(phba->HCregaddr);
367 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; 371 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
368 if (psli->num_rings > 0) 372 if (psli->num_rings > 0)
@@ -380,22 +384,24 @@ lpfc_config_port_post(struct lpfc_hba * phba)
380 384
381 writel(status, phba->HCregaddr); 385 writel(status, phba->HCregaddr);
382 readl(phba->HCregaddr); /* flush */ 386 readl(phba->HCregaddr); /* flush */
383 spin_unlock_irq(phba->host->host_lock); 387 spin_unlock_irq(&phba->hbalock);
384 388
385 /* 389 /*
386 * Setup the ring 0 (els) timeout handler 390 * Setup the ring 0 (els) timeout handler
387 */ 391 */
388 timeout = phba->fc_ratov << 1; 392 timeout = phba->fc_ratov << 1;
389 mod_timer(&phba->els_tmofunc, jiffies + HZ * timeout); 393 mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
394 mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
395 phba->hb_outstanding = 0;
396 phba->last_completion_time = jiffies;
390 397
391 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); 398 lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
392 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 399 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
400 pmb->vport = vport;
393 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 401 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
394 lpfc_set_loopback_flag(phba); 402 lpfc_set_loopback_flag(phba);
395 if (rc != MBX_SUCCESS) { 403 if (rc != MBX_SUCCESS) {
396 lpfc_printf_log(phba, 404 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
397 KERN_ERR,
398 LOG_INIT,
399 "%d:0454 Adapter failed to init, mbxCmd x%x " 405 "%d:0454 Adapter failed to init, mbxCmd x%x "
400 "INIT_LINK, mbxStatus x%x\n", 406 "INIT_LINK, mbxStatus x%x\n",
401 phba->brd_no, 407 phba->brd_no,
@@ -408,7 +414,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
408 writel(0xffffffff, phba->HAregaddr); 414 writel(0xffffffff, phba->HAregaddr);
409 readl(phba->HAregaddr); /* flush */ 415 readl(phba->HAregaddr); /* flush */
410 416
411 phba->hba_state = LPFC_HBA_ERROR; 417 phba->link_state = LPFC_HBA_ERROR;
412 if (rc != MBX_BUSY) 418 if (rc != MBX_BUSY)
413 mempool_free(pmb, phba->mbox_mem_pool); 419 mempool_free(pmb, phba->mbox_mem_pool);
414 return -EIO; 420 return -EIO;
@@ -429,18 +435,19 @@ lpfc_config_port_post(struct lpfc_hba * phba)
429/* */ 435/* */
430/************************************************************************/ 436/************************************************************************/
431int 437int
432lpfc_hba_down_prep(struct lpfc_hba * phba) 438lpfc_hba_down_prep(struct lpfc_hba *phba)
433{ 439{
440 struct lpfc_vport *vport = phba->pport;
441
434 /* Disable interrupts */ 442 /* Disable interrupts */
435 writel(0, phba->HCregaddr); 443 writel(0, phba->HCregaddr);
436 readl(phba->HCregaddr); /* flush */ 444 readl(phba->HCregaddr); /* flush */
437 445
438 /* Cleanup potential discovery resources */ 446 list_for_each_entry(vport, &phba->port_list, listentry) {
439 lpfc_els_flush_rscn(phba); 447 lpfc_cleanup_discovery_resources(vport);
440 lpfc_els_flush_cmd(phba); 448 }
441 lpfc_disc_flush_list(phba);
442 449
443 return (0); 450 return 0;
444} 451}
445 452
446/************************************************************************/ 453/************************************************************************/
@@ -453,20 +460,24 @@ lpfc_hba_down_prep(struct lpfc_hba * phba)
453/* */ 460/* */
454/************************************************************************/ 461/************************************************************************/
455int 462int
456lpfc_hba_down_post(struct lpfc_hba * phba) 463lpfc_hba_down_post(struct lpfc_hba *phba)
457{ 464{
458 struct lpfc_sli *psli = &phba->sli; 465 struct lpfc_sli *psli = &phba->sli;
459 struct lpfc_sli_ring *pring; 466 struct lpfc_sli_ring *pring;
460 struct lpfc_dmabuf *mp, *next_mp; 467 struct lpfc_dmabuf *mp, *next_mp;
461 int i; 468 int i;
462 469
463 /* Cleanup preposted buffers on the ELS ring */ 470 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
464 pring = &psli->ring[LPFC_ELS_RING]; 471 lpfc_sli_hbqbuf_free_all(phba);
465 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 472 else {
466 list_del(&mp->list); 473 /* Cleanup preposted buffers on the ELS ring */
467 pring->postbufq_cnt--; 474 pring = &psli->ring[LPFC_ELS_RING];
468 lpfc_mbuf_free(phba, mp->virt, mp->phys); 475 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
469 kfree(mp); 476 list_del(&mp->list);
477 pring->postbufq_cnt--;
478 lpfc_mbuf_free(phba, mp->virt, mp->phys);
479 kfree(mp);
480 }
470 } 481 }
471 482
472 for (i = 0; i < psli->num_rings; i++) { 483 for (i = 0; i < psli->num_rings; i++) {
@@ -477,6 +488,119 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
477 return 0; 488 return 0;
478} 489}
479 490
491/* HBA heart beat timeout handler */
492void
493lpfc_hb_timeout(unsigned long ptr)
494{
495 struct lpfc_hba *phba;
496 unsigned long iflag;
497
498 phba = (struct lpfc_hba *)ptr;
499 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
500 if (!(phba->pport->work_port_events & WORKER_HB_TMO))
501 phba->pport->work_port_events |= WORKER_HB_TMO;
502 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
503
504 if (phba->work_wait)
505 wake_up(phba->work_wait);
506 return;
507}
508
509static void
510lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
511{
512 unsigned long drvr_flag;
513
514 spin_lock_irqsave(&phba->hbalock, drvr_flag);
515 phba->hb_outstanding = 0;
516 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
517
518 mempool_free(pmboxq, phba->mbox_mem_pool);
519 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
520 !(phba->link_state == LPFC_HBA_ERROR) &&
521 !(phba->pport->fc_flag & FC_UNLOADING))
522 mod_timer(&phba->hb_tmofunc,
523 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
524 return;
525}
526
527void
528lpfc_hb_timeout_handler(struct lpfc_hba *phba)
529{
530 LPFC_MBOXQ_t *pmboxq;
531 int retval;
532 struct lpfc_sli *psli = &phba->sli;
533
534 if ((phba->link_state == LPFC_HBA_ERROR) ||
535 (phba->pport->fc_flag & FC_UNLOADING) ||
536 (phba->pport->fc_flag & FC_OFFLINE_MODE))
537 return;
538
539 spin_lock_irq(&phba->pport->work_port_lock);
 540 /* If the timer is already canceled, do nothing */
541 if (!(phba->pport->work_port_events & WORKER_HB_TMO)) {
542 spin_unlock_irq(&phba->pport->work_port_lock);
543 return;
544 }
545
546 if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
547 jiffies)) {
548 spin_unlock_irq(&phba->pport->work_port_lock);
549 if (!phba->hb_outstanding)
550 mod_timer(&phba->hb_tmofunc,
551 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
552 else
553 mod_timer(&phba->hb_tmofunc,
554 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
555 return;
556 }
557 spin_unlock_irq(&phba->pport->work_port_lock);
558
559 /* If there is no heart beat outstanding, issue a heartbeat command */
560 if (!phba->hb_outstanding) {
 561 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
562 if (!pmboxq) {
563 mod_timer(&phba->hb_tmofunc,
564 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
565 return;
566 }
567
568 lpfc_heart_beat(phba, pmboxq);
569 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
570 pmboxq->vport = phba->pport;
571 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
572
573 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
574 mempool_free(pmboxq, phba->mbox_mem_pool);
575 mod_timer(&phba->hb_tmofunc,
576 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
577 return;
578 }
579 mod_timer(&phba->hb_tmofunc,
580 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
581 phba->hb_outstanding = 1;
582 return;
583 } else {
584 /*
585 * If heart beat timeout called with hb_outstanding set we
586 * need to take the HBA offline.
587 */
588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
589 "%d:0459 Adapter heartbeat failure, taking "
590 "this port offline.\n", phba->brd_no);
591
592 spin_lock_irq(&phba->hbalock);
593 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
594 spin_unlock_irq(&phba->hbalock);
595
596 lpfc_offline_prep(phba);
597 lpfc_offline(phba);
598 lpfc_unblock_mgmt_io(phba);
599 phba->link_state = LPFC_HBA_ERROR;
600 lpfc_hba_down_post(phba);
601 }
602}
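
/*
 * Condensed sketch of the decision lpfc_hb_timeout_handler() makes on
 * each timer pop (kernel context and the LPFC_HB_MBOX_* constants from
 * above are assumed; the helper itself is illustrative):
 */
static void demo_hb_decision(struct lpfc_hba *phba)
{
	if (time_after(phba->last_completion_time +
		       LPFC_HB_MBOX_INTERVAL * HZ, jiffies)) {
		/* recent I/O completions already prove the HBA is alive:
		 * just re-arm the timer, no mailbox traffic needed */
	} else if (!phba->hb_outstanding) {
		/* idle for a full interval: issue MBX_HEARTBEAT and give
		 * the firmware LPFC_HB_MBOX_TIMEOUT seconds to complete */
	} else {
		/* a heartbeat was already outstanding and never came
		 * back: declare the adapter dead and take it offline */
	}
}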
603
480/************************************************************************/ 604/************************************************************************/
481/* */ 605/* */
482/* lpfc_handle_eratt */ 606/* lpfc_handle_eratt */
@@ -486,11 +610,15 @@ lpfc_hba_down_post(struct lpfc_hba * phba)
486/* */ 610/* */
487/************************************************************************/ 611/************************************************************************/
488void 612void
489lpfc_handle_eratt(struct lpfc_hba * phba) 613lpfc_handle_eratt(struct lpfc_hba *phba)
490{ 614{
491 struct lpfc_sli *psli = &phba->sli; 615 struct lpfc_vport *vport = phba->pport;
616 struct lpfc_sli *psli = &phba->sli;
492 struct lpfc_sli_ring *pring; 617 struct lpfc_sli_ring *pring;
618 struct lpfc_vport *port_iterator;
493 uint32_t event_data; 619 uint32_t event_data;
620 struct Scsi_Host *shost;
621
494 /* If the pci channel is offline, ignore possible errors, 622 /* If the pci channel is offline, ignore possible errors,
495 * since we cannot communicate with the pci card anyway. */ 623 * since we cannot communicate with the pci card anyway. */
496 if (pci_channel_offline(phba->pcidev)) 624 if (pci_channel_offline(phba->pcidev))
@@ -504,10 +632,17 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
504 "Data: x%x x%x x%x\n", 632 "Data: x%x x%x x%x\n",
505 phba->brd_no, phba->work_hs, 633 phba->brd_no, phba->work_hs,
506 phba->work_status[0], phba->work_status[1]); 634 phba->work_status[0], phba->work_status[1]);
507 spin_lock_irq(phba->host->host_lock); 635 list_for_each_entry(port_iterator, &phba->port_list,
508 phba->fc_flag |= FC_ESTABLISH_LINK; 636 listentry) {
637 shost = lpfc_shost_from_vport(port_iterator);
638
639 spin_lock_irq(shost->host_lock);
640 port_iterator->fc_flag |= FC_ESTABLISH_LINK;
641 spin_unlock_irq(shost->host_lock);
642 }
643 spin_lock_irq(&phba->hbalock);
509 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 644 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
510 spin_unlock_irq(phba->host->host_lock); 645 spin_unlock_irq(&phba->hbalock);
511 646
512 /* 647 /*
513 * Firmware stops when it triggered erratt with HS_FFER6. 648
@@ -544,15 +679,18 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
544 phba->work_status[0], phba->work_status[1]); 679 phba->work_status[0], phba->work_status[1]);
545 680
546 event_data = FC_REG_DUMP_EVENT; 681 event_data = FC_REG_DUMP_EVENT;
547 fc_host_post_vendor_event(phba->host, fc_get_event_number(), 682 shost = lpfc_shost_from_vport(vport);
683 fc_host_post_vendor_event(shost, fc_get_event_number(),
548 sizeof(event_data), (char *) &event_data, 684 sizeof(event_data), (char *) &event_data,
549 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 685 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
550 686
687 spin_lock_irq(&phba->hbalock);
551 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 688 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
689 spin_unlock_irq(&phba->hbalock);
552 lpfc_offline_prep(phba); 690 lpfc_offline_prep(phba);
553 lpfc_offline(phba); 691 lpfc_offline(phba);
554 lpfc_unblock_mgmt_io(phba); 692 lpfc_unblock_mgmt_io(phba);
555 phba->hba_state = LPFC_HBA_ERROR; 693 phba->link_state = LPFC_HBA_ERROR;
556 lpfc_hba_down_post(phba); 694 lpfc_hba_down_post(phba);
557 } 695 }
558} 696}
@@ -566,9 +704,11 @@ lpfc_handle_eratt(struct lpfc_hba * phba)
566/* */ 704/* */
567/************************************************************************/ 705/************************************************************************/
568void 706void
569lpfc_handle_latt(struct lpfc_hba * phba) 707lpfc_handle_latt(struct lpfc_hba *phba)
570{ 708{
571 struct lpfc_sli *psli = &phba->sli; 709 struct lpfc_vport *vport = phba->pport;
710 struct lpfc_sli *psli = &phba->sli;
711 struct lpfc_vport *port_iterator;
572 LPFC_MBOXQ_t *pmb; 712 LPFC_MBOXQ_t *pmb;
573 volatile uint32_t control; 713 volatile uint32_t control;
574 struct lpfc_dmabuf *mp; 714 struct lpfc_dmabuf *mp;
@@ -589,20 +729,22 @@ lpfc_handle_latt(struct lpfc_hba * phba)
589 rc = -EIO; 729 rc = -EIO;
590 730
591 /* Cleanup any outstanding ELS commands */ 731 /* Cleanup any outstanding ELS commands */
592 lpfc_els_flush_cmd(phba); 732 list_for_each_entry(port_iterator, &phba->port_list, listentry)
733 lpfc_els_flush_cmd(port_iterator);
593 734
594 psli->slistat.link_event++; 735 psli->slistat.link_event++;
595 lpfc_read_la(phba, pmb, mp); 736 lpfc_read_la(phba, pmb, mp);
596 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la; 737 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
738 pmb->vport = vport;
597 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB)); 739 rc = lpfc_sli_issue_mbox (phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB));
598 if (rc == MBX_NOT_FINISHED) 740 if (rc == MBX_NOT_FINISHED)
599 goto lpfc_handle_latt_free_mbuf; 741 goto lpfc_handle_latt_free_mbuf;
600 742
601 /* Clear Link Attention in HA REG */ 743 /* Clear Link Attention in HA REG */
602 spin_lock_irq(phba->host->host_lock); 744 spin_lock_irq(&phba->hbalock);
603 writel(HA_LATT, phba->HAregaddr); 745 writel(HA_LATT, phba->HAregaddr);
604 readl(phba->HAregaddr); /* flush */ 746 readl(phba->HAregaddr); /* flush */
605 spin_unlock_irq(phba->host->host_lock); 747 spin_unlock_irq(&phba->hbalock);
606 748
607 return; 749 return;
608 750
@@ -614,7 +756,7 @@ lpfc_handle_latt_free_pmb:
614 mempool_free(pmb, phba->mbox_mem_pool); 756 mempool_free(pmb, phba->mbox_mem_pool);
615lpfc_handle_latt_err_exit: 757lpfc_handle_latt_err_exit:
616 /* Enable Link attention interrupts */ 758 /* Enable Link attention interrupts */
617 spin_lock_irq(phba->host->host_lock); 759 spin_lock_irq(&phba->hbalock);
618 psli->sli_flag |= LPFC_PROCESS_LA; 760 psli->sli_flag |= LPFC_PROCESS_LA;
619 control = readl(phba->HCregaddr); 761 control = readl(phba->HCregaddr);
620 control |= HC_LAINT_ENA; 762 control |= HC_LAINT_ENA;
@@ -624,15 +766,13 @@ lpfc_handle_latt_err_exit:
624 /* Clear Link Attention in HA REG */ 766 /* Clear Link Attention in HA REG */
625 writel(HA_LATT, phba->HAregaddr); 767 writel(HA_LATT, phba->HAregaddr);
626 readl(phba->HAregaddr); /* flush */ 768 readl(phba->HAregaddr); /* flush */
627 spin_unlock_irq(phba->host->host_lock); 769 spin_unlock_irq(&phba->hbalock);
628 lpfc_linkdown(phba); 770 lpfc_linkdown(phba);
629 phba->hba_state = LPFC_HBA_ERROR; 771 phba->link_state = LPFC_HBA_ERROR;
630 772
631 /* The other case is an error from issue_mbox */ 773 /* The other case is an error from issue_mbox */
632 if (rc == -ENOMEM) 774 if (rc == -ENOMEM)
633 lpfc_printf_log(phba, 775 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
634 KERN_WARNING,
635 LOG_MBOX,
636 "%d:0300 READ_LA: no buffers\n", 776 "%d:0300 READ_LA: no buffers\n",
637 phba->brd_no); 777 phba->brd_no);
638 778
@@ -646,7 +786,7 @@ lpfc_handle_latt_err_exit:
646/* */ 786/* */
647/************************************************************************/ 787/************************************************************************/
648static int 788static int
649lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len) 789lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
650{ 790{
651 uint8_t lenlo, lenhi; 791 uint8_t lenlo, lenhi;
652 int Length; 792 int Length;
@@ -658,9 +798,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
658 return 0; 798 return 0;
659 799
660 /* Vital Product */ 800 /* Vital Product */
661 lpfc_printf_log(phba, 801 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
662 KERN_INFO,
663 LOG_INIT,
664 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n", 802 "%d:0455 Vital Product Data: x%x x%x x%x x%x\n",
665 phba->brd_no, 803 phba->brd_no,
666 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2], 804 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
@@ -785,7 +923,7 @@ lpfc_parse_vpd(struct lpfc_hba * phba, uint8_t * vpd, int len)
785} 923}
786 924
787static void 925static void
788lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) 926lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
789{ 927{
790 lpfc_vpd_t *vp; 928 lpfc_vpd_t *vp;
791 uint16_t dev_id = phba->pcidev->device; 929 uint16_t dev_id = phba->pcidev->device;
@@ -943,7 +1081,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp)
943/* Returns the number of buffers NOT posted. */ 1081/* Returns the number of buffers NOT posted. */
944/**************************************************/ 1082/**************************************************/
945int 1083int
946lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt, 1084lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt,
947 int type) 1085 int type)
948{ 1086{
949 IOCB_t *icmd; 1087 IOCB_t *icmd;
@@ -955,9 +1093,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
955 /* While there are buffers to post */ 1093 /* While there are buffers to post */
956 while (cnt > 0) { 1094 while (cnt > 0) {
957 /* Allocate buffer for command iocb */ 1095 /* Allocate buffer for command iocb */
958 spin_lock_irq(phba->host->host_lock);
959 iocb = lpfc_sli_get_iocbq(phba); 1096 iocb = lpfc_sli_get_iocbq(phba);
960 spin_unlock_irq(phba->host->host_lock);
961 if (iocb == NULL) { 1097 if (iocb == NULL) {
962 pring->missbufcnt = cnt; 1098 pring->missbufcnt = cnt;
963 return cnt; 1099 return cnt;
@@ -972,9 +1108,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
972 &mp1->phys); 1108 &mp1->phys);
973 if (mp1 == 0 || mp1->virt == 0) { 1109 if (mp1 == 0 || mp1->virt == 0) {
974 kfree(mp1); 1110 kfree(mp1);
975 spin_lock_irq(phba->host->host_lock);
976 lpfc_sli_release_iocbq(phba, iocb); 1111 lpfc_sli_release_iocbq(phba, iocb);
977 spin_unlock_irq(phba->host->host_lock);
978 pring->missbufcnt = cnt; 1112 pring->missbufcnt = cnt;
979 return cnt; 1113 return cnt;
980 } 1114 }
@@ -990,9 +1124,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
990 kfree(mp2); 1124 kfree(mp2);
991 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1125 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
992 kfree(mp1); 1126 kfree(mp1);
993 spin_lock_irq(phba->host->host_lock);
994 lpfc_sli_release_iocbq(phba, iocb); 1127 lpfc_sli_release_iocbq(phba, iocb);
995 spin_unlock_irq(phba->host->host_lock);
996 pring->missbufcnt = cnt; 1128 pring->missbufcnt = cnt;
997 return cnt; 1129 return cnt;
998 } 1130 }
@@ -1018,7 +1150,6 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
1018 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; 1150 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1019 icmd->ulpLe = 1; 1151 icmd->ulpLe = 1;
1020 1152
1021 spin_lock_irq(phba->host->host_lock);
1022 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { 1153 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
1023 lpfc_mbuf_free(phba, mp1->virt, mp1->phys); 1154 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1024 kfree(mp1); 1155 kfree(mp1);
@@ -1030,14 +1161,11 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
1030 } 1161 }
1031 lpfc_sli_release_iocbq(phba, iocb); 1162 lpfc_sli_release_iocbq(phba, iocb);
1032 pring->missbufcnt = cnt; 1163 pring->missbufcnt = cnt;
1033 spin_unlock_irq(phba->host->host_lock);
1034 return cnt; 1164 return cnt;
1035 } 1165 }
1036 spin_unlock_irq(phba->host->host_lock);
1037 lpfc_sli_ringpostbuf_put(phba, pring, mp1); 1166 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1038 if (mp2) { 1167 if (mp2)
1039 lpfc_sli_ringpostbuf_put(phba, pring, mp2); 1168 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1040 }
1041 } 1169 }
1042 pring->missbufcnt = 0; 1170 pring->missbufcnt = 0;
1043 return 0; 1171 return 0;
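
    The hunks above strip the host_lock acquire/release that used to bracket
    lpfc_sli_get_iocbq(), lpfc_sli_release_iocbq() and lpfc_sli_issue_iocb();
    after this rework those SLI routines serialize internally on the HBA-wide
    phba->hbalock. A minimal sketch of the resulting pattern, assuming a
    simplified iocb free list (the *_sketch types below are abbreviations,
    not the driver's real definitions):

        #include <linux/spinlock.h>
        #include <linux/list.h>

        struct iocbq_sketch { struct list_head list; };

        struct hba_sketch {
                spinlock_t hbalock;              /* HBA-wide lock replacing host_lock */
                struct list_head lpfc_iocb_list; /* free iocb pool */
        };

        /* The SLI routine takes the lock itself, so a caller such as
         * lpfc_post_buffer() no longer wraps the call in host_lock. */
        static struct iocbq_sketch *get_iocbq_sketch(struct hba_sketch *phba)
        {
                struct iocbq_sketch *iocb = NULL;

                spin_lock_irq(&phba->hbalock);
                if (!list_empty(&phba->lpfc_iocb_list)) {
                        iocb = list_entry(phba->lpfc_iocb_list.next,
                                          struct iocbq_sketch, list);
                        list_del_init(&iocb->list);
                }
                spin_unlock_irq(&phba->hbalock);
                return iocb;
        }
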
@@ -1050,7 +1178,7 @@ lpfc_post_buffer(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, int cnt,
1050/* */ 1178/* */
1051/************************************************************************/ 1179/************************************************************************/
1052static int 1180static int
1053lpfc_post_rcv_buf(struct lpfc_hba * phba) 1181lpfc_post_rcv_buf(struct lpfc_hba *phba)
1054{ 1182{
1055 struct lpfc_sli *psli = &phba->sli; 1183 struct lpfc_sli *psli = &phba->sli;
1056 1184
@@ -1151,7 +1279,7 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1151{ 1279{
1152 int t; 1280 int t;
1153 uint32_t *HashWorking; 1281 uint32_t *HashWorking;
1154 uint32_t *pwwnn = phba->wwnn; 1282 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1155 1283
1156 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL); 1284 HashWorking = kmalloc(80 * sizeof(uint32_t), GFP_KERNEL);
1157 if (!HashWorking) 1285 if (!HashWorking)
@@ -1170,64 +1298,76 @@ lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1170} 1298}
1171 1299
1172static void 1300static void
1173lpfc_cleanup(struct lpfc_hba * phba) 1301lpfc_cleanup(struct lpfc_vport *vport)
1174{ 1302{
1175 struct lpfc_nodelist *ndlp, *next_ndlp; 1303 struct lpfc_nodelist *ndlp, *next_ndlp;
1176 1304
1177 /* clean up phba - lpfc specific */ 1305 /* clean up phba - lpfc specific */
1178 lpfc_can_disctmo(phba); 1306 lpfc_can_disctmo(vport);
1179 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) 1307 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
1180 lpfc_nlp_put(ndlp); 1308 lpfc_nlp_put(ndlp);
1181
1182 INIT_LIST_HEAD(&phba->fc_nodes);
1183
1184 return; 1309 return;
1185} 1310}
1186 1311
1187static void 1312static void
1188lpfc_establish_link_tmo(unsigned long ptr) 1313lpfc_establish_link_tmo(unsigned long ptr)
1189{ 1314{
1190 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 1315 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
1316 struct lpfc_vport *vport = phba->pport;
1191 unsigned long iflag; 1317 unsigned long iflag;
1192 1318
1193
1194 /* Re-establishing Link, timer expired */ 1319 /* Re-establishing Link, timer expired */
1195 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1320 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
1196 "%d:1300 Re-establishing Link, timer expired " 1321 "%d:1300 Re-establishing Link, timer expired "
1197 "Data: x%x x%x\n", 1322 "Data: x%x x%x\n",
1198 phba->brd_no, phba->fc_flag, phba->hba_state); 1323 phba->brd_no, vport->fc_flag,
1199 spin_lock_irqsave(phba->host->host_lock, iflag); 1324 vport->port_state);
1200 phba->fc_flag &= ~FC_ESTABLISH_LINK; 1325 list_for_each_entry(vport, &phba->port_list, listentry) {
1201 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1326 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1327
1328 spin_lock_irqsave(shost->host_lock, iflag);
1329 vport->fc_flag &= ~FC_ESTABLISH_LINK;
1330 spin_unlock_irqrestore(shost->host_lock, iflag);
1331 }
1202} 1332}
1203 1333
1204static int 1334void
1205lpfc_stop_timer(struct lpfc_hba * phba) 1335lpfc_stop_vport_timers(struct lpfc_vport *vport)
1206{ 1336{
1207 struct lpfc_sli *psli = &phba->sli; 1337 del_timer_sync(&vport->els_tmofunc);
1338 del_timer_sync(&vport->fc_fdmitmo);
1339 lpfc_can_disctmo(vport);
1340 return;
1341}
1342
1343static void
1344lpfc_stop_phba_timers(struct lpfc_hba *phba)
1345{
1346 struct lpfc_vport *vport;
1208 1347
1209 del_timer_sync(&phba->fcp_poll_timer); 1348 del_timer_sync(&phba->fcp_poll_timer);
1210 del_timer_sync(&phba->fc_estabtmo); 1349 del_timer_sync(&phba->fc_estabtmo);
1211 del_timer_sync(&phba->fc_disctmo); 1350 list_for_each_entry(vport, &phba->port_list, listentry)
1212 del_timer_sync(&phba->fc_fdmitmo); 1351 lpfc_stop_vport_timers(vport);
1213 del_timer_sync(&phba->els_tmofunc); 1352 del_timer_sync(&phba->sli.mbox_tmo);
1214 psli = &phba->sli; 1353 del_timer_sync(&phba->fabric_block_timer);
1215 del_timer_sync(&psli->mbox_tmo); 1354 phba->hb_outstanding = 0;
1216 return(1); 1355 del_timer_sync(&phba->hb_tmofunc);
1356 return;
1217} 1357}
1218 1358
1219int 1359int
1220lpfc_online(struct lpfc_hba * phba) 1360lpfc_online(struct lpfc_hba *phba)
1221{ 1361{
1362 struct lpfc_vport *vport = phba->pport;
1363
1222 if (!phba) 1364 if (!phba)
1223 return 0; 1365 return 0;
1224 1366
1225 if (!(phba->fc_flag & FC_OFFLINE_MODE)) 1367 if (!(vport->fc_flag & FC_OFFLINE_MODE))
1226 return 0; 1368 return 0;
1227 1369
1228 lpfc_printf_log(phba, 1370 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1229 KERN_WARNING,
1230 LOG_INIT,
1231 "%d:0458 Bring Adapter online\n", 1371 "%d:0458 Bring Adapter online\n",
1232 phba->brd_no); 1372 phba->brd_no);
1233 1373
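
    lpfc_stop_timer() is split above into a per-vport helper plus an HBA-level
    walk of port_list, mirroring the new vport object hierarchy, and
    del_timer_sync() is used throughout so a handler still running on another
    CPU has finished before teardown continues. A reduced sketch of that shape
    (field names follow the patch; the structs are trimmed to what the sketch
    needs, and like the patch it walks port_list unlocked at teardown time):

        #include <linux/timer.h>
        #include <linux/list.h>

        struct vport_sketch {
                struct list_head listentry;
                struct timer_list els_tmofunc;
                struct timer_list fc_fdmitmo;
        };

        struct hba_timer_sketch {
                struct list_head port_list;
                struct timer_list fcp_poll_timer;
                struct timer_list hb_tmofunc;
        };

        static void stop_vport_timers_sketch(struct vport_sketch *vport)
        {
                /* del_timer_sync() waits for a running handler to finish */
                del_timer_sync(&vport->els_tmofunc);
                del_timer_sync(&vport->fc_fdmitmo);
        }

        static void stop_hba_timers_sketch(struct hba_timer_sketch *phba)
        {
                struct vport_sketch *vport;

                del_timer_sync(&phba->fcp_poll_timer);
                list_for_each_entry(vport, &phba->port_list, listentry)
                        stop_vport_timers_sketch(vport);
                del_timer_sync(&phba->hb_tmofunc);
        }
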
@@ -1243,9 +1383,14 @@ lpfc_online(struct lpfc_hba * phba)
1243 return 1; 1383 return 1;
1244 } 1384 }
1245 1385
1246 spin_lock_irq(phba->host->host_lock); 1386 list_for_each_entry(vport, &phba->port_list, listentry) {
1247 phba->fc_flag &= ~FC_OFFLINE_MODE; 1387 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1248 spin_unlock_irq(phba->host->host_lock); 1388 spin_lock_irq(shost->host_lock);
1389 vport->fc_flag &= ~FC_OFFLINE_MODE;
1390 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
1391 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1392 spin_unlock_irq(shost->host_lock);
1393 }
1249 1394
1250 lpfc_unblock_mgmt_io(phba); 1395 lpfc_unblock_mgmt_io(phba);
1251 return 0; 1396 return 0;
@@ -1256,9 +1401,9 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
1256{ 1401{
1257 unsigned long iflag; 1402 unsigned long iflag;
1258 1403
1259 spin_lock_irqsave(phba->host->host_lock, iflag); 1404 spin_lock_irqsave(&phba->hbalock, iflag);
1260 phba->fc_flag |= FC_BLOCK_MGMT_IO; 1405 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1261 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1406 spin_unlock_irqrestore(&phba->hbalock, iflag);
1262} 1407}
1263 1408
1264void 1409void
@@ -1266,17 +1411,18 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1266{ 1411{
1267 unsigned long iflag; 1412 unsigned long iflag;
1268 1413
1269 spin_lock_irqsave(phba->host->host_lock, iflag); 1414 spin_lock_irqsave(&phba->hbalock, iflag);
1270 phba->fc_flag &= ~FC_BLOCK_MGMT_IO; 1415 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
1271 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1416 spin_unlock_irqrestore(&phba->hbalock, iflag);
1272} 1417}
1273 1418
1274void 1419void
1275lpfc_offline_prep(struct lpfc_hba * phba) 1420lpfc_offline_prep(struct lpfc_hba * phba)
1276{ 1421{
1422 struct lpfc_vport *vport = phba->pport;
1277 struct lpfc_nodelist *ndlp, *next_ndlp; 1423 struct lpfc_nodelist *ndlp, *next_ndlp;
1278 1424
1279 if (phba->fc_flag & FC_OFFLINE_MODE) 1425 if (vport->fc_flag & FC_OFFLINE_MODE)
1280 return; 1426 return;
1281 1427
1282 lpfc_block_mgmt_io(phba); 1428 lpfc_block_mgmt_io(phba);
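
    lpfc_block_mgmt_io()/lpfc_unblock_mgmt_io() above move the gate from an
    fc_flag bit under host_lock to LPFC_BLOCK_MGMT_IO in sli_flag under
    hbalock, keeping the _irqsave variants, presumably so the helpers stay
    safe in contexts where the interrupt state is unknown. A minimal sketch
    of the flag toggle (the bit value is illustrative, not the driver's):

        #include <linux/spinlock.h>

        #define SKETCH_BLOCK_MGMT_IO 0x800   /* bit value illustrative only */

        struct sli_sketch { unsigned long sli_flag; };
        struct hba_flag_sketch { spinlock_t hbalock; struct sli_sketch sli; };

        static void block_mgmt_io_sketch(struct hba_flag_sketch *phba)
        {
                unsigned long iflag;

                /* _irqsave form: correct whether or not IRQs are already off */
                spin_lock_irqsave(&phba->hbalock, iflag);
                phba->sli.sli_flag |= SKETCH_BLOCK_MGMT_IO;
                spin_unlock_irqrestore(&phba->hbalock, iflag);
        }
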
@@ -1284,39 +1430,49 @@ lpfc_offline_prep(struct lpfc_hba * phba)
1284 lpfc_linkdown(phba); 1430 lpfc_linkdown(phba);
1285 1431
1286 /* Issue an unreg_login to all nodes */ 1432 /* Issue an unreg_login to all nodes */
1287 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nodes, nlp_listp) 1433 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp)
1288 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) 1434 if (ndlp->nlp_state != NLP_STE_UNUSED_NODE)
1289 lpfc_unreg_rpi(phba, ndlp); 1435 lpfc_unreg_rpi(vport, ndlp);
1290 1436
1291 lpfc_sli_flush_mbox_queue(phba); 1437 lpfc_sli_flush_mbox_queue(phba);
1292} 1438}
1293 1439
1294void 1440void
1295lpfc_offline(struct lpfc_hba * phba) 1441lpfc_offline(struct lpfc_hba *phba)
1296{ 1442{
1297 unsigned long iflag; 1443 struct lpfc_vport *vport = phba->pport;
1444 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1445 struct lpfc_vport *port_iterator;
1298 1446
1299 if (phba->fc_flag & FC_OFFLINE_MODE) 1447 if (vport->fc_flag & FC_OFFLINE_MODE)
1300 return; 1448 return;
1301 1449
1302 /* stop all timers associated with this hba */ 1450 /* stop all timers associated with this hba */
1303 lpfc_stop_timer(phba); 1451 lpfc_stop_phba_timers(phba);
1452 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
1453 port_iterator->work_port_events = 0;
1454 }
1304 1455
1305 lpfc_printf_log(phba, 1456 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1306 KERN_WARNING,
1307 LOG_INIT,
1308 "%d:0460 Bring Adapter offline\n", 1457 "%d:0460 Bring Adapter offline\n",
1309 phba->brd_no); 1458 phba->brd_no);
1310 1459
1311 /* Bring down the SLI Layer and clean up. The HBA is offline 1460 /* Bring down the SLI Layer and clean up. The HBA is offline
1312 now. */ 1461 now. */
1313 lpfc_sli_hba_down(phba); 1462 lpfc_sli_hba_down(phba);
1314 lpfc_cleanup(phba); 1463 spin_lock_irq(&phba->hbalock);
1315 spin_lock_irqsave(phba->host->host_lock, iflag);
1316 phba->work_hba_events = 0;
1317 phba->work_ha = 0; 1464 phba->work_ha = 0;
1318 phba->fc_flag |= FC_OFFLINE_MODE; 1465 vport->fc_flag |= FC_OFFLINE_MODE;
1319 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1466 spin_unlock_irq(&phba->hbalock);
1467 list_for_each_entry(port_iterator, &phba->port_list, listentry) {
1468 shost = lpfc_shost_from_vport(port_iterator);
1469
1470 lpfc_cleanup(port_iterator);
1471 spin_lock_irq(shost->host_lock);
1472 port_iterator->work_port_events = 0;
1473 port_iterator->fc_flag |= FC_OFFLINE_MODE;
1474 spin_unlock_irq(shost->host_lock);
1475 }
1320} 1476}
1321 1477
1322/****************************************************************************** 1478/******************************************************************************
@@ -1326,17 +1482,17 @@ lpfc_offline(struct lpfc_hba * phba)
1326* 1482*
1327******************************************************************************/ 1483******************************************************************************/
1328static int 1484static int
1329lpfc_scsi_free(struct lpfc_hba * phba) 1485lpfc_scsi_free(struct lpfc_hba *phba)
1330{ 1486{
1331 struct lpfc_scsi_buf *sb, *sb_next; 1487 struct lpfc_scsi_buf *sb, *sb_next;
1332 struct lpfc_iocbq *io, *io_next; 1488 struct lpfc_iocbq *io, *io_next;
1333 1489
1334 spin_lock_irq(phba->host->host_lock); 1490 spin_lock_irq(&phba->hbalock);
1335 /* Release all the lpfc_scsi_bufs maintained by this host. */ 1491 /* Release all the lpfc_scsi_bufs maintained by this host. */
1336 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { 1492 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1337 list_del(&sb->list); 1493 list_del(&sb->list);
1338 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, 1494 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1339 sb->dma_handle); 1495 sb->dma_handle);
1340 kfree(sb); 1496 kfree(sb);
1341 phba->total_scsi_bufs--; 1497 phba->total_scsi_bufs--;
1342 } 1498 }
@@ -1348,134 +1504,183 @@ lpfc_scsi_free(struct lpfc_hba * phba)
1348 phba->total_iocbq_bufs--; 1504 phba->total_iocbq_bufs--;
1349 } 1505 }
1350 1506
1351 spin_unlock_irq(phba->host->host_lock); 1507 spin_unlock_irq(&phba->hbalock);
1352 1508
1353 return 0; 1509 return 0;
1354} 1510}
1355 1511
1356void lpfc_remove_device(struct lpfc_hba *phba)
1357{
1358 unsigned long iflag;
1359 1512
1360 lpfc_free_sysfs_attr(phba); 1513struct lpfc_vport *
1361 1514lpfc_create_port(struct lpfc_hba *phba, int instance, struct fc_vport *fc_vport)
1362 spin_lock_irqsave(phba->host->host_lock, iflag); 1515{
1363 phba->fc_flag |= FC_UNLOADING; 1516 struct lpfc_vport *vport;
1517 struct Scsi_Host *shost;
1518 int error = 0;
1364 1519
1365 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1520 shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
1521 if (!shost)
1522 goto out;
1366 1523
1367 fc_remove_host(phba->host); 1524 vport = (struct lpfc_vport *) shost->hostdata;
1368 scsi_remove_host(phba->host); 1525 vport->phba = phba;
1369 1526
1370 kthread_stop(phba->worker_thread); 1527 vport->load_flag |= FC_LOADING;
1528 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1371 1529
1530 shost->unique_id = instance;
1531 shost->max_id = LPFC_MAX_TARGET;
1532 shost->max_lun = phba->cfg_max_luns;
1533 shost->this_id = -1;
1534 shost->max_cmd_len = 16;
1372 /* 1535 /*
1373 * Bring down the SLI Layer. This step disables all interrupts, 1536 * scsi_add_host will fail. This will be adjusted later based on the
1374 * clears the rings, discards all mailbox commands, and resets 1537 * scsi_add_host will fail. This will be adjusted later based on the
1375 * the HBA. 1538 * max xri value determined in hba setup.
1376 */ 1539 */
1377 lpfc_sli_hba_down(phba); 1540 shost->can_queue = phba->cfg_hba_queue_depth - 10;
1378 lpfc_sli_brdrestart(phba); 1541 if (fc_vport != NULL) {
1542 shost->transportt = lpfc_vport_transport_template;
1543 vport->port_type = LPFC_NPIV_PORT;
1544 } else {
1545 shost->transportt = lpfc_transport_template;
1546 vport->port_type = LPFC_PHYSICAL_PORT;
1547 }
1379 1548
1380 /* Release the irq reservation */ 1549 /* Initialize all internally managed lists. */
1381 free_irq(phba->pcidev->irq, phba); 1550 INIT_LIST_HEAD(&vport->fc_nodes);
1382 pci_disable_msi(phba->pcidev); 1551 spin_lock_init(&vport->work_port_lock);
1383 1552
1384 lpfc_cleanup(phba); 1553 init_timer(&vport->fc_disctmo);
1385 lpfc_stop_timer(phba); 1554 vport->fc_disctmo.function = lpfc_disc_timeout;
1386 phba->work_hba_events = 0; 1555 vport->fc_disctmo.data = (unsigned long)vport;
1387 1556
1388 /* 1557 init_timer(&vport->fc_fdmitmo);
1389 * Call scsi_free before mem_free since scsi bufs are released to their 1558 vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
1390 * corresponding pools here. 1559 vport->fc_fdmitmo.data = (unsigned long)vport;
1391 */
1392 lpfc_scsi_free(phba);
1393 lpfc_mem_free(phba);
1394 1560
1395 /* Free resources associated with SLI2 interface */ 1561 init_timer(&vport->els_tmofunc);
1396 dma_free_coherent(&phba->pcidev->dev, SLI2_SLIM_SIZE, 1562 vport->els_tmofunc.function = lpfc_els_timeout;
1397 phba->slim2p, phba->slim2p_mapping); 1563 vport->els_tmofunc.data = (unsigned long)vport;
1398 1564
1399 /* unmap adapter SLIM and Control Registers */ 1565 if (fc_vport != NULL) {
1400 iounmap(phba->ctrl_regs_memmap_p); 1566 error = scsi_add_host(shost, &fc_vport->dev);
1401 iounmap(phba->slim_memmap_p); 1567 } else {
1568 error = scsi_add_host(shost, &phba->pcidev->dev);
1569 }
1570 if (error)
1571 goto out_put_shost;
1402 1572
1403 pci_release_regions(phba->pcidev); 1573 list_add_tail(&vport->listentry, &phba->port_list);
1404 pci_disable_device(phba->pcidev); 1574 return vport;
1405 1575
1406 idr_remove(&lpfc_hba_index, phba->brd_no); 1576out_put_shost:
1407 scsi_host_put(phba->host); 1577 scsi_host_put(shost);
1578out:
1579 return NULL;
1408} 1580}
1409 1581
1410void lpfc_scan_start(struct Scsi_Host *host) 1582void
1583destroy_port(struct lpfc_vport *vport)
1411{ 1584{
1412 struct lpfc_hba *phba = (struct lpfc_hba*)host->hostdata; 1585 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1586 struct lpfc_hba *phba = vport->phba;
1413 1587
1414 if (lpfc_alloc_sysfs_attr(phba)) 1588 kfree(vport->vname);
1415 goto error;
1416 1589
1417 phba->MBslimaddr = phba->slim_memmap_p; 1590 lpfc_debugfs_terminate(vport);
1418 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; 1591 fc_remove_host(shost);
1419 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; 1592 scsi_remove_host(shost);
1420 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1421 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1422 1593
1423 if (lpfc_sli_hba_setup(phba)) 1594 spin_lock_irq(&phba->hbalock);
1424 goto error; 1595 list_del_init(&vport->listentry);
1596 spin_unlock_irq(&phba->hbalock);
1425 1597
1426 /* 1598 lpfc_cleanup(vport);
1427 * hba setup may have changed the hba_queue_depth so we need to adjust
1428 * the value of can_queue.
1429 */
1430 host->can_queue = phba->cfg_hba_queue_depth - 10;
1431 return; 1599 return;
1600}
1601
1602int
1603lpfc_get_instance(void)
1604{
1605 int instance = 0;
1432 1606
1433error: 1607 /* Assign an unused number */
1434 lpfc_remove_device(phba); 1608 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
1609 return -1;
1610 if (idr_get_new(&lpfc_hba_index, NULL, &instance))
1611 return -1;
1612 return instance;
1435} 1613}
1436 1614
1615/*
1616 * Note: there is no scan_start function as adapter initialization
1617 * will have asynchronously kicked off the link initialization.
1618 */
1619
1437int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) 1620int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
1438{ 1621{
1439 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1622 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1623 struct lpfc_hba *phba = vport->phba;
1624 int stat = 0;
1440 1625
1441 if (!phba->host) 1626 spin_lock_irq(shost->host_lock);
1442 return 1; 1627
1443 if (time >= 30 * HZ) 1628 if (vport->fc_flag & FC_UNLOADING) {
1629 stat = 1;
1630 goto finished;
1631 }
1632 if (time >= 30 * HZ) {
1633 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1634 "%d:0461 Scanning longer than 30 "
1635 "seconds. Continuing initialization\n",
1636 phba->brd_no);
1637 stat = 1;
1638 goto finished;
1639 }
1640 if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
1641 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1642 "%d:0465 Link down longer than 15 "
1643 "seconds. Continuing initialization\n",
1644 phba->brd_no);
1645 stat = 1;
1444 goto finished; 1646 goto finished;
1647 }
1445 1648
1446 if (phba->hba_state != LPFC_HBA_READY) 1649 if (vport->port_state != LPFC_VPORT_READY)
1447 return 0; 1650 goto finished;
1448 if (phba->num_disc_nodes || phba->fc_prli_sent) 1651 if (vport->num_disc_nodes || vport->fc_prli_sent)
1449 return 0; 1652 goto finished;
1450 if ((phba->fc_map_cnt == 0) && (time < 2 * HZ)) 1653 if (vport->fc_map_cnt == 0 && time < 2 * HZ)
1451 return 0; 1654 goto finished;
1452 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) 1655 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
1453 return 0; 1656 goto finished;
1454 if ((phba->hba_state > LPFC_LINK_DOWN) || (time < 15 * HZ)) 1657
1455 return 0; 1658 stat = 1;
1456 1659
1457finished: 1660finished:
1458 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 1661 spin_unlock_irq(shost->host_lock);
1459 spin_lock_irq(shost->host_lock); 1662 return stat;
1460 lpfc_poll_start_timer(phba); 1663}
1461 spin_unlock_irq(shost->host_lock);
1462 }
1463 1664
1665void lpfc_host_attrib_init(struct Scsi_Host *shost)
1666{
1667 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1668 struct lpfc_hba *phba = vport->phba;
1464 /* 1669 /*
1465 * set fixed host attributes 1670 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
1466 * Must be done after lpfc_sli_hba_setup()
1467 */ 1671 */
1468 1672
1469 fc_host_node_name(shost) = wwn_to_u64(phba->fc_nodename.u.wwn); 1673 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
1470 fc_host_port_name(shost) = wwn_to_u64(phba->fc_portname.u.wwn); 1674 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
1471 fc_host_supported_classes(shost) = FC_COS_CLASS3; 1675 fc_host_supported_classes(shost) = FC_COS_CLASS3;
1472 1676
1473 memset(fc_host_supported_fc4s(shost), 0, 1677 memset(fc_host_supported_fc4s(shost), 0,
1474 sizeof(fc_host_supported_fc4s(shost))); 1678 sizeof(fc_host_supported_fc4s(shost)));
1475 fc_host_supported_fc4s(shost)[2] = 1; 1679 fc_host_supported_fc4s(shost)[2] = 1;
1476 fc_host_supported_fc4s(shost)[7] = 1; 1680 fc_host_supported_fc4s(shost)[7] = 1;
1477 1681
1478 lpfc_get_hba_sym_node_name(phba, fc_host_symbolic_name(shost)); 1682 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
1683 sizeof fc_host_symbolic_name(shost));
1479 1684
1480 fc_host_supported_speeds(shost) = 0; 1685 fc_host_supported_speeds(shost) = 0;
1481 if (phba->lmt & LMT_10Gb) 1686 if (phba->lmt & LMT_10Gb)
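
    The large hunk above replaces lpfc_remove_device()/lpfc_scan_start() with
    lpfc_create_port() and friends: the vport lives in the Scsi_Host's
    hostdata allocation, and it is linked onto phba->port_list only after
    scsi_add_host() succeeds, so the failure path is a single scsi_host_put().
    A compressed sketch of that ownership pattern, assuming a host template
    declared elsewhere (sketch_template stands in for lpfc_template):

        #include <scsi/scsi_host.h>
        #include <linux/list.h>

        struct vport_alloc_sketch {
                struct list_head listentry;
                void *phba;
        };

        extern struct scsi_host_template sketch_template; /* assumed elsewhere */

        static struct vport_alloc_sketch *create_port_sketch(void *phba,
                                                             struct device *parent)
        {
                struct Scsi_Host *shost;
                struct vport_alloc_sketch *vport;

                /* hostdata space is allocated along with the Scsi_Host */
                shost = scsi_host_alloc(&sketch_template, sizeof(*vport));
                if (!shost)
                        return NULL;

                vport = (struct vport_alloc_sketch *)shost->hostdata;
                vport->phba = phba;

                if (scsi_add_host(shost, parent)) {
                        /* not yet on any list: one put undoes the alloc */
                        scsi_host_put(shost);
                        return NULL;
                }
                return vport;
        }
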
@@ -1488,31 +1693,31 @@ finished:
1488 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT; 1693 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
1489 1694
1490 fc_host_maxframe_size(shost) = 1695 fc_host_maxframe_size(shost) =
1491 ((((uint32_t) phba->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) | 1696 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
1492 (uint32_t) phba->fc_sparam.cmn.bbRcvSizeLsb); 1697 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
1493 1698
1494 /* This value is also unchanging */ 1699 /* This value is also unchanging */
1495 memset(fc_host_active_fc4s(shost), 0, 1700 memset(fc_host_active_fc4s(shost), 0,
1496 sizeof(fc_host_active_fc4s(shost))); 1701 sizeof(fc_host_active_fc4s(shost)));
1497 fc_host_active_fc4s(shost)[2] = 1; 1702 fc_host_active_fc4s(shost)[2] = 1;
1498 fc_host_active_fc4s(shost)[7] = 1; 1703 fc_host_active_fc4s(shost)[7] = 1;
1499 1704
1705 fc_host_max_npiv_vports(shost) = phba->max_vpi;
1500 spin_lock_irq(shost->host_lock); 1706 spin_lock_irq(shost->host_lock);
1501 phba->fc_flag &= ~FC_LOADING; 1707 vport->fc_flag &= ~FC_LOADING;
1502 spin_unlock_irq(shost->host_lock); 1708 spin_unlock_irq(shost->host_lock);
1503
1504 return 1;
1505} 1709}
1506 1710
1507static int __devinit 1711static int __devinit
1508lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) 1712lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1509{ 1713{
1510 struct Scsi_Host *host; 1714 struct lpfc_vport *vport = NULL;
1511 struct lpfc_hba *phba; 1715 struct lpfc_hba *phba;
1512 struct lpfc_sli *psli; 1716 struct lpfc_sli *psli;
1513 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; 1717 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
1718 struct Scsi_Host *shost = NULL;
1514 unsigned long bar0map_len, bar2map_len; 1719 unsigned long bar0map_len, bar2map_len;
1515 int error = -ENODEV, retval; 1720 int error = -ENODEV;
1516 int i; 1721 int i;
1517 uint16_t iotag; 1722 uint16_t iotag;
1518 1723
@@ -1521,61 +1726,46 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1521 if (pci_request_regions(pdev, LPFC_DRIVER_NAME)) 1726 if (pci_request_regions(pdev, LPFC_DRIVER_NAME))
1522 goto out_disable_device; 1727 goto out_disable_device;
1523 1728
1524 host = scsi_host_alloc(&lpfc_template, sizeof (struct lpfc_hba)); 1729 phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
1525 if (!host) 1730 if (!phba)
1526 goto out_release_regions; 1731 goto out_release_regions;
1527 1732
1528 phba = (struct lpfc_hba*)host->hostdata; 1733 spin_lock_init(&phba->hbalock);
1529 memset(phba, 0, sizeof (struct lpfc_hba));
1530 phba->host = host;
1531 1734
1532 phba->fc_flag |= FC_LOADING;
1533 phba->pcidev = pdev; 1735 phba->pcidev = pdev;
1534 1736
1535 /* Assign an unused board number */ 1737 /* Assign an unused board number */
1536 if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL)) 1738 if ((phba->brd_no = lpfc_get_instance()) < 0)
1537 goto out_put_host; 1739 goto out_free_phba;
1538 1740
1539 error = idr_get_new(&lpfc_hba_index, NULL, &phba->brd_no); 1741 INIT_LIST_HEAD(&phba->port_list);
1540 if (error) 1742 INIT_LIST_HEAD(&phba->hbq_buffer_list);
1541 goto out_put_host; 1743 /*
1542 1744 * Get all the module params for configuring this host and then
1543 host->unique_id = phba->brd_no; 1745 * establish the host.
1746 */
1747 lpfc_get_cfgparam(phba);
1748 phba->max_vpi = LPFC_MAX_VPI;
1544 1749
1545 /* Initialize timers used by driver */ 1750 /* Initialize timers used by driver */
1546 init_timer(&phba->fc_estabtmo); 1751 init_timer(&phba->fc_estabtmo);
1547 phba->fc_estabtmo.function = lpfc_establish_link_tmo; 1752 phba->fc_estabtmo.function = lpfc_establish_link_tmo;
1548 phba->fc_estabtmo.data = (unsigned long)phba; 1753 phba->fc_estabtmo.data = (unsigned long)phba;
1549 init_timer(&phba->fc_disctmo); 1754
1550 phba->fc_disctmo.function = lpfc_disc_timeout; 1755 init_timer(&phba->hb_tmofunc);
1551 phba->fc_disctmo.data = (unsigned long)phba; 1756 phba->hb_tmofunc.function = lpfc_hb_timeout;
1552 1757 phba->hb_tmofunc.data = (unsigned long)phba;
1553 init_timer(&phba->fc_fdmitmo); 1758
1554 phba->fc_fdmitmo.function = lpfc_fdmi_tmo;
1555 phba->fc_fdmitmo.data = (unsigned long)phba;
1556 init_timer(&phba->els_tmofunc);
1557 phba->els_tmofunc.function = lpfc_els_timeout;
1558 phba->els_tmofunc.data = (unsigned long)phba;
1559 psli = &phba->sli; 1759 psli = &phba->sli;
1560 init_timer(&psli->mbox_tmo); 1760 init_timer(&psli->mbox_tmo);
1561 psli->mbox_tmo.function = lpfc_mbox_timeout; 1761 psli->mbox_tmo.function = lpfc_mbox_timeout;
1562 psli->mbox_tmo.data = (unsigned long)phba; 1762 psli->mbox_tmo.data = (unsigned long) phba;
1563
1564 init_timer(&phba->fcp_poll_timer); 1763 init_timer(&phba->fcp_poll_timer);
1565 phba->fcp_poll_timer.function = lpfc_poll_timeout; 1764 phba->fcp_poll_timer.function = lpfc_poll_timeout;
1566 phba->fcp_poll_timer.data = (unsigned long)phba; 1765 phba->fcp_poll_timer.data = (unsigned long) phba;
1567 1766 init_timer(&phba->fabric_block_timer);
1568 /* 1767 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
1569 * Get all the module params for configuring this host and then 1768 phba->fabric_block_timer.data = (unsigned long) phba;
1570 * establish the host parameters.
1571 */
1572 lpfc_get_cfgparam(phba);
1573
1574 host->max_id = LPFC_MAX_TARGET;
1575 host->max_lun = phba->cfg_max_luns;
1576 host->this_id = -1;
1577
1578 INIT_LIST_HEAD(&phba->fc_nodes);
1579 1769
1580 pci_set_master(pdev); 1770 pci_set_master(pdev);
1581 pci_try_set_mwi(pdev); 1771 pci_try_set_mwi(pdev);
@@ -1620,13 +1810,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1620 1810
1621 memset(phba->slim2p, 0, SLI2_SLIM_SIZE); 1811 memset(phba->slim2p, 0, SLI2_SLIM_SIZE);
1622 1812
1813 phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
1814 lpfc_sli_hbq_size(),
1815 &phba->hbqslimp.phys,
1816 GFP_KERNEL);
1817 if (!phba->hbqslimp.virt)
1818 goto out_free_slim;
1819
1820 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
1821
1623 /* Initialize the SLI Layer to run with lpfc HBAs. */ 1822 /* Initialize the SLI Layer to run with lpfc HBAs. */
1624 lpfc_sli_setup(phba); 1823 lpfc_sli_setup(phba);
1625 lpfc_sli_queue_setup(phba); 1824 lpfc_sli_queue_setup(phba);
1626 1825
1627 error = lpfc_mem_alloc(phba); 1826 error = lpfc_mem_alloc(phba);
1628 if (error) 1827 if (error)
1629 goto out_free_slim; 1828 goto out_free_hbqslimp;
1630 1829
1631 /* Initialize and populate the iocb list per host. */ 1830 /* Initialize and populate the iocb list per host. */
1632 INIT_LIST_HEAD(&phba->lpfc_iocb_list); 1831 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
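
    The HBQ slim area added here is obtained with dma_alloc_coherent() and
    torn down with a dma_free_coherent() that must repeat the same size and
    handles, which is why the probe error ladder below grows an
    out_free_hbqslimp label. A minimal sketch of the pairing, assuming the
    size comes from a helper like lpfc_sli_hbq_size():

        #include <linux/dma-mapping.h>
        #include <linux/string.h>

        struct dmabuf_sketch {
                void *virt;
                dma_addr_t phys;
        };

        static int alloc_hbq_slim_sketch(struct device *dev,
                                         struct dmabuf_sketch *buf, size_t size)
        {
                buf->virt = dma_alloc_coherent(dev, size, &buf->phys, GFP_KERNEL);
                if (!buf->virt)
                        return -ENOMEM;
                memset(buf->virt, 0, size);     /* the patch zeroes it explicitly */
                return 0;
        }

        static void free_hbq_slim_sketch(struct device *dev,
                                         struct dmabuf_sketch *buf, size_t size)
        {
                /* size and handles must match the allocation exactly */
                dma_free_coherent(dev, size, buf->virt, buf->phys);
        }
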
@@ -1650,10 +1849,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1650 error = -ENOMEM; 1849 error = -ENOMEM;
1651 goto out_free_iocbq; 1850 goto out_free_iocbq;
1652 } 1851 }
1653 spin_lock_irq(phba->host->host_lock); 1852
1853 spin_lock_irq(&phba->hbalock);
1654 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); 1854 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
1655 phba->total_iocbq_bufs++; 1855 phba->total_iocbq_bufs++;
1656 spin_unlock_irq(phba->host->host_lock); 1856 spin_unlock_irq(&phba->hbalock);
1657 } 1857 }
1658 1858
1659 /* Initialize HBA structure */ 1859 /* Initialize HBA structure */
@@ -1674,22 +1874,22 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1674 goto out_free_iocbq; 1874 goto out_free_iocbq;
1675 } 1875 }
1676 1876
1677 /*
1678 * Set initial can_queue value since 0 is no longer supported and
1679 * scsi_add_host will fail. This will be adjusted later based on the
1680 * max xri value determined in hba setup.
1681 */
1682 host->can_queue = phba->cfg_hba_queue_depth - 10;
1683
1684 /* Tell the midlayer we support 16 byte commands */
1685 host->max_cmd_len = 16;
1686
1687 /* Initialize the list of scsi buffers used by driver for scsi IO. */ 1877 /* Initialize the list of scsi buffers used by driver for scsi IO. */
1688 spin_lock_init(&phba->scsi_buf_list_lock); 1878 spin_lock_init(&phba->scsi_buf_list_lock);
1689 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); 1879 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
1690 1880
1691 host->transportt = lpfc_transport_template; 1881 /* Initialize list of fabric iocbs */
1692 pci_set_drvdata(pdev, host); 1882 INIT_LIST_HEAD(&phba->fabric_iocb_list);
1883
1884 vport = lpfc_create_port(phba, phba->brd_no, NULL);
1885 if (!vport)
1886 goto out_kthread_stop;
1887
1888 shost = lpfc_shost_from_vport(vport);
1889 phba->pport = vport;
1890 lpfc_debugfs_initialize(vport);
1891
1892 pci_set_drvdata(pdev, shost);
1693 1893
1694 if (phba->cfg_use_msi) { 1894 if (phba->cfg_use_msi) {
1695 error = pci_enable_msi(phba->pcidev); 1895 error = pci_enable_msi(phba->pcidev);
@@ -1700,38 +1900,68 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1700 } 1900 }
1701 1901
1702 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED, 1902 error = request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
1703 LPFC_DRIVER_NAME, phba); 1903 LPFC_DRIVER_NAME, phba);
1704 if (error) { 1904 if (error) {
1705 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1706 "%d:0451 Enable interrupt handler failed\n", 1906 "%d:0451 Enable interrupt handler failed\n",
1707 phba->brd_no); 1907 phba->brd_no);
1708 goto out_kthread_stop; 1908 goto out_disable_msi;
1709 } 1909 }
1710 1910
1711 error = scsi_add_host(host, &pdev->dev); 1911 phba->MBslimaddr = phba->slim_memmap_p;
1712 if (error) 1912 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
1913 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
1914 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
1915 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
1916
1917 if (lpfc_alloc_sysfs_attr(vport))
1713 goto out_free_irq; 1918 goto out_free_irq;
1714 1919
1715 scsi_scan_host(host); 1920 if (lpfc_sli_hba_setup(phba))
1921 goto out_remove_device;
1922
1923 /*
1924 * hba setup may have changed the hba_queue_depth so we need to adjust
1925 * the value of can_queue.
1926 */
1927 shost->can_queue = phba->cfg_hba_queue_depth - 10;
1928
1929 lpfc_host_attrib_init(shost);
1930
1931 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1932 spin_lock_irq(shost->host_lock);
1933 lpfc_poll_start_timer(phba);
1934 spin_unlock_irq(shost->host_lock);
1935 }
1936
1937 scsi_scan_host(shost);
1716 1938
1717 return 0; 1939 return 0;
1718 1940
1941out_remove_device:
1942 lpfc_free_sysfs_attr(vport);
1943 spin_lock_irq(shost->host_lock);
1944 vport->fc_flag |= FC_UNLOADING;
1945 spin_unlock_irq(shost->host_lock);
1719out_free_irq: 1946out_free_irq:
1720 lpfc_stop_timer(phba); 1947 lpfc_stop_phba_timers(phba);
1721 phba->work_hba_events = 0; 1948 phba->pport->work_port_events = 0;
1722 free_irq(phba->pcidev->irq, phba); 1949 free_irq(phba->pcidev->irq, phba);
1950out_disable_msi:
1723 pci_disable_msi(phba->pcidev); 1951 pci_disable_msi(phba->pcidev);
1952 destroy_port(vport);
1724out_kthread_stop: 1953out_kthread_stop:
1725 kthread_stop(phba->worker_thread); 1954 kthread_stop(phba->worker_thread);
1726out_free_iocbq: 1955out_free_iocbq:
1727 list_for_each_entry_safe(iocbq_entry, iocbq_next, 1956 list_for_each_entry_safe(iocbq_entry, iocbq_next,
1728 &phba->lpfc_iocb_list, list) { 1957 &phba->lpfc_iocb_list, list) {
1729 spin_lock_irq(phba->host->host_lock);
1730 kfree(iocbq_entry); 1958 kfree(iocbq_entry);
1731 phba->total_iocbq_bufs--; 1959 phba->total_iocbq_bufs--;
1732 spin_unlock_irq(phba->host->host_lock);
1733 } 1960 }
1734 lpfc_mem_free(phba); 1961 lpfc_mem_free(phba);
1962out_free_hbqslimp:
1963 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
1964 phba->hbqslimp.phys);
1735out_free_slim: 1965out_free_slim:
1736 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p, 1966 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, phba->slim2p,
1737 phba->slim2p_mapping); 1967 phba->slim2p_mapping);
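
    The probe failure handling above follows the standard kernel goto ladder:
    every acquisition gets an undo label, later failures jump into the chain,
    and the chain falls through the earlier undos in reverse order
    (out_remove_device down to out_disable_device here). A skeletal sketch of
    that control flow with generic step names (the step_*/undo_* functions are
    placeholders, assumed to exist):

        extern int step_a(void), step_b(void), step_c(void);
        extern void undo_a(void), undo_b(void);

        static int probe_sketch(void)
        {
                int error;

                error = step_a();           /* e.g. request_irq() */
                if (error)
                        goto out;
                error = step_b();           /* e.g. lpfc_alloc_sysfs_attr() */
                if (error)
                        goto out_undo_a;
                error = step_c();           /* e.g. lpfc_sli_hba_setup() */
                if (error)
                        goto out_undo_b;
                return 0;

        out_undo_b:
                undo_b();
                /* fall through: later failures release earlier resources */
        out_undo_a:
                undo_a();
        out:
                return error;
        }
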
@@ -1741,27 +1971,85 @@ out_iounmap_slim:
1741 iounmap(phba->slim_memmap_p); 1971 iounmap(phba->slim_memmap_p);
1742out_idr_remove: 1972out_idr_remove:
1743 idr_remove(&lpfc_hba_index, phba->brd_no); 1973 idr_remove(&lpfc_hba_index, phba->brd_no);
1744out_put_host: 1974out_free_phba:
1745 phba->host = NULL; 1975 kfree(phba);
1746 scsi_host_put(host);
1747out_release_regions: 1976out_release_regions:
1748 pci_release_regions(pdev); 1977 pci_release_regions(pdev);
1749out_disable_device: 1978out_disable_device:
1750 pci_disable_device(pdev); 1979 pci_disable_device(pdev);
1751out: 1980out:
1752 pci_set_drvdata(pdev, NULL); 1981 pci_set_drvdata(pdev, NULL);
1982 if (shost)
1983 scsi_host_put(shost);
1753 return error; 1984 return error;
1754} 1985}
1755 1986
1756static void __devexit 1987static void __devexit
1757lpfc_pci_remove_one(struct pci_dev *pdev) 1988lpfc_pci_remove_one(struct pci_dev *pdev)
1758{ 1989{
1759 struct Scsi_Host *host = pci_get_drvdata(pdev); 1990 struct Scsi_Host *shost = pci_get_drvdata(pdev);
1760 struct lpfc_hba *phba = (struct lpfc_hba *)host->hostdata; 1991 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1992 struct lpfc_hba *phba = vport->phba;
1993 struct lpfc_vport *port_iterator;
1994 list_for_each_entry(port_iterator, &phba->port_list, listentry)
1995 port_iterator->load_flag |= FC_UNLOADING;
1996
1997 kfree(vport->vname);
1998 lpfc_free_sysfs_attr(vport);
1999
2000 fc_remove_host(shost);
2001 scsi_remove_host(shost);
2002
2003 /*
2004 * Bring down the SLI Layer. This step disables all interrupts,
2005 * clears the rings, discards all mailbox commands, and resets
2006 * the HBA.
2007 */
2008 lpfc_sli_hba_down(phba);
2009 lpfc_sli_brdrestart(phba);
2010
2011 lpfc_stop_phba_timers(phba);
2012 spin_lock_irq(&phba->hbalock);
2013 list_del_init(&vport->listentry);
2014 spin_unlock_irq(&phba->hbalock);
2015
1761 2016
1762 lpfc_remove_device(phba); 2017 lpfc_debugfs_terminate(vport);
2018 lpfc_cleanup(vport);
2019
2020 kthread_stop(phba->worker_thread);
2021
2022 /* Release the irq reservation */
2023 free_irq(phba->pcidev->irq, phba);
2024 pci_disable_msi(phba->pcidev);
1763 2025
1764 pci_set_drvdata(pdev, NULL); 2026 pci_set_drvdata(pdev, NULL);
2027 scsi_host_put(shost);
2028
2029 /*
2030 * Call scsi_free before mem_free since scsi bufs are released to their
2031 * corresponding pools here.
2032 */
2033 lpfc_scsi_free(phba);
2034 lpfc_mem_free(phba);
2035
2036 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt,
2037 phba->hbqslimp.phys);
2038
2039 /* Free resources associated with SLI2 interface */
2040 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
2041 phba->slim2p, phba->slim2p_mapping);
2042
2043 /* unmap adapter SLIM and Control Registers */
2044 iounmap(phba->ctrl_regs_memmap_p);
2045 iounmap(phba->slim_memmap_p);
2046
2047 idr_remove(&lpfc_hba_index, phba->brd_no);
2048
2049 kfree(phba);
2050
2051 pci_release_regions(pdev);
2052 pci_disable_device(pdev);
1765} 2053}
1766 2054
1767/** 2055/**
@@ -1819,10 +2107,13 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
1819 pci_set_master(pdev); 2107 pci_set_master(pdev);
1820 2108
1821 /* Re-establishing Link */ 2109 /* Re-establishing Link */
1822 spin_lock_irq(phba->host->host_lock); 2110 spin_lock_irq(host->host_lock);
1823 phba->fc_flag |= FC_ESTABLISH_LINK; 2111 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2112 spin_unlock_irq(host->host_lock);
2113
2114 spin_lock_irq(&phba->hbalock);
1824 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2115 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1825 spin_unlock_irq(phba->host->host_lock); 2116 spin_unlock_irq(&phba->hbalock);
1826 2117
1827 2118
1828 /* Take device offline; this will perform cleanup */ 2119 /* Take device offline; this will perform cleanup */
@@ -1932,7 +2223,7 @@ static struct pci_driver lpfc_driver = {
1932 .id_table = lpfc_id_table, 2223 .id_table = lpfc_id_table,
1933 .probe = lpfc_pci_probe_one, 2224 .probe = lpfc_pci_probe_one,
1934 .remove = __devexit_p(lpfc_pci_remove_one), 2225 .remove = __devexit_p(lpfc_pci_remove_one),
1935 .err_handler = &lpfc_err_handler, 2226 .err_handler = &lpfc_err_handler,
1936}; 2227};
1937 2228
1938static int __init 2229static int __init
@@ -1945,11 +2236,15 @@ lpfc_init(void)
1945 2236
1946 lpfc_transport_template = 2237 lpfc_transport_template =
1947 fc_attach_transport(&lpfc_transport_functions); 2238 fc_attach_transport(&lpfc_transport_functions);
1948 if (!lpfc_transport_template) 2239 lpfc_vport_transport_template =
2240 fc_attach_transport(&lpfc_vport_transport_functions);
2241 if (!lpfc_transport_template || !lpfc_vport_transport_template)
1949 return -ENOMEM; 2242 return -ENOMEM;
1950 error = pci_register_driver(&lpfc_driver); 2243 error = pci_register_driver(&lpfc_driver);
1951 if (error) 2244 if (error) {
1952 fc_release_transport(lpfc_transport_template); 2245 fc_release_transport(lpfc_transport_template);
2246 fc_release_transport(lpfc_vport_transport_template);
2247 }
1953 2248
1954 return error; 2249 return error;
1955} 2250}
@@ -1959,6 +2254,7 @@ lpfc_exit(void)
1959{ 2254{
1960 pci_unregister_driver(&lpfc_driver); 2255 pci_unregister_driver(&lpfc_driver);
1961 fc_release_transport(lpfc_transport_template); 2256 fc_release_transport(lpfc_transport_template);
2257 fc_release_transport(lpfc_vport_transport_template);
1962} 2258}
1963 2259
1964module_init(lpfc_init); 2260module_init(lpfc_init);
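
    lpfc_init() above now attaches two fc transport templates, one for the
    physical port and one for NPIV vports, and releases both if
    pci_register_driver() fails; lpfc_exit() mirrors that. A sketch of the
    symmetric init path (fc_attach_transport()/fc_release_transport() are the
    real scsi_transport_fc entry points; the function tables and pci_driver
    are the driver's own, declared extern here):

        #include <linux/init.h>
        #include <linux/pci.h>
        #include <scsi/scsi_transport_fc.h>

        static struct scsi_transport_template *tt, *vport_tt;
        extern struct fc_function_template lpfc_transport_functions;
        extern struct fc_function_template lpfc_vport_transport_functions;
        extern struct pci_driver lpfc_driver;

        static int __init init_sketch(void)
        {
                int error;

                tt = fc_attach_transport(&lpfc_transport_functions);
                vport_tt = fc_attach_transport(&lpfc_vport_transport_functions);
                /* note: like the patch, this returns without releasing the
                 * template that did attach when only one attach fails */
                if (!tt || !vport_tt)
                        return -ENOMEM;

                error = pci_register_driver(&lpfc_driver);
                if (error) {
                        fc_release_transport(tt);
                        fc_release_transport(vport_tt);
                }
                return error;
        }
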
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 438cbcd9eb13..8a6ceffeabcf 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -30,6 +30,7 @@
30#define LOG_SLI 0x800 /* SLI events */ 30#define LOG_SLI 0x800 /* SLI events */
31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ 31#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */
32#define LOG_LIBDFC 0x2000 /* Libdfc events */ 32#define LOG_LIBDFC 0x2000 /* Libdfc events */
33#define LOG_VPORT 0x4000 /* NPIV events */
33#define LOG_ALL_MSG 0xffff /* LOG all messages */ 34#define LOG_ALL_MSG 0xffff /* LOG all messages */
34 35
35#define lpfc_printf_log(phba, level, mask, fmt, arg...) \ 36#define lpfc_printf_log(phba, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 8041c3f06f7b..8f42fbfdd29e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -82,6 +82,22 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
82} 82}
83 83
84/**********************************************/ 84/**********************************************/
85/* lpfc_heart_beat Issue a HEART_BEAT */
86/* mailbox command */
87/**********************************************/
88void
89lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
90{
91 MAILBOX_t *mb;
92
93 mb = &pmb->mb;
94 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
95 mb->mbxCommand = MBX_HEARTBEAT;
96 mb->mbxOwner = OWN_HOST;
97 return;
98}
99
100/**********************************************/
85/* lpfc_read_la Issue a READ LA */ 101/* lpfc_read_la Issue a READ LA */
86/* mailbox command */ 102/* mailbox command */
87/**********************************************/ 103/**********************************************/
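
    The new lpfc_heart_beat() follows the same template as every builder in
    lpfc_mbox.c: zero the whole LPFC_MBOXQ_t so reserved fields are clean,
    fill in the command-specific union fields, then set mbxCommand and hand
    ownership to the host. A generic sketch of that pattern (the mailbox
    layout is abbreviated and the command code is illustrative, not the
    driver's real MBX_HEARTBEAT value):

        #include <linux/string.h>

        /* abbreviated stand-ins for the driver's mailbox types */
        typedef struct { int mbxCommand; int mbxOwner; } MAILBOX_sketch_t;
        typedef struct { MAILBOX_sketch_t mb; void *context1; } MBOXQ_sketch_t;

        #define SKETCH_MBX_HEARTBEAT 0x31    /* value illustrative only */
        #define SKETCH_OWN_HOST      1

        static void build_heart_beat_sketch(MBOXQ_sketch_t *pmb)
        {
                MAILBOX_sketch_t *mb = &pmb->mb;

                /* zero the whole queue entry before filling it in */
                memset(pmb, 0, sizeof(*pmb));
                mb->mbxCommand = SKETCH_MBX_HEARTBEAT;
                mb->mbxOwner = SKETCH_OWN_HOST;
        }
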
@@ -134,6 +150,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
134void 150void
135lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 151lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
136{ 152{
153 struct lpfc_vport *vport = phba->pport;
137 MAILBOX_t *mb = &pmb->mb; 154 MAILBOX_t *mb = &pmb->mb;
138 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 155 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
139 156
@@ -147,7 +164,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
147 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; 164 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
148 } 165 }
149 166
150 mb->un.varCfgLnk.myId = phba->fc_myDID; 167 mb->un.varCfgLnk.myId = vport->fc_myDID;
151 mb->un.varCfgLnk.edtov = phba->fc_edtov; 168 mb->un.varCfgLnk.edtov = phba->fc_edtov;
152 mb->un.varCfgLnk.arbtov = phba->fc_arbtov; 169 mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
153 mb->un.varCfgLnk.ratov = phba->fc_ratov; 170 mb->un.varCfgLnk.ratov = phba->fc_ratov;
@@ -239,7 +256,7 @@ lpfc_init_link(struct lpfc_hba * phba,
239/* mailbox command */ 256/* mailbox command */
240/**********************************************/ 257/**********************************************/
241int 258int
242lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 259lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
243{ 260{
244 struct lpfc_dmabuf *mp; 261 struct lpfc_dmabuf *mp;
245 MAILBOX_t *mb; 262 MAILBOX_t *mb;
@@ -270,6 +287,7 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
270 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 287 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
271 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 288 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
272 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 289 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
290 mb->un.varRdSparm.vpi = vpi;
273 291
274 /* save address for completion */ 292 /* save address for completion */
275 pmb->context1 = mp; 293 pmb->context1 = mp;
@@ -282,7 +300,8 @@ lpfc_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
282/* mailbox command */ 300/* mailbox command */
283/********************************************/ 301/********************************************/
284void 302void
285lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) 303lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
304 LPFC_MBOXQ_t * pmb)
286{ 305{
287 MAILBOX_t *mb; 306 MAILBOX_t *mb;
288 307
@@ -290,6 +309,7 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb)
290 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 309 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
291 310
292 mb->un.varUnregDID.did = did; 311 mb->un.varUnregDID.did = did;
312 mb->un.varUnregDID.vpi = vpi;
293 313
294 mb->mbxCommand = MBX_UNREG_D_ID; 314 mb->mbxCommand = MBX_UNREG_D_ID;
295 mb->mbxOwner = OWN_HOST; 315 mb->mbxOwner = OWN_HOST;
@@ -335,19 +355,17 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
335/* mailbox command */ 355/* mailbox command */
336/********************************************/ 356/********************************************/
337int 357int
338lpfc_reg_login(struct lpfc_hba * phba, 358lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
339 uint32_t did, uint8_t * param, LPFC_MBOXQ_t * pmb, uint32_t flag) 359 uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
340{ 360{
361 MAILBOX_t *mb = &pmb->mb;
341 uint8_t *sparam; 362 uint8_t *sparam;
342 struct lpfc_dmabuf *mp; 363 struct lpfc_dmabuf *mp;
343 MAILBOX_t *mb;
344 struct lpfc_sli *psli;
345 364
346 psli = &phba->sli;
347 mb = &pmb->mb;
348 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 365 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
349 366
350 mb->un.varRegLogin.rpi = 0; 367 mb->un.varRegLogin.rpi = 0;
368 mb->un.varRegLogin.vpi = vpi;
351 mb->un.varRegLogin.did = did; 369 mb->un.varRegLogin.did = did;
352 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ 370 mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */
353 371
@@ -359,12 +377,10 @@ lpfc_reg_login(struct lpfc_hba * phba,
359 kfree(mp); 377 kfree(mp);
360 mb->mbxCommand = MBX_REG_LOGIN64; 378 mb->mbxCommand = MBX_REG_LOGIN64;
361 /* REG_LOGIN: no buffers */ 379 /* REG_LOGIN: no buffers */
362 lpfc_printf_log(phba, 380 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
363 KERN_WARNING, 381 "%d (%d):0302 REG_LOGIN: no buffers, DID x%x, "
364 LOG_MBOX, 382 "flag x%x\n",
365 "%d:0302 REG_LOGIN: no buffers Data x%x x%x\n", 383 phba->brd_no, vpi, did, flag);
366 phba->brd_no,
367 (uint32_t) did, (uint32_t) flag);
368 return (1); 384 return (1);
369 } 385 }
370 INIT_LIST_HEAD(&mp->list); 386 INIT_LIST_HEAD(&mp->list);
@@ -389,7 +405,8 @@ lpfc_reg_login(struct lpfc_hba * phba,
389/* mailbox command */ 405/* mailbox command */
390/**********************************************/ 406/**********************************************/
391void 407void
392lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb) 408lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
409 LPFC_MBOXQ_t * pmb)
393{ 410{
394 MAILBOX_t *mb; 411 MAILBOX_t *mb;
395 412
@@ -398,12 +415,52 @@ lpfc_unreg_login(struct lpfc_hba * phba, uint32_t rpi, LPFC_MBOXQ_t * pmb)
398 415
399 mb->un.varUnregLogin.rpi = (uint16_t) rpi; 416 mb->un.varUnregLogin.rpi = (uint16_t) rpi;
400 mb->un.varUnregLogin.rsvd1 = 0; 417 mb->un.varUnregLogin.rsvd1 = 0;
418 mb->un.varUnregLogin.vpi = vpi;
401 419
402 mb->mbxCommand = MBX_UNREG_LOGIN; 420 mb->mbxCommand = MBX_UNREG_LOGIN;
403 mb->mbxOwner = OWN_HOST; 421 mb->mbxOwner = OWN_HOST;
404 return; 422 return;
405} 423}
406 424
425/**************************************************/
426/* lpfc_reg_vpi Issue a REG_VPI */
427/* mailbox command */
428/**************************************************/
429void
430lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
431 LPFC_MBOXQ_t *pmb)
432{
433 MAILBOX_t *mb = &pmb->mb;
434
435 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
436
437 mb->un.varRegVpi.vpi = vpi;
438 mb->un.varRegVpi.sid = sid;
439
440 mb->mbxCommand = MBX_REG_VPI;
441 mb->mbxOwner = OWN_HOST;
442 return;
443
444}
445
446/**************************************************/
447/* lpfc_unreg_vpi Issue a UNREG_VPI */
448/* mailbox command */
449/**************************************************/
450void
451lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
452{
453 MAILBOX_t *mb = &pmb->mb;
454 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
455
456 mb->un.varUnregVpi.vpi = vpi;
457
458 mb->mbxCommand = MBX_UNREG_VPI;
459 mb->mbxOwner = OWN_HOST;
460 return;
461
462}
463
407static void 464static void
408lpfc_config_pcb_setup(struct lpfc_hba * phba) 465lpfc_config_pcb_setup(struct lpfc_hba * phba)
409{ 466{
@@ -412,14 +469,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
412 PCB_t *pcbp = &phba->slim2p->pcb; 469 PCB_t *pcbp = &phba->slim2p->pcb;
413 dma_addr_t pdma_addr; 470 dma_addr_t pdma_addr;
414 uint32_t offset; 471 uint32_t offset;
415 uint32_t iocbCnt; 472 uint32_t iocbCnt = 0;
416 int i; 473 int i;
417 474
418 pcbp->maxRing = (psli->num_rings - 1); 475 pcbp->maxRing = (psli->num_rings - 1);
419 476
420 iocbCnt = 0;
421 for (i = 0; i < psli->num_rings; i++) { 477 for (i = 0; i < psli->num_rings; i++) {
422 pring = &psli->ring[i]; 478 pring = &psli->ring[i];
479
480 pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE:
481 SLI2_IOCB_CMD_SIZE;
482 pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE:
483 SLI2_IOCB_RSP_SIZE;
423 /* A ring MUST have both cmd and rsp entries defined to be 484 /* A ring MUST have both cmd and rsp entries defined to be
424 valid */ 485 valid */
425 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) { 486 if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
@@ -434,20 +495,18 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
434 continue; 495 continue;
435 } 496 }
436 /* Command ring setup for ring */ 497 /* Command ring setup for ring */
437 pring->cmdringaddr = 498 pring->cmdringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
438 (void *)&phba->slim2p->IOCBs[iocbCnt];
439 pcbp->rdsc[i].cmdEntries = pring->numCiocb; 499 pcbp->rdsc[i].cmdEntries = pring->numCiocb;
440 500
441 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 501 offset = (uint8_t *) &phba->slim2p->IOCBs[iocbCnt] -
442 (uint8_t *)phba->slim2p; 502 (uint8_t *) phba->slim2p;
443 pdma_addr = phba->slim2p_mapping + offset; 503 pdma_addr = phba->slim2p_mapping + offset;
444 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); 504 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
445 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); 505 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
446 iocbCnt += pring->numCiocb; 506 iocbCnt += pring->numCiocb;
447 507
448 /* Response ring setup for ring */ 508 /* Response ring setup for ring */
449 pring->rspringaddr = 509 pring->rspringaddr = (void *) &phba->slim2p->IOCBs[iocbCnt];
450 (void *)&phba->slim2p->IOCBs[iocbCnt];
451 510
452 pcbp->rdsc[i].rspEntries = pring->numRiocb; 511 pcbp->rdsc[i].rspEntries = pring->numRiocb;
453 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] - 512 offset = (uint8_t *)&phba->slim2p->IOCBs[iocbCnt] -
@@ -462,16 +521,108 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
462void 521void
463lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 522lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
464{ 523{
465 MAILBOX_t *mb; 524 MAILBOX_t *mb = &pmb->mb;
466
467 mb = &pmb->mb;
468 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 525 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
469 mb->un.varRdRev.cv = 1; 526 mb->un.varRdRev.cv = 1;
527 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
470 mb->mbxCommand = MBX_READ_REV; 528 mb->mbxCommand = MBX_READ_REV;
471 mb->mbxOwner = OWN_HOST; 529 mb->mbxOwner = OWN_HOST;
472 return; 530 return;
473} 531}
474 532
533static void
534lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
535 struct lpfc_hbq_init *hbq_desc)
536{
537 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
538 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
539 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
540}
541
542static void
543lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
544 struct lpfc_hbq_init *hbq_desc)
545{
546 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
547 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
548 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
549 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
550 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
551 sizeof(hbqmb->profiles.profile3.cmdmatch));
552}
553
554static void
555lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
556 struct lpfc_hbq_init *hbq_desc)
557{
558 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
559 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
560 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
561 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
562 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
563 sizeof(hbqmb->profiles.profile5.cmdmatch));
564}
565
566void
567lpfc_config_hbq(struct lpfc_hba *phba, struct lpfc_hbq_init *hbq_desc,
568 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
569{
570 int i;
571 MAILBOX_t *mb = &pmb->mb;
572 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
573
574 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
575 hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
576 hbqmb->recvNotify = hbq_desc->rn; /* Receive
577 * Notification */
578 hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
579 * # in words 0-19 */
580 hbqmb->profile = hbq_desc->profile; /* Selection profile:
581 * 0 = all,
582 * 7 = logentry */
583 hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
584 * e.g. Ring0=b0001,
585 * ring2=b0100 */
586 hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
587 * or 5 */
588 hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
589 * HBQ will be used
590 * for LogEntry
591 * buffers */
592 hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
593 hbq_entry_index * sizeof(struct lpfc_hbq_entry);
594 hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
595
596 mb->mbxCommand = MBX_CONFIG_HBQ;
597 mb->mbxOwner = OWN_HOST;
598
599 /* Copy info for profiles 2, 3 and 5. For all other
600 * profiles this area is reserved.
601 */
602 if (hbq_desc->profile == 2)
603 lpfc_build_hbq_profile2(hbqmb, hbq_desc);
604 else if (hbq_desc->profile == 3)
605 lpfc_build_hbq_profile3(hbqmb, hbq_desc);
606 else if (hbq_desc->profile == 5)
607 lpfc_build_hbq_profile5(hbqmb, hbq_desc);
608
609 /* Return if no rctl / type masks for this HBQ */
610 if (!hbq_desc->mask_count)
611 return;
612
613 /* Otherwise we set up specific rctl / type masks for this HBQ */
614 for (i = 0; i < hbq_desc->mask_count; i++) {
615 hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
616 hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
617 hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
618 hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
619 }
620
621 return;
622}
623
624
625
475void 626void
476lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 627lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
477{ 628{
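
    lpfc_config_hbq() above dispatches to one of three fill-in helpers based
    on hbq_desc->profile; only profiles 2, 3 and 5 carry extra data, and for
    every other profile the union area stays reserved (zeroed). A condensed
    sketch of that dispatch with the types abbreviated to what the sketch
    needs:

        struct hbq_init_sketch { int profile; int seqlenbcnt; int maxlen; };
        struct hbq_var_sketch  { int profile; int seqlenbcnt; int maxlen; };

        static void fill_profile2_sketch(struct hbq_var_sketch *mb,
                                         const struct hbq_init_sketch *d)
        {
                mb->seqlenbcnt = d->seqlenbcnt;
                mb->maxlen = d->maxlen;
        }

        static void config_hbq_sketch(struct hbq_var_sketch *mb,
                                      const struct hbq_init_sketch *desc)
        {
                mb->profile = desc->profile;

                /* only profiles 2, 3 and 5 have a payload in the union */
                if (desc->profile == 2)
                        fill_profile2_sketch(mb, desc);
                /* profiles 3 and 5 would add cmdcodeoff/cmdmatch the same way */
        }
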
@@ -514,15 +665,16 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
514} 665}
515 666
516void 667void
517lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 668lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
518{ 669{
670 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
519 MAILBOX_t *mb = &pmb->mb; 671 MAILBOX_t *mb = &pmb->mb;
520 dma_addr_t pdma_addr; 672 dma_addr_t pdma_addr;
521 uint32_t bar_low, bar_high; 673 uint32_t bar_low, bar_high;
522 size_t offset; 674 size_t offset;
523 struct lpfc_hgp hgp; 675 struct lpfc_hgp hgp;
524 void __iomem *to_slim;
525 int i; 676 int i;
677 uint32_t pgp_offset;
526 678
527 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 679 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
528 mb->mbxCommand = MBX_CONFIG_PORT; 680 mb->mbxCommand = MBX_CONFIG_PORT;
@@ -535,12 +687,29 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
535 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); 687 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
536 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); 688 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
537 689
690 /* If HBA supports SLI=3 ask for it */
691
692 if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
693 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
694 mb->un.varCfgPort.max_hbq = 1; /* Requesting 2 HBQs */
695 if (phba->max_vpi && phba->cfg_npiv_enable &&
696 phba->vpd.sli3Feat.cmv) {
697 mb->un.varCfgPort.max_vpi = phba->max_vpi;
698 mb->un.varCfgPort.cmv = 1;
699 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
700 } else
701 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
702 } else
703 phba->sli_rev = 2;
704 mb->un.varCfgPort.sli_mode = phba->sli_rev;
705
538 /* Now setup pcb */ 706 /* Now setup pcb */
539 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2; 707 phba->slim2p->pcb.type = TYPE_NATIVE_SLI2;
540 phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2; 708 phba->slim2p->pcb.feature = FEATURE_INITIAL_SLI2;
541 709
542 /* Setup Mailbox pointers */ 710 /* Setup Mailbox pointers */
543 phba->slim2p->pcb.mailBoxSize = sizeof(MAILBOX_t); 711 phba->slim2p->pcb.mailBoxSize = offsetof(MAILBOX_t, us) +
712 sizeof(struct sli2_desc);
544 offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p; 713 offset = (uint8_t *)&phba->slim2p->mbx - (uint8_t *)phba->slim2p;
545 pdma_addr = phba->slim2p_mapping + offset; 714 pdma_addr = phba->slim2p_mapping + offset;
546 phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr); 715 phba->slim2p->pcb.mbAddrHigh = putPaddrHigh(pdma_addr);
@@ -568,29 +737,70 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
568 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low); 737 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
569 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high); 738 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
570 739
740 /*
741 * Set up HGP - Port Memory
742 *
743 * The port expects the host get/put pointers to reside in memory
744 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
745 * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
746 * area of SLIM. In SLI-2 mode, there are an additional 16 reserved
747 * configured in SLI-3.
748 *
749 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
750 * RR0Get 0xc4 0x84
751 * CR1Put 0xc8 0x88
752 * RR1Get 0xcc 0x8c
753 * CR2Put 0xd0 0x90
754 * RR2Get 0xd4 0x94
755 * CR3Put 0xd8 0x98
756 * RR3Get 0xdc 0x9c
757 *
758 * Reserved 0xa0-0xbf
759 * If HBQs configured:
760 * HBQ 0 Put ptr 0xc0
761 * HBQ 1 Put ptr 0xc4
762 * HBQ 2 Put ptr 0xc8
763 * ......
764 * HBQ (M-1) Put ptr 0xc0 + (M-1)*4
765 *
766 */
767
768 if (phba->sli_rev == 3) {
769 phba->host_gp = &mb_slim->us.s3.host[0];
770 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
771 } else {
772 phba->host_gp = &mb_slim->us.s2.host[0];
773 phba->hbq_put = NULL;
774 }
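A minimal sketch of the offset arithmetic the layout comment describes, assuming the 32-word mailbox and 16 reserved SLI-2 words stated above; MBOX_WORDS and SLI2_RSVD_WORDS are illustrative names, not constants from the lpfc headers:

/* Offset of the host get/put (HGP) array within SLIM. */
#define MBOX_WORDS	32	/* "non-diagnostic" mailbox: 32 words = 0x80 bytes */
#define SLI2_RSVD_WORDS	16	/* SLI-2 only: 16 reserved words = 0x40 bytes */

static unsigned int hgp_slim_offset(int sli_rev)
{
	if (sli_rev == 3)	/* HBQs configured: HGP follows the mailbox */
		return MBOX_WORDS * 4;			/* 0x80, CR0Put */
	return (MBOX_WORDS + SLI2_RSVD_WORDS) * 4;	/* 0xc0, CR0Put */
}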
571 775
572 /* mask off BAR0's flag bits 0 - 3 */ 776 /* mask off BAR0's flag bits 0 - 3 */
573 phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + 777 phba->slim2p->pcb.hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
574 (SLIMOFF*sizeof(uint32_t)); 778 (void __iomem *) phba->host_gp -
779 (void __iomem *)phba->MBslimaddr;
575 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) 780 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
576 phba->slim2p->pcb.hgpAddrHigh = bar_high; 781 phba->slim2p->pcb.hgpAddrHigh = bar_high;
577 else 782 else
578 phba->slim2p->pcb.hgpAddrHigh = 0; 783 phba->slim2p->pcb.hgpAddrHigh = 0;
579 /* write HGP data to SLIM at the required longword offset */ 784 /* write HGP data to SLIM at the required longword offset */
580 memset(&hgp, 0, sizeof(struct lpfc_hgp)); 785 memset(&hgp, 0, sizeof(struct lpfc_hgp));
581 to_slim = phba->MBslimaddr + (SLIMOFF*sizeof (uint32_t));
582 786
583 for (i=0; i < phba->sli.num_rings; i++) { 787 for (i=0; i < phba->sli.num_rings; i++) {
584 lpfc_memcpy_to_slim(to_slim, &hgp, sizeof(struct lpfc_hgp)); 788 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
585 to_slim += sizeof (struct lpfc_hgp); 789 sizeof(*phba->host_gp));
586 } 790 }
587 791
588 /* Setup Port Group ring pointer */ 792 /* Setup Port Group ring pointer */
589 offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port - 793 if (phba->sli_rev == 3)
590 (uint8_t *)phba->slim2p; 794 pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s3_pgp.port -
591 pdma_addr = phba->slim2p_mapping + offset; 795 (uint8_t *)phba->slim2p;
796 else
797 pgp_offset = (uint8_t *)&phba->slim2p->mbx.us.s2.port -
798 (uint8_t *)phba->slim2p;
799
800 pdma_addr = phba->slim2p_mapping + pgp_offset;
592 phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr); 801 phba->slim2p->pcb.pgpAddrHigh = putPaddrHigh(pdma_addr);
593 phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr); 802 phba->slim2p->pcb.pgpAddrLow = putPaddrLow(pdma_addr);
803 phba->hbq_get = &phba->slim2p->mbx.us.s3_pgp.hbq_get[0];
594 804
595 /* Use callback routine to setup rings in the pcb */ 805 /* Use callback routine to setup rings in the pcb */
596 lpfc_config_pcb_setup(phba); 806 lpfc_config_pcb_setup(phba);
@@ -606,11 +816,7 @@ lpfc_config_port(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
606 816
607 /* Swap PCB if needed */ 817 /* Swap PCB if needed */
608 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb, 818 lpfc_sli_pcimem_bcopy(&phba->slim2p->pcb, &phba->slim2p->pcb,
609 sizeof (PCB_t)); 819 sizeof(PCB_t));
610
611 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
612 "%d:0405 Service Level Interface (SLI) 2 selected\n",
613 phba->brd_no);
614} 820}
615 821
616void 822void
@@ -644,15 +850,23 @@ lpfc_mbox_get(struct lpfc_hba * phba)
644 LPFC_MBOXQ_t *mbq = NULL; 850 LPFC_MBOXQ_t *mbq = NULL;
645 struct lpfc_sli *psli = &phba->sli; 851 struct lpfc_sli *psli = &phba->sli;
646 852
647 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, 853 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
648 list); 854 if (mbq)
649 if (mbq) {
650 psli->mboxq_cnt--; 855 psli->mboxq_cnt--;
651 }
652 856
653 return mbq; 857 return mbq;
654} 858}
655 859
860void
861lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
862{
863 /* This function expects to be called from interrupt context */
864 spin_lock(&phba->hbalock);
865 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
866 spin_unlock(&phba->hbalock);
867 return;
868}
869
656int 870int
657lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) 871lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
658{ 872{
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index ec3bbbde6f7a..3594c469494f 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -38,10 +38,13 @@
38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ 38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 40
41
42
41int 43int
42lpfc_mem_alloc(struct lpfc_hba * phba) 44lpfc_mem_alloc(struct lpfc_hba * phba)
43{ 45{
44 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 46 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
47 int longs;
45 int i; 48 int i;
46 49
47 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", 50 phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
@@ -80,10 +83,27 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
80 if (!phba->nlp_mem_pool) 83 if (!phba->nlp_mem_pool)
81 goto fail_free_mbox_pool; 84 goto fail_free_mbox_pool;
82 85
86 phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
87 LPFC_BPL_SIZE, 8, 0);
88 if (!phba->lpfc_hbq_pool)
89 goto fail_free_nlp_mem_pool;
90
91 /* vpi zero is reserved for the physical port so add 1 to max */
92 longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
93 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
94 if (!phba->vpi_bmask)
95 goto fail_free_hbq_pool;
96
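The sizing above is the usual round-up-to-longs idiom; a worked example with an illustrative max_vpi:

/* Example: max_vpi = 100 -> 101 VPIs (vpi 0 is the physical port).
 *   64-bit: longs = (101 + 63) / 64 = 2 -> 2 * 8 = 16 bytes, 128 bits
 *   32-bit: longs = (101 + 31) / 32 = 4 -> 4 * 4 = 16 bytes, 128 bits
 * Equivalent to BITS_TO_LONGS(phba->max_vpi + 1) from <linux/bitops.h>.
 */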
83 return 0; 97 return 0;
84 98
99 fail_free_hbq_pool:
100 lpfc_sli_hbqbuf_free_all(phba);
101 fail_free_nlp_mem_pool:
102 mempool_destroy(phba->nlp_mem_pool);
103 phba->nlp_mem_pool = NULL;
85 fail_free_mbox_pool: 104 fail_free_mbox_pool:
86 mempool_destroy(phba->mbox_mem_pool); 105 mempool_destroy(phba->mbox_mem_pool);
106 phba->mbox_mem_pool = NULL;
87 fail_free_mbuf_pool: 107 fail_free_mbuf_pool:
88 while (i--) 108 while (i--)
89 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 109 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
@@ -91,8 +111,10 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
91 kfree(pool->elements); 111 kfree(pool->elements);
92 fail_free_lpfc_mbuf_pool: 112 fail_free_lpfc_mbuf_pool:
93 pci_pool_destroy(phba->lpfc_mbuf_pool); 113 pci_pool_destroy(phba->lpfc_mbuf_pool);
114 phba->lpfc_mbuf_pool = NULL;
94 fail_free_dma_buf_pool: 115 fail_free_dma_buf_pool:
95 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 116 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
117 phba->lpfc_scsi_dma_buf_pool = NULL;
96 fail: 118 fail:
97 return -ENOMEM; 119 return -ENOMEM;
98} 120}
@@ -106,6 +128,9 @@ lpfc_mem_free(struct lpfc_hba * phba)
106 struct lpfc_dmabuf *mp; 128 struct lpfc_dmabuf *mp;
107 int i; 129 int i;
108 130
131 kfree(phba->vpi_bmask);
132 lpfc_sli_hbqbuf_free_all(phba);
133
109 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { 134 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
110 mp = (struct lpfc_dmabuf *) (mbox->context1); 135 mp = (struct lpfc_dmabuf *) (mbox->context1);
111 if (mp) { 136 if (mp) {
@@ -115,6 +140,15 @@ lpfc_mem_free(struct lpfc_hba * phba)
115 list_del(&mbox->list); 140 list_del(&mbox->list);
116 mempool_free(mbox, phba->mbox_mem_pool); 141 mempool_free(mbox, phba->mbox_mem_pool);
117 } 142 }
143 list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
144 mp = (struct lpfc_dmabuf *) (mbox->context1);
145 if (mp) {
146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
147 kfree(mp);
148 }
149 list_del(&mbox->list);
150 mempool_free(mbox, phba->mbox_mem_pool);
151 }
118 152
119 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 153 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
120 if (psli->mbox_active) { 154 if (psli->mbox_active) {
@@ -132,13 +166,21 @@ lpfc_mem_free(struct lpfc_hba * phba)
132 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, 166 pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
133 pool->elements[i].phys); 167 pool->elements[i].phys);
134 kfree(pool->elements); 168 kfree(pool->elements);
169
170 pci_pool_destroy(phba->lpfc_hbq_pool);
135 mempool_destroy(phba->nlp_mem_pool); 171 mempool_destroy(phba->nlp_mem_pool);
136 mempool_destroy(phba->mbox_mem_pool); 172 mempool_destroy(phba->mbox_mem_pool);
137 173
138 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); 174 pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
139 pci_pool_destroy(phba->lpfc_mbuf_pool); 175 pci_pool_destroy(phba->lpfc_mbuf_pool);
140 176
141 /* Free the iocb lookup array */ 177 phba->lpfc_hbq_pool = NULL;
178 phba->nlp_mem_pool = NULL;
179 phba->mbox_mem_pool = NULL;
180 phba->lpfc_scsi_dma_buf_pool = NULL;
181 phba->lpfc_mbuf_pool = NULL;
182
183 /* Free the iocb lookup array */
142 kfree(psli->iocbq_lookup); 184 kfree(psli->iocbq_lookup);
143 psli->iocbq_lookup = NULL; 185 psli->iocbq_lookup = NULL;
144 186
@@ -148,20 +190,23 @@ void *
148lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle) 190lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
149{ 191{
150 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 192 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
193 unsigned long iflags;
151 void *ret; 194 void *ret;
152 195
153 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle); 196 ret = pci_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
154 197
155 if (!ret && ( mem_flags & MEM_PRI) && pool->current_count) { 198 spin_lock_irqsave(&phba->hbalock, iflags);
199 if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
156 pool->current_count--; 200 pool->current_count--;
157 ret = pool->elements[pool->current_count].virt; 201 ret = pool->elements[pool->current_count].virt;
158 *handle = pool->elements[pool->current_count].phys; 202 *handle = pool->elements[pool->current_count].phys;
159 } 203 }
204 spin_unlock_irqrestore(&phba->hbalock, iflags);
160 return ret; 205 return ret;
161} 206}
162 207
163void 208void
164lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) 209__lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
165{ 210{
166 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; 211 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
167 212
@@ -174,3 +219,51 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
174 } 219 }
175 return; 220 return;
176} 221}
222
223void
224lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
225{
226 unsigned long iflags;
227
228 spin_lock_irqsave(&phba->hbalock, iflags);
229 __lpfc_mbuf_free(phba, virt, dma);
230 spin_unlock_irqrestore(&phba->hbalock, iflags);
231 return;
232}
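With the split above, __lpfc_mbuf_free() is the caller-already-holds-hbalock variant; a sketch of a hypothetical caller (function name and context are illustrative):

static void example_drop_mbuf(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	/* ... other bookkeeping done under the lock ... */
	__lpfc_mbuf_free(phba, mp->virt, mp->phys);	/* lpfc_mbuf_free() here would deadlock */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	kfree(mp);
}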
233
234void *
235lpfc_hbq_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
236{
237 void *ret;
238 ret = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_ATOMIC, handle);
239 return ret;
240}
241
242void
243lpfc_hbq_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
244{
245 pci_pool_free(phba->lpfc_hbq_pool, virt, dma);
246 return;
247}
248
249void
250lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
251{
252 struct hbq_dmabuf *hbq_entry;
253
254 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
255 hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
256 if (hbq_entry->tag == -1) {
257 lpfc_hbq_free(phba, hbq_entry->dbuf.virt,
258 hbq_entry->dbuf.phys);
259 kfree(hbq_entry);
260 } else {
261 lpfc_sli_free_hbq(phba, hbq_entry);
262 }
263 } else {
264 lpfc_mbuf_free(phba, mp->virt, mp->phys);
265 kfree(mp);
266 }
267 return;
268}
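A hedged reading of the dispatch above; the tag semantics are inferred from the code, not stated in the patch:

/* Inferred dispatch:
 *   SLI-3 with HBQs, tag == -1 : buffer never posted to an HBQ, so the
 *                                DMA memory and wrapper are freed here;
 *   SLI-3 with HBQs, tag != -1 : a posted HBQ buffer, handed back via
 *                                lpfc_sli_free_hbq();
 *   otherwise (SLI-2)          : a plain mbuf, lpfc_mbuf_free() + kfree().
 */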
269
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b309841e3846..bca2f5c9b4ba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,4 +1,4 @@
1/******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. *
@@ -35,20 +35,22 @@
35#include "lpfc.h" 35#include "lpfc.h"
36#include "lpfc_logmsg.h" 36#include "lpfc_logmsg.h"
37#include "lpfc_crtn.h" 37#include "lpfc_crtn.h"
38#include "lpfc_vport.h"
39#include "lpfc_debugfs.h"
38 40
39 41
40/* Called to verify a rcv'ed ADISC was intended for us. */ 42/* Called to verify a rcv'ed ADISC was intended for us. */
41static int 43static int
42lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 44lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
43 struct lpfc_name * nn, struct lpfc_name * pn) 45 struct lpfc_name *nn, struct lpfc_name *pn)
44{ 46{
45 /* Compare the ADISC rsp WWNN / WWPN matches our internal node 47 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
46 * table entry for that node. 48 * table entry for that node.
47 */ 49 */
48 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0) 50 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
49 return 0; 51 return 0;
50 52
51 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0) 53 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
52 return 0; 54 return 0;
53 55
54 /* we match, return success */ 56 /* we match, return success */
@@ -56,11 +58,10 @@ lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
56} 58}
57 59
58int 60int
59lpfc_check_sparm(struct lpfc_hba * phba, 61lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
60 struct lpfc_nodelist * ndlp, struct serv_parm * sp, 62 struct serv_parm * sp, uint32_t class)
61 uint32_t class)
62{ 63{
63 volatile struct serv_parm *hsp = &phba->fc_sparam; 64 volatile struct serv_parm *hsp = &vport->fc_sparam;
64 uint16_t hsp_value, ssp_value = 0; 65 uint16_t hsp_value, ssp_value = 0;
65 66
66 /* 67 /*
@@ -75,12 +76,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
75 hsp->cls1.rcvDataSizeLsb; 76 hsp->cls1.rcvDataSizeLsb;
76 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) | 77 ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
77 sp->cls1.rcvDataSizeLsb; 78 sp->cls1.rcvDataSizeLsb;
79 if (!ssp_value)
80 goto bad_service_param;
78 if (ssp_value > hsp_value) { 81 if (ssp_value > hsp_value) {
79 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb; 82 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
80 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb; 83 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
81 } 84 }
82 } else if (class == CLASS1) { 85 } else if (class == CLASS1) {
83 return 0; 86 goto bad_service_param;
84 } 87 }
85 88
86 if (sp->cls2.classValid) { 89 if (sp->cls2.classValid) {
@@ -88,12 +91,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
88 hsp->cls2.rcvDataSizeLsb; 91 hsp->cls2.rcvDataSizeLsb;
89 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) | 92 ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
90 sp->cls2.rcvDataSizeLsb; 93 sp->cls2.rcvDataSizeLsb;
94 if (!ssp_value)
95 goto bad_service_param;
91 if (ssp_value > hsp_value) { 96 if (ssp_value > hsp_value) {
92 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb; 97 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
93 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb; 98 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
94 } 99 }
95 } else if (class == CLASS2) { 100 } else if (class == CLASS2) {
96 return 0; 101 goto bad_service_param;
97 } 102 }
98 103
99 if (sp->cls3.classValid) { 104 if (sp->cls3.classValid) {
@@ -101,12 +106,14 @@ lpfc_check_sparm(struct lpfc_hba * phba,
101 hsp->cls3.rcvDataSizeLsb; 106 hsp->cls3.rcvDataSizeLsb;
102 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) | 107 ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
103 sp->cls3.rcvDataSizeLsb; 108 sp->cls3.rcvDataSizeLsb;
109 if (!ssp_value)
110 goto bad_service_param;
104 if (ssp_value > hsp_value) { 111 if (ssp_value > hsp_value) {
105 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb; 112 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
106 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb; 113 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
107 } 114 }
108 } else if (class == CLASS3) { 115 } else if (class == CLASS3) {
109 return 0; 116 goto bad_service_param;
110 } 117 }
111 118
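The zero-size guard added to each class block rejects a degenerate value; the 16-bit receive data field size is assembled from two bytes, for example:

/* Example: rcvDataSizeMsb = 0x08, rcvDataSizeLsb = 0x00
 *   ssp_value = (0x08 << 8) | 0x00 = 0x0800 = 2048 bytes
 * Zero is not a usable receive data field size, hence the new
 * "if (!ssp_value) goto bad_service_param;" in each valid-class branch.
 */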
112 /* 119 /*
@@ -125,12 +132,22 @@ lpfc_check_sparm(struct lpfc_hba * phba,
125 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name)); 132 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
126 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name)); 133 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
127 return 1; 134 return 1;
135bad_service_param:
136 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
137 "%d (%d):0207 Device %x "
138 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
139 "invalid service parameters. Ignoring device.\n",
140 vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
141 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
142 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
143 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
144 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
145 return 0;
128} 146}
129 147
130static void * 148static void *
131lpfc_check_elscmpl_iocb(struct lpfc_hba * phba, 149lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
132 struct lpfc_iocbq *cmdiocb, 150 struct lpfc_iocbq *rspiocb)
133 struct lpfc_iocbq *rspiocb)
134{ 151{
135 struct lpfc_dmabuf *pcmd, *prsp; 152 struct lpfc_dmabuf *pcmd, *prsp;
136 uint32_t *lp; 153 uint32_t *lp;
@@ -168,32 +185,29 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
168 * routine effectively results in a "software abort". 185 * routine effectively results in a "software abort".
169 */ 186 */
170int 187int
171lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp) 188lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
172{ 189{
173 LIST_HEAD(completions); 190 LIST_HEAD(completions);
174 struct lpfc_sli *psli; 191 struct lpfc_sli *psli = &phba->sli;
175 struct lpfc_sli_ring *pring; 192 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
176 struct lpfc_iocbq *iocb, *next_iocb; 193 struct lpfc_iocbq *iocb, *next_iocb;
177 IOCB_t *cmd; 194 IOCB_t *cmd;
178 195
179 /* Abort outstanding I/O on NPort <nlp_DID> */ 196 /* Abort outstanding I/O on NPort <nlp_DID> */
180 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 197 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
181 "%d:0205 Abort outstanding I/O on NPort x%x " 198 "%d (%d):0205 Abort outstanding I/O on NPort x%x "
182 "Data: x%x x%x x%x\n", 199 "Data: x%x x%x x%x\n",
183 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag, 200 phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
184 ndlp->nlp_state, ndlp->nlp_rpi); 201 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
185 202
186 psli = &phba->sli; 203 lpfc_fabric_abort_nport(ndlp);
187 pring = &psli->ring[LPFC_ELS_RING];
188 204
189 /* First check the txq */ 205 /* First check the txq */
190 spin_lock_irq(phba->host->host_lock); 206 spin_lock_irq(&phba->hbalock);
191 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 207 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
192 /* Check to see if iocb matches the nport we are looking 208 /* Check to see if iocb matches the nport we are looking for */
193 for */
194 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { 209 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
195 /* It matches, so deque and call compl with an 210 /* It matches, so dequeue and call compl with an error */
196 error */
197 list_move_tail(&iocb->list, &completions); 211 list_move_tail(&iocb->list, &completions);
198 pring->txq_cnt--; 212 pring->txq_cnt--;
199 } 213 }
@@ -201,37 +215,39 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
201 215
202 /* Next check the txcmplq */ 216 /* Next check the txcmplq */
203 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 217 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
204 /* Check to see if iocb matches the nport we are looking 218 /* Check to see if iocb matches the nport we are looking for */
205 for */ 219 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
206 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
207 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 220 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
221 }
208 } 222 }
209 spin_unlock_irq(phba->host->host_lock); 223 spin_unlock_irq(&phba->hbalock);
210 224
211 while (!list_empty(&completions)) { 225 while (!list_empty(&completions)) {
212 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 226 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
213 cmd = &iocb->iocb; 227 cmd = &iocb->iocb;
214 list_del(&iocb->list); 228 list_del_init(&iocb->list);
215 229
216 if (iocb->iocb_cmpl) { 230 if (!iocb->iocb_cmpl)
231 lpfc_sli_release_iocbq(phba, iocb);
232 else {
217 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 233 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
218 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 234 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
219 (iocb->iocb_cmpl) (phba, iocb, iocb); 235 (iocb->iocb_cmpl) (phba, iocb, iocb);
220 } else 236 }
221 lpfc_sli_release_iocbq(phba, iocb);
222 } 237 }
223 238
224 /* If we are delaying issuing an ELS command, cancel it */ 239 /* If we are delaying issuing an ELS command, cancel it */
225 if (ndlp->nlp_flag & NLP_DELAY_TMO) 240 if (ndlp->nlp_flag & NLP_DELAY_TMO)
226 lpfc_cancel_retry_delay_tmo(phba, ndlp); 241 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
227 return 0; 242 return 0;
228} 243}
229 244
230static int 245static int
231lpfc_rcv_plogi(struct lpfc_hba * phba, 246lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
232 struct lpfc_nodelist * ndlp, 247 struct lpfc_iocbq *cmdiocb)
233 struct lpfc_iocbq *cmdiocb)
234{ 248{
249 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
250 struct lpfc_hba *phba = vport->phba;
235 struct lpfc_dmabuf *pcmd; 251 struct lpfc_dmabuf *pcmd;
236 uint32_t *lp; 252 uint32_t *lp;
237 IOCB_t *icmd; 253 IOCB_t *icmd;
@@ -241,14 +257,14 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
241 int rc; 257 int rc;
242 258
243 memset(&stat, 0, sizeof (struct ls_rjt)); 259 memset(&stat, 0, sizeof (struct ls_rjt));
244 if (phba->hba_state <= LPFC_FLOGI) { 260 if (vport->port_state <= LPFC_FLOGI) {
245 /* Before responding to PLOGI, check for pt2pt mode. 261 /* Before responding to PLOGI, check for pt2pt mode.
246 * If we are pt2pt, with an outstanding FLOGI, abort 262 * If we are pt2pt, with an outstanding FLOGI, abort
247 * the FLOGI and resend it first. 263 * the FLOGI and resend it first.
248 */ 264 */
249 if (phba->fc_flag & FC_PT2PT) { 265 if (vport->fc_flag & FC_PT2PT) {
250 lpfc_els_abort_flogi(phba); 266 lpfc_els_abort_flogi(phba);
251 if (!(phba->fc_flag & FC_PT2PT_PLOGI)) { 267 if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
252 /* If the other side is supposed to initiate 268 /* If the other side is supposed to initiate
253 * the PLOGI anyway, just ACC it now and 269 * the PLOGI anyway, just ACC it now and
254 * move on with discovery. 270 * move on with discovery.
@@ -257,45 +273,42 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
257 phba->fc_ratov = FF_DEF_RATOV; 273 phba->fc_ratov = FF_DEF_RATOV;
258 /* Start discovery - this should just do 274 /* Start discovery - this should just do
259 CLEAR_LA */ 275 CLEAR_LA */
260 lpfc_disc_start(phba); 276 lpfc_disc_start(vport);
261 } else { 277 } else
262 lpfc_initial_flogi(phba); 278 lpfc_initial_flogi(vport);
263 }
264 } else { 279 } else {
265 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY; 280 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
266 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 281 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
267 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, 282 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
268 ndlp); 283 ndlp, NULL);
269 return 0; 284 return 0;
270 } 285 }
271 } 286 }
272 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 287 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
273 lp = (uint32_t *) pcmd->virt; 288 lp = (uint32_t *) pcmd->virt;
274 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 289 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
275 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) { 290 if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
276 /* Reject this request because invalid parameters */ 291 /* Reject this request because invalid parameters */
277 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 292 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
278 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 293 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
279 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 294 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
295 NULL);
280 return 0; 296 return 0;
281 } 297 }
282 icmd = &cmdiocb->iocb; 298 icmd = &cmdiocb->iocb;
283 299
284 /* PLOGI chkparm OK */ 300 /* PLOGI chkparm OK */
285 lpfc_printf_log(phba, 301 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
286 KERN_INFO, 302 "%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
287 LOG_ELS, 303 phba->brd_no, vport->vpi,
288 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
289 phba->brd_no,
290 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, 304 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
291 ndlp->nlp_rpi); 305 ndlp->nlp_rpi);
292 306
293 if ((phba->cfg_fcp_class == 2) && 307 if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
294 (sp->cls2.classValid)) {
295 ndlp->nlp_fcp_info |= CLASS2; 308 ndlp->nlp_fcp_info |= CLASS2;
296 } else { 309 else
297 ndlp->nlp_fcp_info |= CLASS3; 310 ndlp->nlp_fcp_info |= CLASS3;
298 } 311
299 ndlp->nlp_class_sup = 0; 312 ndlp->nlp_class_sup = 0;
300 if (sp->cls1.classValid) 313 if (sp->cls1.classValid)
301 ndlp->nlp_class_sup |= FC_COS_CLASS1; 314 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -317,35 +330,37 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
317 case NLP_STE_PRLI_ISSUE: 330 case NLP_STE_PRLI_ISSUE:
318 case NLP_STE_UNMAPPED_NODE: 331 case NLP_STE_UNMAPPED_NODE:
319 case NLP_STE_MAPPED_NODE: 332 case NLP_STE_MAPPED_NODE:
320 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0); 333 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
321 return 1; 334 return 1;
322 } 335 }
323 336
324 if ((phba->fc_flag & FC_PT2PT) 337 if ((vport->fc_flag & FC_PT2PT) &&
325 && !(phba->fc_flag & FC_PT2PT_PLOGI)) { 338 !(vport->fc_flag & FC_PT2PT_PLOGI)) {
326 /* rcv'ed PLOGI decides what our NPortId will be */ 339 /* rcv'ed PLOGI decides what our NPortId will be */
327 phba->fc_myDID = icmd->un.rcvels.parmRo; 340 vport->fc_myDID = icmd->un.rcvels.parmRo;
328 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 341 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
329 if (mbox == NULL) 342 if (mbox == NULL)
330 goto out; 343 goto out;
331 lpfc_config_link(phba, mbox); 344 lpfc_config_link(phba, mbox);
332 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 345 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
346 mbox->vport = vport;
333 rc = lpfc_sli_issue_mbox 347 rc = lpfc_sli_issue_mbox
334 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB)); 348 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
335 if (rc == MBX_NOT_FINISHED) { 349 if (rc == MBX_NOT_FINISHED) {
336 mempool_free( mbox, phba->mbox_mem_pool); 350 mempool_free(mbox, phba->mbox_mem_pool);
337 goto out; 351 goto out;
338 } 352 }
339 353
340 lpfc_can_disctmo(phba); 354 lpfc_can_disctmo(vport);
341 } 355 }
342 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 356 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
343 if (mbox == NULL) 357 if (!mbox)
344 goto out; 358 goto out;
345 359
346 if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID, 360 rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
347 (uint8_t *) sp, mbox, 0)) { 361 (uint8_t *) sp, mbox, 0);
348 mempool_free( mbox, phba->mbox_mem_pool); 362 if (rc) {
363 mempool_free(mbox, phba->mbox_mem_pool);
349 goto out; 364 goto out;
350 } 365 }
351 366
@@ -357,7 +372,10 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
357 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox 372 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
358 * command issued in lpfc_cmpl_els_acc(). 373 * command issued in lpfc_cmpl_els_acc().
359 */ 374 */
375 mbox->vport = vport;
376 spin_lock_irq(shost->host_lock);
360 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); 377 ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
378 spin_unlock_irq(shost->host_lock);
361 379
362 /* 380 /*
363 * If there is an outstanding PLOGI issued, abort it before 381 * If there is an outstanding PLOGI issued, abort it before
@@ -373,24 +391,41 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
373 lpfc_els_abort(phba, ndlp); 391 lpfc_els_abort(phba, ndlp);
374 } 392 }
375 393
376 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0); 394 if ((vport->port_type == LPFC_NPIV_PORT &&
395 phba->cfg_vport_restrict_login)) {
396
397 /* In order to preserve RPIs, we want to clean up
398 * the default RPI the firmware created to rcv
399 * this ELS request. The only way to do this is
400 * to register, then unregister the RPI.
401 */
402 spin_lock_irq(shost->host_lock);
403 ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
404 spin_unlock_irq(shost->host_lock);
405 stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
406 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
407 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
408 ndlp, mbox);
409 return 1;
410 }
411 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
377 return 1; 412 return 1;
378 413
379out: 414out:
380 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 415 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
381 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; 416 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
382 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 417 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
383 return 0; 418 return 0;
384} 419}
385 420
386static int 421static int
387lpfc_rcv_padisc(struct lpfc_hba * phba, 422lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
388 struct lpfc_nodelist * ndlp,
389 struct lpfc_iocbq *cmdiocb) 423 struct lpfc_iocbq *cmdiocb)
390{ 424{
425 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
391 struct lpfc_dmabuf *pcmd; 426 struct lpfc_dmabuf *pcmd;
392 struct serv_parm *sp; 427 struct serv_parm *sp;
393 struct lpfc_name *pnn, *ppn; 428 struct lpfc_name *pnn, *ppn;
394 struct ls_rjt stat; 429 struct ls_rjt stat;
395 ADISC *ap; 430 ADISC *ap;
396 IOCB_t *icmd; 431 IOCB_t *icmd;
@@ -412,13 +447,12 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
412 } 447 }
413 448
414 icmd = &cmdiocb->iocb; 449 icmd = &cmdiocb->iocb;
415 if ((icmd->ulpStatus == 0) && 450 if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
416 (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
417 if (cmd == ELS_CMD_ADISC) { 451 if (cmd == ELS_CMD_ADISC) {
418 lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp); 452 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
419 } else { 453 } else {
420 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, 454 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
421 NULL, 0); 455 NULL, 0);
422 } 456 }
423 return 1; 457 return 1;
424 } 458 }
@@ -427,55 +461,57 @@ lpfc_rcv_padisc(struct lpfc_hba * phba,
427 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 461 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
428 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS; 462 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
429 stat.un.b.vendorUnique = 0; 463 stat.un.b.vendorUnique = 0;
430 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 464 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
431 465
432 /* 1 sec timeout */ 466 /* 1 sec timeout */
433 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 467 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
434 468
435 spin_lock_irq(phba->host->host_lock); 469 spin_lock_irq(shost->host_lock);
436 ndlp->nlp_flag |= NLP_DELAY_TMO; 470 ndlp->nlp_flag |= NLP_DELAY_TMO;
437 spin_unlock_irq(phba->host->host_lock); 471 spin_unlock_irq(shost->host_lock);
438 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 472 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
439 ndlp->nlp_prev_state = ndlp->nlp_state; 473 ndlp->nlp_prev_state = ndlp->nlp_state;
440 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 474 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
441 return 0; 475 return 0;
442} 476}
443 477
444static int 478static int
445lpfc_rcv_logo(struct lpfc_hba * phba, 479lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
446 struct lpfc_nodelist * ndlp, 480 struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
447 struct lpfc_iocbq *cmdiocb,
448 uint32_t els_cmd)
449{ 481{
450 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ 482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
483
484 /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
451 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary 485 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
452 * PLOGIs during LOGO storms from a device. 486 * PLOGIs during LOGO storms from a device.
453 */ 487 */
488 spin_lock_irq(shost->host_lock);
454 ndlp->nlp_flag |= NLP_LOGO_ACC; 489 ndlp->nlp_flag |= NLP_LOGO_ACC;
490 spin_unlock_irq(shost->host_lock);
455 if (els_cmd == ELS_CMD_PRLO) 491 if (els_cmd == ELS_CMD_PRLO)
456 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); 492 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
457 else 493 else
458 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 494 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
459 495
460 if (!(ndlp->nlp_type & NLP_FABRIC) || 496 if (!(ndlp->nlp_type & NLP_FABRIC) ||
461 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { 497 (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
462 /* Only try to re-login if this is NOT a Fabric Node */ 498 /* Only try to re-login if this is NOT a Fabric Node */
463 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 499 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
464 spin_lock_irq(phba->host->host_lock); 500 spin_lock_irq(shost->host_lock);
465 ndlp->nlp_flag |= NLP_DELAY_TMO; 501 ndlp->nlp_flag |= NLP_DELAY_TMO;
466 spin_unlock_irq(phba->host->host_lock); 502 spin_unlock_irq(shost->host_lock);
467 503
468 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 504 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
469 ndlp->nlp_prev_state = ndlp->nlp_state; 505 ndlp->nlp_prev_state = ndlp->nlp_state;
470 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 506 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
471 } else { 507 } else {
472 ndlp->nlp_prev_state = ndlp->nlp_state; 508 ndlp->nlp_prev_state = ndlp->nlp_state;
473 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 509 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
474 } 510 }
475 511
476 spin_lock_irq(phba->host->host_lock); 512 spin_lock_irq(shost->host_lock);
477 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 513 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
478 spin_unlock_irq(phba->host->host_lock); 514 spin_unlock_irq(shost->host_lock);
479 /* The driver has to wait until the ACC completes before it continues 515 /* The driver has to wait until the ACC completes before it continues
480 * processing the LOGO. The action will resume in 516 * processing the LOGO. The action will resume in
481 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an 517 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
@@ -485,9 +521,8 @@ lpfc_rcv_logo(struct lpfc_hba * phba,
485} 521}
486 522
487static void 523static void
488lpfc_rcv_prli(struct lpfc_hba * phba, 524lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
489 struct lpfc_nodelist * ndlp, 525 struct lpfc_iocbq *cmdiocb)
490 struct lpfc_iocbq *cmdiocb)
491{ 526{
492 struct lpfc_dmabuf *pcmd; 527 struct lpfc_dmabuf *pcmd;
493 uint32_t *lp; 528 uint32_t *lp;
@@ -501,8 +536,7 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
501 536
502 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 537 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
503 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 538 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
504 if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) && 539 if (npr->prliType == PRLI_FCP_TYPE) {
505 (npr->prliType == PRLI_FCP_TYPE)) {
506 if (npr->initiatorFunc) 540 if (npr->initiatorFunc)
507 ndlp->nlp_type |= NLP_FCP_INITIATOR; 541 ndlp->nlp_type |= NLP_FCP_INITIATOR;
508 if (npr->targetFunc) 542 if (npr->targetFunc)
@@ -517,36 +551,42 @@ lpfc_rcv_prli(struct lpfc_hba * phba,
517 roles |= FC_RPORT_ROLE_FCP_INITIATOR; 551 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
518 if (ndlp->nlp_type & NLP_FCP_TARGET) 552 if (ndlp->nlp_type & NLP_FCP_TARGET)
519 roles |= FC_RPORT_ROLE_FCP_TARGET; 553 roles |= FC_RPORT_ROLE_FCP_TARGET;
554
555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
556 "rport rolechg: role:x%x did:x%x flg:x%x",
557 roles, ndlp->nlp_DID, ndlp->nlp_flag);
558
520 fc_remote_port_rolechg(rport, roles); 559 fc_remote_port_rolechg(rport, roles);
521 } 560 }
522} 561}
523 562
524static uint32_t 563static uint32_t
525lpfc_disc_set_adisc(struct lpfc_hba * phba, 564lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
526 struct lpfc_nodelist * ndlp)
527{ 565{
566 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
567 struct lpfc_hba *phba = vport->phba;
568
528 /* Check config parameter use-adisc or FCP-2 */ 569 /* Check config parameter use-adisc or FCP-2 */
529 if ((phba->cfg_use_adisc == 0) && 570 if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
530 !(phba->fc_flag & FC_RSCN_MODE)) { 571 ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
531 if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE)) 572 spin_lock_irq(shost->host_lock);
532 return 0; 573 ndlp->nlp_flag |= NLP_NPR_ADISC;
574 spin_unlock_irq(shost->host_lock);
575 return 1;
533 } 576 }
534 spin_lock_irq(phba->host->host_lock); 577 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
535 ndlp->nlp_flag |= NLP_NPR_ADISC; 578 lpfc_unreg_rpi(vport, ndlp);
536 spin_unlock_irq(phba->host->host_lock); 579 return 0;
537 return 1;
538} 580}
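The rewritten predicate decides between ADISC and a fresh PLOGI; spelled out as a decision table derived from the condition above:

/* Use ADISC iff (cfg_use_adisc && (fc_flag & FC_RSCN_MODE))
 *            || (nlp_fcp_info & NLP_FCP_2_DEVICE)
 *
 *  cfg_use_adisc  FC_RSCN_MODE  FCP-2 dev | action
 *        1              1           any   | set NLP_NPR_ADISC, return 1
 *       any            any           1    | set NLP_NPR_ADISC, return 1
 *        0             any           0    | clear flag, unreg RPI, return 0
 *        1              0            0    | clear flag, unreg RPI, return 0
 */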
539 581
540static uint32_t 582static uint32_t
541lpfc_disc_illegal(struct lpfc_hba * phba, 583lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
542 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 584 void *arg, uint32_t evt)
543{ 585{
544 lpfc_printf_log(phba, 586 lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
545 KERN_ERR, 587 "%d (%d):0253 Illegal State Transition: node x%x "
546 LOG_DISCOVERY, 588 "event x%x, state x%x Data: x%x x%x\n",
547 "%d:0253 Illegal State Transition: node x%x event x%x, " 589 vport->phba->brd_no, vport->vpi,
548 "state x%x Data: x%x x%x\n",
549 phba->brd_no,
550 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, 590 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
551 ndlp->nlp_flag); 591 ndlp->nlp_flag);
552 return ndlp->nlp_state; 592 return ndlp->nlp_state;
@@ -555,151 +595,162 @@ lpfc_disc_illegal(struct lpfc_hba * phba,
555/* Start of Discovery State Machine routines */ 595/* Start of Discovery State Machine routines */
556 596
557static uint32_t 597static uint32_t
558lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba, 598lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
559 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 599 void *arg, uint32_t evt)
560{ 600{
561 struct lpfc_iocbq *cmdiocb; 601 struct lpfc_iocbq *cmdiocb;
562 602
563 cmdiocb = (struct lpfc_iocbq *) arg; 603 cmdiocb = (struct lpfc_iocbq *) arg;
564 604
565 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 605 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
566 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 606 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
567 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 607 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
568 return ndlp->nlp_state; 608 return ndlp->nlp_state;
569 } 609 }
570 lpfc_drop_node(phba, ndlp); 610 lpfc_drop_node(vport, ndlp);
571 return NLP_STE_FREED_NODE; 611 return NLP_STE_FREED_NODE;
572} 612}
573 613
574static uint32_t 614static uint32_t
575lpfc_rcv_els_unused_node(struct lpfc_hba * phba, 615lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
576 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 616 void *arg, uint32_t evt)
577{ 617{
578 lpfc_issue_els_logo(phba, ndlp, 0); 618 lpfc_issue_els_logo(vport, ndlp, 0);
579 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 619 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
580 return ndlp->nlp_state; 620 return ndlp->nlp_state;
581} 621}
582 622
583static uint32_t 623static uint32_t
584lpfc_rcv_logo_unused_node(struct lpfc_hba * phba, 624lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
585 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 625 void *arg, uint32_t evt)
586{ 626{
587 struct lpfc_iocbq *cmdiocb; 627 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
588 628 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
589 cmdiocb = (struct lpfc_iocbq *) arg;
590 629
591 spin_lock_irq(phba->host->host_lock); 630 spin_lock_irq(shost->host_lock);
592 ndlp->nlp_flag |= NLP_LOGO_ACC; 631 ndlp->nlp_flag |= NLP_LOGO_ACC;
593 spin_unlock_irq(phba->host->host_lock); 632 spin_unlock_irq(shost->host_lock);
594 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 633 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
595 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 634 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
596 635
597 return ndlp->nlp_state; 636 return ndlp->nlp_state;
598} 637}
599 638
600static uint32_t 639static uint32_t
601lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba, 640lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
602 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 641 void *arg, uint32_t evt)
603{ 642{
604 lpfc_drop_node(phba, ndlp); 643 lpfc_drop_node(vport, ndlp);
605 return NLP_STE_FREED_NODE; 644 return NLP_STE_FREED_NODE;
606} 645}
607 646
608static uint32_t 647static uint32_t
609lpfc_device_rm_unused_node(struct lpfc_hba * phba, 648lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
610 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 649 void *arg, uint32_t evt)
611{ 650{
612 lpfc_drop_node(phba, ndlp); 651 lpfc_drop_node(vport, ndlp);
613 return NLP_STE_FREED_NODE; 652 return NLP_STE_FREED_NODE;
614} 653}
615 654
616static uint32_t 655static uint32_t
617lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, 656lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
618 void *arg, uint32_t evt) 657 void *arg, uint32_t evt)
619{ 658{
659 struct lpfc_hba *phba = vport->phba;
620 struct lpfc_iocbq *cmdiocb = arg; 660 struct lpfc_iocbq *cmdiocb = arg;
621 struct lpfc_dmabuf *pcmd; 661 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
622 struct serv_parm *sp; 662 uint32_t *lp = (uint32_t *) pcmd->virt;
623 uint32_t *lp; 663 struct serv_parm *sp = (struct serv_parm *) (lp + 1);
624 struct ls_rjt stat; 664 struct ls_rjt stat;
625 int port_cmp; 665 int port_cmp;
626 666
627 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
628 lp = (uint32_t *) pcmd->virt;
629 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
630
631 memset(&stat, 0, sizeof (struct ls_rjt)); 667 memset(&stat, 0, sizeof (struct ls_rjt));
632 668
633 /* For a PLOGI, we only accept if our portname is less 669 /* For a PLOGI, we only accept if our portname is less
634 * than the remote portname. 670 * than the remote portname.
635 */ 671 */
636 phba->fc_stat.elsLogiCol++; 672 phba->fc_stat.elsLogiCol++;
637 port_cmp = memcmp(&phba->fc_portname, &sp->portName, 673 port_cmp = memcmp(&vport->fc_portname, &sp->portName,
638 sizeof (struct lpfc_name)); 674 sizeof(struct lpfc_name));
639 675
640 if (port_cmp >= 0) { 676 if (port_cmp >= 0) {
641 /* Reject this request because the remote node will accept 677 /* Reject this request because the remote node will accept
642 ours */ 678 ours */
643 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 679 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
644 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 680 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
645 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 681 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
682 NULL);
646 } else { 683 } else {
647 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 684 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
648 } /* if our portname was less */ 685 } /* If our portname was less */
649 686
650 return ndlp->nlp_state; 687 return ndlp->nlp_state;
651} 688}
652 689
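A worked example of the collision rule above; the WWPNs are illustrative, and memcmp() order matches numeric order because WWPN bytes are stored most-significant first:

/* ours   = 10:00:00:00:c9:00:00:01
 * remote = 10:00:00:00:c9:00:00:02
 * memcmp(ours, remote, 8) < 0  -> port_cmp < 0: accept the received
 * PLOGI via lpfc_rcv_plogi().  The remote computes port_cmp >= 0 for
 * the PLOGI we sent and rejects it with LSEXP_CMD_IN_PROGRESS, so
 * exactly one of the crossed logins survives.
 */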
653static uint32_t 690static uint32_t
654lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba, 691lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
655 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 692 void *arg, uint32_t evt)
656{ 693{
657 struct lpfc_iocbq *cmdiocb; 694 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
695 struct ls_rjt stat;
658 696
659 cmdiocb = (struct lpfc_iocbq *) arg; 697 memset(&stat, 0, sizeof (struct ls_rjt));
698 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
699 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
700 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
701 return ndlp->nlp_state;
702}
660 703
661 /* software abort outstanding PLOGI */ 704static uint32_t
662 lpfc_els_abort(phba, ndlp); 705lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
706 void *arg, uint32_t evt)
707{
708 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
709
710 /* software abort outstanding PLOGI */
711 lpfc_els_abort(vport->phba, ndlp);
663 712
664 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 713 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
665 return ndlp->nlp_state; 714 return ndlp->nlp_state;
666} 715}
667 716
668static uint32_t 717static uint32_t
669lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba, 718lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
670 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 719 void *arg, uint32_t evt)
671{ 720{
672 struct lpfc_iocbq *cmdiocb; 721 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
673 722 struct lpfc_hba *phba = vport->phba;
674 cmdiocb = (struct lpfc_iocbq *) arg; 723 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
675 724
676 /* software abort outstanding PLOGI */ 725 /* software abort outstanding PLOGI */
677 lpfc_els_abort(phba, ndlp); 726 lpfc_els_abort(phba, ndlp);
678 727
679 if (evt == NLP_EVT_RCV_LOGO) { 728 if (evt == NLP_EVT_RCV_LOGO) {
680 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 729 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
681 } else { 730 } else {
682 lpfc_issue_els_logo(phba, ndlp, 0); 731 lpfc_issue_els_logo(vport, ndlp, 0);
683 } 732 }
684 733
685 /* Put ndlp in npr list set plogi timer for 1 sec */ 734 /* Put ndlp in npr state set plogi timer for 1 sec */
686 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 735 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
687 spin_lock_irq(phba->host->host_lock); 736 spin_lock_irq(shost->host_lock);
688 ndlp->nlp_flag |= NLP_DELAY_TMO; 737 ndlp->nlp_flag |= NLP_DELAY_TMO;
689 spin_unlock_irq(phba->host->host_lock); 738 spin_unlock_irq(shost->host_lock);
690 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 739 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
691 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 740 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
692 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 741 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
693 742
694 return ndlp->nlp_state; 743 return ndlp->nlp_state;
695} 744}
696 745
697static uint32_t 746static uint32_t
698lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, 747lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
699 struct lpfc_nodelist * ndlp, void *arg, 748 struct lpfc_nodelist *ndlp,
749 void *arg,
700 uint32_t evt) 750 uint32_t evt)
701{ 751{
702 struct lpfc_iocbq *cmdiocb, *rspiocb; 752 struct lpfc_hba *phba = vport->phba;
753 struct lpfc_iocbq *cmdiocb, *rspiocb;
703 struct lpfc_dmabuf *pcmd, *prsp, *mp; 754 struct lpfc_dmabuf *pcmd, *prsp, *mp;
704 uint32_t *lp; 755 uint32_t *lp;
705 IOCB_t *irsp; 756 IOCB_t *irsp;
@@ -721,31 +772,26 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
721 772
722 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 773 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
723 774
724 prsp = list_get_first(&pcmd->list, 775 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
725 struct lpfc_dmabuf,
726 list);
727 lp = (uint32_t *) prsp->virt;
728 776
777 lp = (uint32_t *) prsp->virt;
729 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t)); 778 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
730 if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3)) 779 if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
731 goto out; 780 goto out;
732 781
733 /* PLOGI chkparm OK */ 782 /* PLOGI chkparm OK */
734 lpfc_printf_log(phba, 783 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
735 KERN_INFO, 784 "%d (%d):0121 PLOGI chkparm OK "
736 LOG_ELS,
737 "%d:0121 PLOGI chkparm OK "
738 "Data: x%x x%x x%x x%x\n", 785 "Data: x%x x%x x%x x%x\n",
739 phba->brd_no, 786 phba->brd_no, vport->vpi,
740 ndlp->nlp_DID, ndlp->nlp_state, 787 ndlp->nlp_DID, ndlp->nlp_state,
741 ndlp->nlp_flag, ndlp->nlp_rpi); 788 ndlp->nlp_flag, ndlp->nlp_rpi);
742 789
743 if ((phba->cfg_fcp_class == 2) && 790 if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
744 (sp->cls2.classValid)) {
745 ndlp->nlp_fcp_info |= CLASS2; 791 ndlp->nlp_fcp_info |= CLASS2;
746 } else { 792 else
747 ndlp->nlp_fcp_info |= CLASS3; 793 ndlp->nlp_fcp_info |= CLASS3;
748 } 794
749 ndlp->nlp_class_sup = 0; 795 ndlp->nlp_class_sup = 0;
750 if (sp->cls1.classValid) 796 if (sp->cls1.classValid)
751 ndlp->nlp_class_sup |= FC_COS_CLASS1; 797 ndlp->nlp_class_sup |= FC_COS_CLASS1;
@@ -756,16 +802,23 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
756 if (sp->cls4.classValid) 802 if (sp->cls4.classValid)
757 ndlp->nlp_class_sup |= FC_COS_CLASS4; 803 ndlp->nlp_class_sup |= FC_COS_CLASS4;
758 ndlp->nlp_maxframe = 804 ndlp->nlp_maxframe =
759 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 805 ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
760 sp->cmn.bbRcvSizeLsb;
761 806
762 if (!(mbox = mempool_alloc(phba->mbox_mem_pool, 807 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
763 GFP_KERNEL))) 808 if (!mbox) {
809 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
810 "%d (%d):0133 PLOGI: no memory for reg_login "
811 "Data: x%x x%x x%x x%x\n",
812 phba->brd_no, vport->vpi,
813 ndlp->nlp_DID, ndlp->nlp_state,
814 ndlp->nlp_flag, ndlp->nlp_rpi);
764 goto out; 815 goto out;
816 }
765 817
766 lpfc_unreg_rpi(phba, ndlp); 818 lpfc_unreg_rpi(vport, ndlp);
767 if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, 819
768 mbox, 0) == 0) { 820 if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
821 (uint8_t *) sp, mbox, 0) == 0) {
769 switch (ndlp->nlp_DID) { 822 switch (ndlp->nlp_DID) {
770 case NameServer_DID: 823 case NameServer_DID:
771 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; 824 mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
@@ -777,68 +830,104 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
777 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 830 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
778 } 831 }
779 mbox->context2 = lpfc_nlp_get(ndlp); 832 mbox->context2 = lpfc_nlp_get(ndlp);
833 mbox->vport = vport;
780 if (lpfc_sli_issue_mbox(phba, mbox, 834 if (lpfc_sli_issue_mbox(phba, mbox,
781 (MBX_NOWAIT | MBX_STOP_IOCB)) 835 (MBX_NOWAIT | MBX_STOP_IOCB))
782 != MBX_NOT_FINISHED) { 836 != MBX_NOT_FINISHED) {
783 lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE); 837 lpfc_nlp_set_state(vport, ndlp,
838 NLP_STE_REG_LOGIN_ISSUE);
784 return ndlp->nlp_state; 839 return ndlp->nlp_state;
785 } 840 }
786 lpfc_nlp_put(ndlp); 841 lpfc_nlp_put(ndlp);
787 mp = (struct lpfc_dmabuf *)mbox->context1; 842 mp = (struct lpfc_dmabuf *) mbox->context1;
788 lpfc_mbuf_free(phba, mp->virt, mp->phys); 843 lpfc_mbuf_free(phba, mp->virt, mp->phys);
789 kfree(mp); 844 kfree(mp);
790 mempool_free(mbox, phba->mbox_mem_pool); 845 mempool_free(mbox, phba->mbox_mem_pool);
846
847 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
848 "%d (%d):0134 PLOGI: cannot issue reg_login "
849 "Data: x%x x%x x%x x%x\n",
850 phba->brd_no, vport->vpi,
851 ndlp->nlp_DID, ndlp->nlp_state,
852 ndlp->nlp_flag, ndlp->nlp_rpi);
791 } else { 853 } else {
792 mempool_free(mbox, phba->mbox_mem_pool); 854 mempool_free(mbox, phba->mbox_mem_pool);
855
856 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
857 "%d (%d):0135 PLOGI: cannot format reg_login "
858 "Data: x%x x%x x%x x%x\n",
859 phba->brd_no, vport->vpi,
860 ndlp->nlp_DID, ndlp->nlp_state,
861 ndlp->nlp_flag, ndlp->nlp_rpi);
793 } 862 }
794 863
795 864
796 out: 865out:
866 if (ndlp->nlp_DID == NameServer_DID) {
867 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
868 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
869 "%d (%d):0261 Cannot Register NameServer login\n",
870 phba->brd_no, vport->vpi);
871 }
872
797 /* Free this node since the driver cannot login or has the wrong 873 /* Free this node since the driver cannot login or has the wrong
798 sparm */ 874 sparm */
799 lpfc_drop_node(phba, ndlp); 875 lpfc_drop_node(vport, ndlp);
800 return NLP_STE_FREED_NODE; 876 return NLP_STE_FREED_NODE;
801} 877}
802 878
803static uint32_t 879static uint32_t
804lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, 880lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
805 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 881 void *arg, uint32_t evt)
806{ 882{
807 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 883 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
884
885 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
886 spin_lock_irq(shost->host_lock);
808 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 887 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
888 spin_unlock_irq(shost->host_lock);
809 return ndlp->nlp_state; 889 return ndlp->nlp_state;
810 } 890 } else {
811 else {
812 /* software abort outstanding PLOGI */ 891 /* software abort outstanding PLOGI */
813 lpfc_els_abort(phba, ndlp); 892 lpfc_els_abort(vport->phba, ndlp);
814 893
815 lpfc_drop_node(phba, ndlp); 894 lpfc_drop_node(vport, ndlp);
816 return NLP_STE_FREED_NODE; 895 return NLP_STE_FREED_NODE;
817 } 896 }
818} 897}
819 898
820static uint32_t 899static uint32_t
821lpfc_device_recov_plogi_issue(struct lpfc_hba * phba, 900lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
822 struct lpfc_nodelist * ndlp, void *arg, 901 struct lpfc_nodelist *ndlp,
823 uint32_t evt) 902 void *arg,
903 uint32_t evt)
824{ 904{
905 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
906 struct lpfc_hba *phba = vport->phba;
907
908 /* Don't do anything that will mess up processing of the
909 * previous RSCN.
910 */
911 if (vport->fc_flag & FC_RSCN_DEFERRED)
912 return ndlp->nlp_state;
913
825 /* software abort outstanding PLOGI */ 914 /* software abort outstanding PLOGI */
826 lpfc_els_abort(phba, ndlp); 915 lpfc_els_abort(phba, ndlp);
827 916
828 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; 917 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
829 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 918 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
830 spin_lock_irq(phba->host->host_lock); 919 spin_lock_irq(shost->host_lock);
831 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 920 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
832 spin_unlock_irq(phba->host->host_lock); 921 spin_unlock_irq(shost->host_lock);
833 922
834 return ndlp->nlp_state; 923 return ndlp->nlp_state;
835} 924}
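Each *_recov handler now opens with the same guard: if the vport has deferred RSCN processing (FC_RSCN_DEFERRED), the recovery event is ignored so the pending RSCN bookkeeping is not disturbed; otherwise the outstanding ELS is aborted and the node is parked back in NPR. A stripped-down model of that control flow (flag and state names reused for readability, everything else hypothetical):

#include <stdio.h>

#define FC_RSCN_DEFERRED 0x1

enum nlp_state { STE_PLOGI_ISSUE = 1, STE_NPR_NODE = 2 };

struct vport { unsigned int fc_flag; };
struct node  { enum nlp_state state, prev_state; };

static enum nlp_state device_recov(struct vport *vp, struct node *n)
{
	/* Don't touch the node while a deferred RSCN is pending. */
	if (vp->fc_flag & FC_RSCN_DEFERRED)
		return n->state;

	/* abort outstanding ELS here, then park the node in NPR */
	n->prev_state = n->state;
	n->state = STE_NPR_NODE;
	return n->state;
}

int main(void)
{
	struct vport vp = { .fc_flag = FC_RSCN_DEFERRED };
	struct node n = { .state = STE_PLOGI_ISSUE };
	printf("deferred -> state %d\n", device_recov(&vp, &n));
	vp.fc_flag = 0;
	printf("normal   -> state %d\n", device_recov(&vp, &n));
	return 0;
}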
836 925
837static uint32_t 926static uint32_t
838lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba, 927lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
839 struct lpfc_nodelist * ndlp, void *arg, 928 void *arg, uint32_t evt)
840 uint32_t evt)
841{ 929{
930 struct lpfc_hba *phba = vport->phba;
842 struct lpfc_iocbq *cmdiocb; 931 struct lpfc_iocbq *cmdiocb;
843 932
844 /* software abort outstanding ADISC */ 933 /* software abort outstanding ADISC */
@@ -846,34 +935,31 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
846 935
847 cmdiocb = (struct lpfc_iocbq *) arg; 936 cmdiocb = (struct lpfc_iocbq *) arg;
848 937
849 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 938 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
850 return ndlp->nlp_state; 939 return ndlp->nlp_state;
851 } 940
852 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 941 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
853 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 942 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
854 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 943 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
855 944
856 return ndlp->nlp_state; 945 return ndlp->nlp_state;
857} 946}
858 947
859static uint32_t 948static uint32_t
860lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba, 949lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
861 struct lpfc_nodelist * ndlp, void *arg, 950 void *arg, uint32_t evt)
862 uint32_t evt)
863{ 951{
864 struct lpfc_iocbq *cmdiocb; 952 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
865
866 cmdiocb = (struct lpfc_iocbq *) arg;
867 953
868 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); 954 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
869 return ndlp->nlp_state; 955 return ndlp->nlp_state;
870} 956}
871 957
872static uint32_t 958static uint32_t
873lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, 959lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
874 struct lpfc_nodelist * ndlp, void *arg, 960 void *arg, uint32_t evt)
875 uint32_t evt)
876{ 961{
962 struct lpfc_hba *phba = vport->phba;
877 struct lpfc_iocbq *cmdiocb; 963 struct lpfc_iocbq *cmdiocb;
878 964
879 cmdiocb = (struct lpfc_iocbq *) arg; 965 cmdiocb = (struct lpfc_iocbq *) arg;
@@ -881,42 +967,43 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
881 /* software abort outstanding ADISC */ 967 /* software abort outstanding ADISC */
882 lpfc_els_abort(phba, ndlp); 968 lpfc_els_abort(phba, ndlp);
883 969
884 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 970 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
885 return ndlp->nlp_state; 971 return ndlp->nlp_state;
886} 972}
887 973
888static uint32_t 974static uint32_t
889lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba, 975lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
890 struct lpfc_nodelist * ndlp, void *arg, 976 struct lpfc_nodelist *ndlp,
891 uint32_t evt) 977 void *arg, uint32_t evt)
892{ 978{
893 struct lpfc_iocbq *cmdiocb; 979 struct lpfc_iocbq *cmdiocb;
894 980
895 cmdiocb = (struct lpfc_iocbq *) arg; 981 cmdiocb = (struct lpfc_iocbq *) arg;
896 982
897 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 983 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
898 return ndlp->nlp_state; 984 return ndlp->nlp_state;
899} 985}
900 986
901static uint32_t 987static uint32_t
902lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba, 988lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
903 struct lpfc_nodelist * ndlp, void *arg, 989 void *arg, uint32_t evt)
904 uint32_t evt)
905{ 990{
906 struct lpfc_iocbq *cmdiocb; 991 struct lpfc_iocbq *cmdiocb;
907 992
908 cmdiocb = (struct lpfc_iocbq *) arg; 993 cmdiocb = (struct lpfc_iocbq *) arg;
909 994
910 /* Treat like rcv logo */ 995 /* Treat like rcv logo */
911 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); 996 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
912 return ndlp->nlp_state; 997 return ndlp->nlp_state;
913} 998}
914 999
915static uint32_t 1000static uint32_t
916lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba, 1001lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
917 struct lpfc_nodelist * ndlp, void *arg, 1002 struct lpfc_nodelist *ndlp,
918 uint32_t evt) 1003 void *arg, uint32_t evt)
919{ 1004{
1005 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1006 struct lpfc_hba *phba = vport->phba;
920 struct lpfc_iocbq *cmdiocb, *rspiocb; 1007 struct lpfc_iocbq *cmdiocb, *rspiocb;
921 IOCB_t *irsp; 1008 IOCB_t *irsp;
922 ADISC *ap; 1009 ADISC *ap;
@@ -928,101 +1015,112 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
928 irsp = &rspiocb->iocb; 1015 irsp = &rspiocb->iocb;
929 1016
930 if ((irsp->ulpStatus) || 1017 if ((irsp->ulpStatus) ||
931 (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) { 1018 (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
932 /* 1 sec timeout */ 1019 /* 1 sec timeout */
933 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); 1020 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
934 spin_lock_irq(phba->host->host_lock); 1021 spin_lock_irq(shost->host_lock);
935 ndlp->nlp_flag |= NLP_DELAY_TMO; 1022 ndlp->nlp_flag |= NLP_DELAY_TMO;
936 spin_unlock_irq(phba->host->host_lock); 1023 spin_unlock_irq(shost->host_lock);
937 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1024 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
938 1025
939 memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name)); 1026 memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
940 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name)); 1027 memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
941 1028
942 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1029 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
943 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1030 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
944 lpfc_unreg_rpi(phba, ndlp); 1031 lpfc_unreg_rpi(vport, ndlp);
945 return ndlp->nlp_state; 1032 return ndlp->nlp_state;
946 } 1033 }
947 1034
948 if (ndlp->nlp_type & NLP_FCP_TARGET) { 1035 if (ndlp->nlp_type & NLP_FCP_TARGET) {
949 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1036 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
950 lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE); 1037 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
951 } else { 1038 } else {
952 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1039 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
953 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1040 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
954 } 1041 }
955 return ndlp->nlp_state; 1042 return ndlp->nlp_state;
956} 1043}
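On ADISC completion the handler branches three ways: a failed or name-mismatched response arms a one-second delay timer and schedules a fresh PLOGI from NPR, while a good response promotes the node to MAPPED (FCP target) or UNMAPPED (everything else). The decision tree, reduced to a standalone function (states and parameters are stand-ins mirroring the driver):

#include <stdio.h>

enum state { NPR, MAPPED, UNMAPPED };

/* ulp_status != 0 or an ADISC name mismatch both count as failure. */
static enum state adisc_cmpl(int ulp_status, int names_match, int is_fcp_target)
{
	if (ulp_status || !names_match)
		return NPR;	/* arm 1s delay timer, retry with PLOGI */
	return is_fcp_target ? MAPPED : UNMAPPED;
}

int main(void)
{
	printf("%d %d %d\n",
	       adisc_cmpl(1, 1, 1),	/* error      -> NPR */
	       adisc_cmpl(0, 1, 1),	/* FCP target -> MAPPED */
	       adisc_cmpl(0, 1, 0));	/* other node -> UNMAPPED */
	return 0;
}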
957 1044
958static uint32_t 1045static uint32_t
959lpfc_device_rm_adisc_issue(struct lpfc_hba * phba, 1046lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
960 struct lpfc_nodelist * ndlp, void *arg, 1047 void *arg, uint32_t evt)
961 uint32_t evt)
962{ 1048{
963 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1049 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1050
1051 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1052 spin_lock_irq(shost->host_lock);
964 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1053 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1054 spin_unlock_irq(shost->host_lock);
965 return ndlp->nlp_state; 1055 return ndlp->nlp_state;
966 } 1056 } else {
967 else {
968 /* software abort outstanding ADISC */ 1057 /* software abort outstanding ADISC */
969 lpfc_els_abort(phba, ndlp); 1058 lpfc_els_abort(vport->phba, ndlp);
970 1059
971 lpfc_drop_node(phba, ndlp); 1060 lpfc_drop_node(vport, ndlp);
972 return NLP_STE_FREED_NODE; 1061 return NLP_STE_FREED_NODE;
973 } 1062 }
974} 1063}
975 1064
976static uint32_t 1065static uint32_t
977lpfc_device_recov_adisc_issue(struct lpfc_hba * phba, 1066lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
978 struct lpfc_nodelist * ndlp, void *arg, 1067 struct lpfc_nodelist *ndlp,
979 uint32_t evt) 1068 void *arg,
1069 uint32_t evt)
980{ 1070{
1071 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1072 struct lpfc_hba *phba = vport->phba;
1073
1074 /* Don't do anything that will mess up processing of the
1075 * previous RSCN.
1076 */
1077 if (vport->fc_flag & FC_RSCN_DEFERRED)
1078 return ndlp->nlp_state;
1079
981 /* software abort outstanding ADISC */ 1080 /* software abort outstanding ADISC */
982 lpfc_els_abort(phba, ndlp); 1081 lpfc_els_abort(phba, ndlp);
983 1082
984 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1083 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
985 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1084 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
986 spin_lock_irq(phba->host->host_lock); 1085 spin_lock_irq(shost->host_lock);
987 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1086 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
988 ndlp->nlp_flag |= NLP_NPR_ADISC; 1087 spin_unlock_irq(shost->host_lock);
989 spin_unlock_irq(phba->host->host_lock); 1088 lpfc_disc_set_adisc(vport, ndlp);
990
991 return ndlp->nlp_state; 1089 return ndlp->nlp_state;
992} 1090}
993 1091
994static uint32_t 1092static uint32_t
995lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba, 1093lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
996 struct lpfc_nodelist * ndlp, void *arg, 1094 struct lpfc_nodelist *ndlp,
1095 void *arg,
997 uint32_t evt) 1096 uint32_t evt)
998{ 1097{
999 struct lpfc_iocbq *cmdiocb; 1098 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1000 1099
1001 cmdiocb = (struct lpfc_iocbq *) arg; 1100 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1002
1003 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1004 return ndlp->nlp_state; 1101 return ndlp->nlp_state;
1005} 1102}
1006 1103
1007static uint32_t 1104static uint32_t
1008lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba, 1105lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1009 struct lpfc_nodelist * ndlp, void *arg, 1106 struct lpfc_nodelist *ndlp,
1107 void *arg,
1010 uint32_t evt) 1108 uint32_t evt)
1011{ 1109{
1012 struct lpfc_iocbq *cmdiocb; 1110 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1013 1111
1014 cmdiocb = (struct lpfc_iocbq *) arg; 1112 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1015
1016 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1017 return ndlp->nlp_state; 1113 return ndlp->nlp_state;
1018} 1114}
1019 1115
1020static uint32_t 1116static uint32_t
1021lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, 1117lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1022 struct lpfc_nodelist * ndlp, void *arg, 1118 struct lpfc_nodelist *ndlp,
1119 void *arg,
1023 uint32_t evt) 1120 uint32_t evt)
1024{ 1121{
1025 struct lpfc_iocbq *cmdiocb; 1122 struct lpfc_hba *phba = vport->phba;
1123 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1026 LPFC_MBOXQ_t *mb; 1124 LPFC_MBOXQ_t *mb;
1027 LPFC_MBOXQ_t *nextmb; 1125 LPFC_MBOXQ_t *nextmb;
1028 struct lpfc_dmabuf *mp; 1126 struct lpfc_dmabuf *mp;
@@ -1033,12 +1131,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1033 if ((mb = phba->sli.mbox_active)) { 1131 if ((mb = phba->sli.mbox_active)) {
1034 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1132 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1035 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1133 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1134 lpfc_nlp_put(ndlp);
1036 mb->context2 = NULL; 1135 mb->context2 = NULL;
1037 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1136 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1038 } 1137 }
1039 } 1138 }
1040 1139
1041 spin_lock_irq(phba->host->host_lock); 1140 spin_lock_irq(&phba->hbalock);
1042 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 1141 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1043 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && 1142 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1044 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 1143 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1047,61 +1146,61 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1047 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1048 kfree(mp); 1147 kfree(mp);
1049 } 1148 }
1149 lpfc_nlp_put(ndlp);
1050 list_del(&mb->list); 1150 list_del(&mb->list);
1051 mempool_free(mb, phba->mbox_mem_pool); 1151 mempool_free(mb, phba->mbox_mem_pool);
1052 } 1152 }
1053 } 1153 }
1054 spin_unlock_irq(phba->host->host_lock); 1154 spin_unlock_irq(&phba->hbalock);
1055 1155
1056 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 1156 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1057 return ndlp->nlp_state; 1157 return ndlp->nlp_state;
1058} 1158}
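The LOGO handler above has to cancel any queued REG_LOGIN64 mailbox that still references the node: the active command only has its completion rerouted, while queued ones are unlinked and freed. Because entries are deleted mid-walk, the driver uses list_for_each_entry_safe(); the same safe-deletion idiom in plain C (singly linked list, illustrative names):

#include <stdio.h>
#include <stdlib.h>

struct mbox { int node_id; struct mbox *next; };

/* Unlink and free every queued mailbox that references node_id.
 * The extra indirection (pp) lets us delete while walking, which is
 * what list_for_each_entry_safe() buys in the kernel. */
static void purge_mboxq(struct mbox **pp, int node_id)
{
	while (*pp) {
		struct mbox *m = *pp;
		if (m->node_id == node_id) {
			*pp = m->next;	/* unlink before freeing */
			free(m);
		} else {
			pp = &m->next;
		}
	}
}

int main(void)
{
	struct mbox *head = NULL;
	for (int i = 0; i < 4; i++) {
		struct mbox *m = malloc(sizeof(*m));
		m->node_id = i % 2;	/* two mailboxes reference node 1 */
		m->next = head;
		head = m;
	}
	purge_mboxq(&head, 1);
	for (struct mbox *m = head; m; m = m->next)
		printf("left: node %d\n", m->node_id);
	return 0;
}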
1059 1159
1060static uint32_t 1160static uint32_t
1061lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba, 1161lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1062 struct lpfc_nodelist * ndlp, void *arg, 1162 struct lpfc_nodelist *ndlp,
1163 void *arg,
1063 uint32_t evt) 1164 uint32_t evt)
1064{ 1165{
1065 struct lpfc_iocbq *cmdiocb; 1166 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1066 1167
1067 cmdiocb = (struct lpfc_iocbq *) arg; 1168 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1068
1069 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1070 return ndlp->nlp_state; 1169 return ndlp->nlp_state;
1071} 1170}
1072 1171
1073static uint32_t 1172static uint32_t
1074lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba, 1173lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1075 struct lpfc_nodelist * ndlp, void *arg, 1174 struct lpfc_nodelist *ndlp,
1175 void *arg,
1076 uint32_t evt) 1176 uint32_t evt)
1077{ 1177{
1078 struct lpfc_iocbq *cmdiocb; 1178 struct lpfc_iocbq *cmdiocb;
1079 1179
1080 cmdiocb = (struct lpfc_iocbq *) arg; 1180 cmdiocb = (struct lpfc_iocbq *) arg;
1081 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); 1181 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1082 return ndlp->nlp_state; 1182 return ndlp->nlp_state;
1083} 1183}
1084 1184
1085static uint32_t 1185static uint32_t
1086lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba, 1186lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1087 struct lpfc_nodelist * ndlp, 1187 struct lpfc_nodelist *ndlp,
1088 void *arg, uint32_t evt) 1188 void *arg,
1189 uint32_t evt)
1089{ 1190{
1090 LPFC_MBOXQ_t *pmb; 1191 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1091 MAILBOX_t *mb; 1192 struct lpfc_hba *phba = vport->phba;
1092 uint32_t did; 1193 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1194 MAILBOX_t *mb = &pmb->mb;
1195 uint32_t did = mb->un.varWords[1];
1093 1196
1094 pmb = (LPFC_MBOXQ_t *) arg;
1095 mb = &pmb->mb;
1096 did = mb->un.varWords[1];
1097 if (mb->mbxStatus) { 1197 if (mb->mbxStatus) {
1098 /* RegLogin failed */ 1198 /* RegLogin failed */
1099 lpfc_printf_log(phba, 1199 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
1100 KERN_ERR, 1200 "%d (%d):0246 RegLogin failed Data: x%x x%x "
1101 LOG_DISCOVERY, 1201 "x%x\n",
1102 "%d:0246 RegLogin failed Data: x%x x%x x%x\n", 1202 phba->brd_no, vport->vpi,
1103 phba->brd_no, 1203 did, mb->mbxStatus, vport->port_state);
1104 did, mb->mbxStatus, phba->hba_state);
1105 1204
1106 /* 1205 /*
1107 * If RegLogin failed due to lack of HBA resources do not 1206 * If RegLogin failed due to lack of HBA resources do not
@@ -1109,20 +1208,20 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1109 */ 1208 */
1110 if (mb->mbxStatus == MBXERR_RPI_FULL) { 1209 if (mb->mbxStatus == MBXERR_RPI_FULL) {
1111 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 1210 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1112 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE); 1211 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1113 return ndlp->nlp_state; 1212 return ndlp->nlp_state;
1114 } 1213 }
1115 1214
1116 /* Put ndlp in npr list set plogi timer for 1 sec */ 1215 /* Put ndlp in npr state set plogi timer for 1 sec */
1117 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1216 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1118 spin_lock_irq(phba->host->host_lock); 1217 spin_lock_irq(shost->host_lock);
1119 ndlp->nlp_flag |= NLP_DELAY_TMO; 1218 ndlp->nlp_flag |= NLP_DELAY_TMO;
1120 spin_unlock_irq(phba->host->host_lock); 1219 spin_unlock_irq(shost->host_lock);
1121 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1220 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1122 1221
1123 lpfc_issue_els_logo(phba, ndlp, 0); 1222 lpfc_issue_els_logo(vport, ndlp, 0);
1124 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1223 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1125 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1224 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1126 return ndlp->nlp_state; 1225 return ndlp->nlp_state;
1127 } 1226 }
1128 1227
@@ -1131,91 +1230,99 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1131 /* Only if we are not a fabric nport do we issue PRLI */ 1230 /* Only if we are not a fabric nport do we issue PRLI */
1132 if (!(ndlp->nlp_type & NLP_FABRIC)) { 1231 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1133 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1232 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1134 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE); 1233 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1135 lpfc_issue_els_prli(phba, ndlp, 0); 1234 lpfc_issue_els_prli(vport, ndlp, 0);
1136 } else { 1235 } else {
1137 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1236 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1138 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1237 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1139 } 1238 }
1140 return ndlp->nlp_state; 1239 return ndlp->nlp_state;
1141} 1240}
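The RegLogin completion collapses to a small decision table: an RPI-exhausted failure parks the node as UNUSED so it can be retried later, any other failure sends a LOGO and retries PLOGI after a one-second delay, and success issues PRLI unless the node is a fabric port (which stays UNMAPPED). As a standalone function, with simplified stand-in status codes and states:

#include <stdio.h>

enum state { UNUSED, NPR, PRLI_ISSUE, UNMAPPED };
#define MBX_OK        0
#define MBX_RPI_FULL 17	/* stand-in for MBXERR_RPI_FULL */

static enum state reglogin_cmpl(int mbx_status, int is_fabric)
{
	if (mbx_status == MBX_RPI_FULL)
		return UNUSED;		/* out of RPIs: keep node for later */
	if (mbx_status != MBX_OK)
		return NPR;		/* LOGO now, delayed PLOGI retry */
	return is_fabric ? UNMAPPED	/* fabric nports get no PRLI */
			 : PRLI_ISSUE;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       reglogin_cmpl(MBX_RPI_FULL, 0),
	       reglogin_cmpl(5, 0),
	       reglogin_cmpl(MBX_OK, 1),
	       reglogin_cmpl(MBX_OK, 0));
	return 0;
}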
1142 1241
1143static uint32_t 1242static uint32_t
1144lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba, 1243lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1145 struct lpfc_nodelist * ndlp, void *arg, 1244 struct lpfc_nodelist *ndlp,
1245 void *arg,
1146 uint32_t evt) 1246 uint32_t evt)
1147{ 1247{
1148 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1248 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1249
1250 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1251 spin_lock_irq(shost->host_lock);
1149 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1252 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1253 spin_unlock_irq(shost->host_lock);
1150 return ndlp->nlp_state; 1254 return ndlp->nlp_state;
1151 } 1255 } else {
1152 else { 1256 lpfc_drop_node(vport, ndlp);
1153 lpfc_drop_node(phba, ndlp);
1154 return NLP_STE_FREED_NODE; 1257 return NLP_STE_FREED_NODE;
1155 } 1258 }
1156} 1259}
1157 1260
1158static uint32_t 1261static uint32_t
1159lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba, 1262lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1160 struct lpfc_nodelist * ndlp, void *arg, 1263 struct lpfc_nodelist *ndlp,
1161 uint32_t evt) 1264 void *arg,
1265 uint32_t evt)
1162{ 1266{
1267 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1268
1269 /* Don't do anything that will mess up processing of the
1270 * previous RSCN.
1271 */
1272 if (vport->fc_flag & FC_RSCN_DEFERRED)
1273 return ndlp->nlp_state;
1274
1163 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 1275 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1164 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1276 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1165 spin_lock_irq(phba->host->host_lock); 1277 spin_lock_irq(shost->host_lock);
1166 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1278 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1167 spin_unlock_irq(phba->host->host_lock); 1279 spin_unlock_irq(shost->host_lock);
1280 lpfc_disc_set_adisc(vport, ndlp);
1168 return ndlp->nlp_state; 1281 return ndlp->nlp_state;
1169} 1282}
1170 1283
1171static uint32_t 1284static uint32_t
1172lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba, 1285lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1173 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1286 void *arg, uint32_t evt)
1174{ 1287{
1175 struct lpfc_iocbq *cmdiocb; 1288 struct lpfc_iocbq *cmdiocb;
1176 1289
1177 cmdiocb = (struct lpfc_iocbq *) arg; 1290 cmdiocb = (struct lpfc_iocbq *) arg;
1178 1291
1179 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 1292 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1180 return ndlp->nlp_state; 1293 return ndlp->nlp_state;
1181} 1294}
1182 1295
1183static uint32_t 1296static uint32_t
1184lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba, 1297lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1185 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1298 void *arg, uint32_t evt)
1186{ 1299{
1187 struct lpfc_iocbq *cmdiocb; 1300 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1188
1189 cmdiocb = (struct lpfc_iocbq *) arg;
1190 1301
1191 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp); 1302 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1192 return ndlp->nlp_state; 1303 return ndlp->nlp_state;
1193} 1304}
1194 1305
1195static uint32_t 1306static uint32_t
1196lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba, 1307lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1197 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1308 void *arg, uint32_t evt)
1198{ 1309{
1199 struct lpfc_iocbq *cmdiocb; 1310 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1200
1201 cmdiocb = (struct lpfc_iocbq *) arg;
1202 1311
1203 /* Software abort outstanding PRLI before sending acc */ 1312 /* Software abort outstanding PRLI before sending acc */
1204 lpfc_els_abort(phba, ndlp); 1313 lpfc_els_abort(vport->phba, ndlp);
1205 1314
1206 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); 1315 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1207 return ndlp->nlp_state; 1316 return ndlp->nlp_state;
1208} 1317}
1209 1318
1210static uint32_t 1319static uint32_t
1211lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba, 1320lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1212 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1321 void *arg, uint32_t evt)
1213{ 1322{
1214 struct lpfc_iocbq *cmdiocb; 1323 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1215
1216 cmdiocb = (struct lpfc_iocbq *) arg;
1217 1324
1218 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1325 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1219 return ndlp->nlp_state; 1326 return ndlp->nlp_state;
1220} 1327}
1221 1328
@@ -1225,21 +1332,22 @@ lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1225 * NEXT STATE = PRLI_ISSUE 1332 * NEXT STATE = PRLI_ISSUE
1226 */ 1333 */
1227static uint32_t 1334static uint32_t
1228lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba, 1335lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1229 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1336 void *arg, uint32_t evt)
1230{ 1337{
1231 struct lpfc_iocbq *cmdiocb; 1338 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1232 1339
1233 cmdiocb = (struct lpfc_iocbq *) arg; 1340 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1234 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1235 return ndlp->nlp_state; 1341 return ndlp->nlp_state;
1236} 1342}
1237 1343
1238static uint32_t 1344static uint32_t
1239lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba, 1345lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1240 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1346 void *arg, uint32_t evt)
1241{ 1347{
1348 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1242 struct lpfc_iocbq *cmdiocb, *rspiocb; 1349 struct lpfc_iocbq *cmdiocb, *rspiocb;
1350 struct lpfc_hba *phba = vport->phba;
1243 IOCB_t *irsp; 1351 IOCB_t *irsp;
1244 PRLI *npr; 1352 PRLI *npr;
1245 1353
@@ -1249,8 +1357,12 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1249 1357
1250 irsp = &rspiocb->iocb; 1358 irsp = &rspiocb->iocb;
1251 if (irsp->ulpStatus) { 1359 if (irsp->ulpStatus) {
1360 if ((vport->port_type == LPFC_NPIV_PORT) &&
1361 phba->cfg_vport_restrict_login) {
1362 goto out;
1363 }
1252 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1364 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1253 lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE); 1365 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1254 return ndlp->nlp_state; 1366 return ndlp->nlp_state;
1255 } 1367 }
1256 1368
@@ -1266,319 +1378,329 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1266 if (npr->Retry) 1378 if (npr->Retry)
1267 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; 1379 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1268 } 1380 }
1381 if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1382 (vport->port_type == LPFC_NPIV_PORT) &&
1383 phba->cfg_vport_restrict_login) {
1384out:
1385 spin_lock_irq(shost->host_lock);
1386 ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1387 spin_unlock_irq(shost->host_lock);
1388 lpfc_issue_els_logo(vport, ndlp, 0);
1389
1390 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1391 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1392 return ndlp->nlp_state;
1393 }
1269 1394
1270 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1395 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1271 lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE); 1396 if (ndlp->nlp_type & NLP_FCP_TARGET)
1397 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1398 else
1399 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1272 return ndlp->nlp_state; 1400 return ndlp->nlp_state;
1273} 1401}
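The new NPIV gate in the PRLI completion is worth spelling out: on an NPIV port with restricted login, any node that fails PRLI or turns out not to be an FCP target is logged out and parked UNUSED rather than kept UNMAPPED, keeping non-target nodes off virtual ports. The gate as a predicate plus transition (a sketch with stand-in fields):

#include <stdio.h>

enum state { UNUSED, MAPPED, UNMAPPED };

struct port { int is_npiv; int restrict_login; };

static enum state prli_cmpl(const struct port *p, int prli_failed,
			    int is_fcp_target)
{
	/* Restricted NPIV ports keep only FCP targets that pass PRLI. */
	if (p->is_npiv && p->restrict_login && (prli_failed || !is_fcp_target))
		return UNUSED;		/* issue LOGO, drop to UNUSED */
	if (prli_failed)
		return UNMAPPED;	/* physical port keeps the node */
	return is_fcp_target ? MAPPED : UNMAPPED;
}

int main(void)
{
	struct port npiv = { 1, 1 }, phys = { 0, 0 };
	printf("%d %d %d\n",
	       prli_cmpl(&npiv, 0, 0),	/* UNUSED   */
	       prli_cmpl(&phys, 1, 0),	/* UNMAPPED */
	       prli_cmpl(&phys, 0, 1));	/* MAPPED   */
	return 0;
}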
1274 1402
1275/*! lpfc_device_rm_prli_issue 1403/*! lpfc_device_rm_prli_issue
1276 * 1404 *
1277 * \pre 1405 * \pre
1278 * \post 1406 * \post
1279 * \param phba 1407 * \param phba
1280 * \param ndlp 1408 * \param ndlp
1281 * \param arg 1409 * \param arg
1282 * \param evt 1410 * \param evt
1283 * \return uint32_t 1411 * \return uint32_t
1284 * 1412 *
1285 * \b Description: 1413 * \b Description:
1286 * This routine is invoked when we receive a request to remove a nport we are in the 1414 * This routine is invoked when we receive a request to remove a nport we are in the
1287 * process of PRLIing. We should software abort outstanding prli, unreg 1415 * process of PRLIing. We should software abort outstanding prli, unreg
1288 * login, send a logout. We will change node state to UNUSED_NODE, put it 1416 * login, send a logout. We will change node state to UNUSED_NODE, put it
1289 * on plogi list so it can be freed when LOGO completes. 1417 * on plogi list so it can be freed when LOGO completes.
1290 * 1418 *
1291 */ 1419 */
1420
1292static uint32_t 1421static uint32_t
1293lpfc_device_rm_prli_issue(struct lpfc_hba * phba, 1422lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1294 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1423 void *arg, uint32_t evt)
1295{ 1424{
1296 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1425 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1426
1427 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1428 spin_lock_irq(shost->host_lock);
1297 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1429 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1430 spin_unlock_irq(shost->host_lock);
1298 return ndlp->nlp_state; 1431 return ndlp->nlp_state;
1299 } 1432 } else {
1300 else {
1301 /* software abort outstanding PLOGI */ 1433 /* software abort outstanding PLOGI */
1302 lpfc_els_abort(phba, ndlp); 1434 lpfc_els_abort(vport->phba, ndlp);
1303 1435
1304 lpfc_drop_node(phba, ndlp); 1436 lpfc_drop_node(vport, ndlp);
1305 return NLP_STE_FREED_NODE; 1437 return NLP_STE_FREED_NODE;
1306 } 1438 }
1307} 1439}
1308 1440
1309 1441
1310/*! lpfc_device_recov_prli_issue 1442/*! lpfc_device_recov_prli_issue
1311 * 1443 *
1312 * \pre 1444 * \pre
1313 * \post 1445 * \post
1314 * \param phba 1446 * \param phba
1315 * \param ndlp 1447 * \param ndlp
1316 * \param arg 1448 * \param arg
1317 * \param evt 1449 * \param evt
1318 * \return uint32_t 1450 * \return uint32_t
1319 * 1451 *
1320 * \b Description: 1452 * \b Description:
1321 * The routine is invoked when the state of a device is unknown, like 1453 * The routine is invoked when the state of a device is unknown, like
1322 * during a link down. We should remove the nodelist entry from the 1454 * during a link down. We should remove the nodelist entry from the
1323 * unmapped list, issue a UNREG_LOGIN, do a software abort of the 1455 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1324 * outstanding PRLI command, then free the node entry. 1456 * outstanding PRLI command, then free the node entry.
1325 */ 1457 */
1326static uint32_t 1458static uint32_t
1327lpfc_device_recov_prli_issue(struct lpfc_hba * phba, 1459lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1328 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1460 struct lpfc_nodelist *ndlp,
1461 void *arg,
1462 uint32_t evt)
1329{ 1463{
1464 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1465 struct lpfc_hba *phba = vport->phba;
1466
1467 /* Don't do anything that will mess up processing of the
1468 * previous RSCN.
1469 */
1470 if (vport->fc_flag & FC_RSCN_DEFERRED)
1471 return ndlp->nlp_state;
1472
1330 /* software abort outstanding PRLI */ 1473 /* software abort outstanding PRLI */
1331 lpfc_els_abort(phba, ndlp); 1474 lpfc_els_abort(phba, ndlp);
1332 1475
1333 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; 1476 ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1334 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1477 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1335 spin_lock_irq(phba->host->host_lock); 1478 spin_lock_irq(shost->host_lock);
1336 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1479 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1337 spin_unlock_irq(phba->host->host_lock); 1480 spin_unlock_irq(shost->host_lock);
1481 lpfc_disc_set_adisc(vport, ndlp);
1338 return ndlp->nlp_state; 1482 return ndlp->nlp_state;
1339} 1483}
1340 1484
1341static uint32_t 1485static uint32_t
1342lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba, 1486lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1343 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1487 void *arg, uint32_t evt)
1344{ 1488{
1345 struct lpfc_iocbq *cmdiocb; 1489 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1346
1347 cmdiocb = (struct lpfc_iocbq *) arg;
1348 1490
1349 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 1491 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1350 return ndlp->nlp_state; 1492 return ndlp->nlp_state;
1351} 1493}
1352 1494
1353static uint32_t 1495static uint32_t
1354lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba, 1496lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1355 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1497 void *arg, uint32_t evt)
1356{ 1498{
1357 struct lpfc_iocbq *cmdiocb; 1499 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1358 1500
1359 cmdiocb = (struct lpfc_iocbq *) arg; 1501 lpfc_rcv_prli(vport, ndlp, cmdiocb);
1360 1502 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1361 lpfc_rcv_prli(phba, ndlp, cmdiocb);
1362 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1363 return ndlp->nlp_state; 1503 return ndlp->nlp_state;
1364} 1504}
1365 1505
1366static uint32_t 1506static uint32_t
1367lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba, 1507lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1368 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1508 void *arg, uint32_t evt)
1369{ 1509{
1370 struct lpfc_iocbq *cmdiocb; 1510 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1371 1511
1372 cmdiocb = (struct lpfc_iocbq *) arg; 1512 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1373
1374 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1375 return ndlp->nlp_state; 1513 return ndlp->nlp_state;
1376} 1514}
1377 1515
1378static uint32_t 1516static uint32_t
1379lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba, 1517lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1380 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1518 void *arg, uint32_t evt)
1381{ 1519{
1382 struct lpfc_iocbq *cmdiocb; 1520 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1383 1521
1384 cmdiocb = (struct lpfc_iocbq *) arg; 1522 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1385
1386 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1387 return ndlp->nlp_state; 1523 return ndlp->nlp_state;
1388} 1524}
1389 1525
1390static uint32_t 1526static uint32_t
1391lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba, 1527lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1392 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1528 void *arg, uint32_t evt)
1393{ 1529{
1394 struct lpfc_iocbq *cmdiocb; 1530 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1395
1396 cmdiocb = (struct lpfc_iocbq *) arg;
1397 1531
1398 lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); 1532 lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1399 return ndlp->nlp_state; 1533 return ndlp->nlp_state;
1400} 1534}
1401 1535
1402static uint32_t 1536static uint32_t
1403lpfc_device_recov_unmap_node(struct lpfc_hba * phba, 1537lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1404 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1538 struct lpfc_nodelist *ndlp,
1539 void *arg,
1540 uint32_t evt)
1405{ 1541{
1542 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1543
1406 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; 1544 ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1407 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1545 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1546 spin_lock_irq(shost->host_lock);
1408 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1547 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1409 lpfc_disc_set_adisc(phba, ndlp); 1548 spin_unlock_irq(shost->host_lock);
1549 lpfc_disc_set_adisc(vport, ndlp);
1410 1550
1411 return ndlp->nlp_state; 1551 return ndlp->nlp_state;
1412} 1552}
1413 1553
1414static uint32_t 1554static uint32_t
1415lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba, 1555lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1416 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1556 void *arg, uint32_t evt)
1417{ 1557{
1418 struct lpfc_iocbq *cmdiocb; 1558 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1419
1420 cmdiocb = (struct lpfc_iocbq *) arg;
1421 1559
1422 lpfc_rcv_plogi(phba, ndlp, cmdiocb); 1560 lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1423 return ndlp->nlp_state; 1561 return ndlp->nlp_state;
1424} 1562}
1425 1563
1426static uint32_t 1564static uint32_t
1427lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba, 1565lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1428 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1566 void *arg, uint32_t evt)
1429{ 1567{
1430 struct lpfc_iocbq *cmdiocb; 1568 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1431 1569
1432 cmdiocb = (struct lpfc_iocbq *) arg; 1570 lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1433
1434 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1435 return ndlp->nlp_state; 1571 return ndlp->nlp_state;
1436} 1572}
1437 1573
1438static uint32_t 1574static uint32_t
1439lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba, 1575lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1440 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1576 void *arg, uint32_t evt)
1441{ 1577{
1442 struct lpfc_iocbq *cmdiocb; 1578 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1443 1579
1444 cmdiocb = (struct lpfc_iocbq *) arg; 1580 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1445
1446 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1447 return ndlp->nlp_state; 1581 return ndlp->nlp_state;
1448} 1582}
1449 1583
1450static uint32_t 1584static uint32_t
1451lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba, 1585lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1452 struct lpfc_nodelist * ndlp, void *arg, 1586 struct lpfc_nodelist *ndlp,
1453 uint32_t evt) 1587 void *arg, uint32_t evt)
1454{ 1588{
1455 struct lpfc_iocbq *cmdiocb; 1589 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1456
1457 cmdiocb = (struct lpfc_iocbq *) arg;
1458 1590
1459 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1591 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1460 return ndlp->nlp_state; 1592 return ndlp->nlp_state;
1461} 1593}
1462 1594
1463static uint32_t 1595static uint32_t
1464lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba, 1596lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1465 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1597 void *arg, uint32_t evt)
1466{ 1598{
1467 struct lpfc_iocbq *cmdiocb; 1599 struct lpfc_hba *phba = vport->phba;
1468 1600 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1469 cmdiocb = (struct lpfc_iocbq *) arg;
1470 1601
1471 /* flush the target */ 1602 /* flush the target */
1472 spin_lock_irq(phba->host->host_lock);
1473 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 1603 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1474 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT); 1604 ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1475 spin_unlock_irq(phba->host->host_lock);
1476 1605
1477 /* Treat like rcv logo */ 1606 /* Treat like rcv logo */
1478 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); 1607 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1479 return ndlp->nlp_state; 1608 return ndlp->nlp_state;
1480} 1609}
1481 1610
1482static uint32_t 1611static uint32_t
1483lpfc_device_recov_mapped_node(struct lpfc_hba * phba, 1612lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
1484 struct lpfc_nodelist * ndlp, void *arg, 1613 struct lpfc_nodelist *ndlp,
1485 uint32_t evt) 1614 void *arg,
1615 uint32_t evt)
1486{ 1616{
1617 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1618
1487 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; 1619 ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1488 lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE); 1620 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1489 spin_lock_irq(phba->host->host_lock); 1621 spin_lock_irq(shost->host_lock);
1490 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1622 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1491 spin_unlock_irq(phba->host->host_lock); 1623 spin_unlock_irq(shost->host_lock);
1492 lpfc_disc_set_adisc(phba, ndlp); 1624 lpfc_disc_set_adisc(vport, ndlp);
1493 return ndlp->nlp_state; 1625 return ndlp->nlp_state;
1494} 1626}
1495 1627
1496static uint32_t 1628static uint32_t
1497lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba, 1629lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1498 struct lpfc_nodelist * ndlp, void *arg, 1630 void *arg, uint32_t evt)
1499 uint32_t evt)
1500{ 1631{
1501 struct lpfc_iocbq *cmdiocb; 1632 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1502 1633 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1503 cmdiocb = (struct lpfc_iocbq *) arg;
1504 1634
1505 /* Ignore PLOGI if we have an outstanding LOGO */ 1635 /* Ignore PLOGI if we have an outstanding LOGO */
1506 if (ndlp->nlp_flag & NLP_LOGO_SND) { 1636 if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
1507 return ndlp->nlp_state; 1637 return ndlp->nlp_state;
1508 } 1638 }
1509 1639
1510 if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) { 1640 if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1511 spin_lock_irq(phba->host->host_lock); 1641 spin_lock_irq(shost->host_lock);
1512 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1642 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1513 spin_unlock_irq(phba->host->host_lock); 1643 spin_unlock_irq(shost->host_lock);
1514 return ndlp->nlp_state; 1644 return ndlp->nlp_state;
1515 } 1645 }
1516 1646
1517 /* send PLOGI immediately, move to PLOGI issue state */ 1647 /* send PLOGI immediately, move to PLOGI issue state */
1518 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1648 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1519 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1649 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1520 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1650 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1521 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1651 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1522 } 1652 }
1523 1653
1524 return ndlp->nlp_state; 1654 return ndlp->nlp_state;
1525} 1655}
1526 1656
1527static uint32_t 1657static uint32_t
1528lpfc_rcv_prli_npr_node(struct lpfc_hba * phba, 1658lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1529 struct lpfc_nodelist * ndlp, void *arg, 1659 void *arg, uint32_t evt)
1530 uint32_t evt)
1531{ 1660{
1532 struct lpfc_iocbq *cmdiocb; 1661 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1533 struct ls_rjt stat; 1662 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1534 1663 struct ls_rjt stat;
1535 cmdiocb = (struct lpfc_iocbq *) arg;
1536 1664
1537 memset(&stat, 0, sizeof (struct ls_rjt)); 1665 memset(&stat, 0, sizeof (struct ls_rjt));
1538 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 1666 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1539 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 1667 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1540 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp); 1668 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1541 1669
1542 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1670 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1543 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1671 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1544 spin_lock_irq(phba->host->host_lock); 1672 spin_lock_irq(shost->host_lock);
1545 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1673 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1546 spin_unlock_irq(phba->host->host_lock);
1547 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1674 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1548 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 1675 spin_unlock_irq(shost->host_lock);
1549 lpfc_issue_els_adisc(phba, ndlp, 0); 1676 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1677 lpfc_issue_els_adisc(vport, ndlp, 0);
1550 } else { 1678 } else {
1551 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1679 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1552 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1680 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1553 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1681 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1554 } 1682 }
1555 } 1683 }
1556 return ndlp->nlp_state; 1684 return ndlp->nlp_state;
1557} 1685}
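A PRLI arriving while the node sits in NPR is rejected with LS_RJT (reason "unable to perform command request") and, unless a delayed retry is already armed, discovery is restarted: ADISC when a usable login is still held, PLOGI otherwise. The branch as one function (flags reduced to booleans, names illustrative):

#include <stdio.h>

enum els { NONE, ADISC, PLOGI };

/* Returns which ELS to issue after rejecting the unexpected PRLI. */
static enum els npr_restart(int delay_tmo_armed, int adisc_capable)
{
	if (delay_tmo_armed)
		return NONE;		/* a retry is already scheduled */
	return adisc_capable ? ADISC	/* RPI still valid: revalidate cheaply */
			     : PLOGI;	/* no login: start from scratch */
}

int main(void)
{
	printf("%d %d %d\n",
	       npr_restart(1, 1),	/* NONE  */
	       npr_restart(0, 1),	/* ADISC */
	       npr_restart(0, 0));	/* PLOGI */
	return 0;
}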
1558 1686
1559static uint32_t 1687static uint32_t
1560lpfc_rcv_logo_npr_node(struct lpfc_hba * phba, 1688lpfc_rcv_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1561 struct lpfc_nodelist * ndlp, void *arg, 1689 void *arg, uint32_t evt)
1562 uint32_t evt)
1563{ 1690{
1564 struct lpfc_iocbq *cmdiocb; 1691 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1565 1692
1566 cmdiocb = (struct lpfc_iocbq *) arg; 1693 lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1567
1568 lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1569 return ndlp->nlp_state; 1694 return ndlp->nlp_state;
1570} 1695}
1571 1696
1572static uint32_t 1697static uint32_t
1573lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba, 1698lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1574 struct lpfc_nodelist * ndlp, void *arg, 1699 void *arg, uint32_t evt)
1575 uint32_t evt)
1576{ 1700{
1577 struct lpfc_iocbq *cmdiocb; 1701 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1578
1579 cmdiocb = (struct lpfc_iocbq *) arg;
1580 1702
1581 lpfc_rcv_padisc(phba, ndlp, cmdiocb); 1703 lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1582 1704
1583 /* 1705 /*
1584 * Do not start discovery if discovery is about to start 1706 * Do not start discovery if discovery is about to start
@@ -1586,53 +1708,52 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1586 * here will affect the counting of discovery threads. 1708 * here will affect the counting of discovery threads.
1587 */ 1709 */
1588 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && 1710 if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1589 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)){ 1711 !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1590 if (ndlp->nlp_flag & NLP_NPR_ADISC) { 1712 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1713 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1591 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1714 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1592 lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE); 1715 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1593 lpfc_issue_els_adisc(phba, ndlp, 0); 1716 lpfc_issue_els_adisc(vport, ndlp, 0);
1594 } else { 1717 } else {
1595 ndlp->nlp_prev_state = NLP_STE_NPR_NODE; 1718 ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1596 lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE); 1719 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1597 lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0); 1720 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1598 } 1721 }
1599 } 1722 }
1600 return ndlp->nlp_state; 1723 return ndlp->nlp_state;
1601} 1724}
1602 1725
1603static uint32_t 1726static uint32_t
1604lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba, 1727lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1605 struct lpfc_nodelist * ndlp, void *arg, 1728 void *arg, uint32_t evt)
1606 uint32_t evt)
1607{ 1729{
1608 struct lpfc_iocbq *cmdiocb; 1730 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1609 1731 struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1610 cmdiocb = (struct lpfc_iocbq *) arg;
1611 1732
1612 spin_lock_irq(phba->host->host_lock); 1733 spin_lock_irq(shost->host_lock);
1613 ndlp->nlp_flag |= NLP_LOGO_ACC; 1734 ndlp->nlp_flag |= NLP_LOGO_ACC;
1614 spin_unlock_irq(phba->host->host_lock); 1735 spin_unlock_irq(shost->host_lock);
1615 1736
1616 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); 1737 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1617 1738
1618 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 1739 if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
1619 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); 1740 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1620 spin_lock_irq(phba->host->host_lock); 1741 spin_lock_irq(shost->host_lock);
1621 ndlp->nlp_flag |= NLP_DELAY_TMO; 1742 ndlp->nlp_flag |= NLP_DELAY_TMO;
1622 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1743 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1623 spin_unlock_irq(phba->host->host_lock); 1744 spin_unlock_irq(shost->host_lock);
1624 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; 1745 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1625 } else { 1746 } else {
1626 spin_lock_irq(phba->host->host_lock); 1747 spin_lock_irq(shost->host_lock);
1627 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 1748 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1628 spin_unlock_irq(phba->host->host_lock); 1749 spin_unlock_irq(shost->host_lock);
1629 } 1750 }
1630 return ndlp->nlp_state; 1751 return ndlp->nlp_state;
1631} 1752}
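The PRLO handler accepts the logout and, if no delayed retry is pending, arms the one-second node timer so a fresh PLOGI goes out after the remote port has settled; if the timer is already pending it is left alone and only the ADISC hint is cleared. A tiny model of that arm-once timer logic (jiffies and HZ faked for a userspace run, fields are stand-ins):

#include <stdio.h>

#define HZ 100
static unsigned long jiffies = 1000;

#define ELS_PLOGI 3

struct node {
	int delay_armed;	/* NLP_DELAY_TMO */
	unsigned long expires;	/* nlp_delayfunc deadline */
	int last_els;		/* command to replay on expiry */
};

static void rcv_prlo(struct node *n)
{
	/* accept the PRLO first (NLP_LOGO_ACC set, ACC sent), then: */
	if (!n->delay_armed) {
		n->expires = jiffies + HZ;	/* mod_timer(..., jiffies + HZ) */
		n->delay_armed = 1;
		n->last_els = ELS_PLOGI;	/* retry login when it fires */
	}
	/* in both branches the ADISC hint is cleared */
}

int main(void)
{
	struct node n = { 0 };
	rcv_prlo(&n);
	printf("armed=%d expires=%lu els=%d\n",
	       n.delay_armed, n.expires, n.last_els);
	return 0;
}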
1632 1753
1633static uint32_t 1754static uint32_t
1634lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, 1755lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1635 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1756 void *arg, uint32_t evt)
1636{ 1757{
1637 struct lpfc_iocbq *cmdiocb, *rspiocb; 1758 struct lpfc_iocbq *cmdiocb, *rspiocb;
1638 IOCB_t *irsp; 1759 IOCB_t *irsp;
@@ -1642,15 +1763,15 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
1642 1763
1643 irsp = &rspiocb->iocb; 1764 irsp = &rspiocb->iocb;
1644 if (irsp->ulpStatus) { 1765 if (irsp->ulpStatus) {
1645 lpfc_drop_node(phba, ndlp); 1766 lpfc_drop_node(vport, ndlp);
1646 return NLP_STE_FREED_NODE; 1767 return NLP_STE_FREED_NODE;
1647 } 1768 }
1648 return ndlp->nlp_state; 1769 return ndlp->nlp_state;
1649} 1770}
1650 1771
1651static uint32_t 1772static uint32_t
1652lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, 1773lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1653 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1774 void *arg, uint32_t evt)
1654{ 1775{
1655 struct lpfc_iocbq *cmdiocb, *rspiocb; 1776 struct lpfc_iocbq *cmdiocb, *rspiocb;
1656 IOCB_t *irsp; 1777 IOCB_t *irsp;
@@ -1660,25 +1781,24 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
1660 1781
1661 irsp = &rspiocb->iocb; 1782 irsp = &rspiocb->iocb;
1662 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 1783 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1663 lpfc_drop_node(phba, ndlp); 1784 lpfc_drop_node(vport, ndlp);
1664 return NLP_STE_FREED_NODE; 1785 return NLP_STE_FREED_NODE;
1665 } 1786 }
1666 return ndlp->nlp_state; 1787 return ndlp->nlp_state;
1667} 1788}
1668 1789
1669static uint32_t 1790static uint32_t
1670lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba, 1791lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1671 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 1792 void *arg, uint32_t evt)
1672{ 1793{
1673 lpfc_unreg_rpi(phba, ndlp); 1794 lpfc_unreg_rpi(vport, ndlp);
1674 /* This routine does nothing, just return the current state */ 1795 /* This routine does nothing, just return the current state */
1675 return ndlp->nlp_state; 1796 return ndlp->nlp_state;
1676} 1797}
1677 1798
1678static uint32_t 1799static uint32_t
1679lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, 1800lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1680 struct lpfc_nodelist * ndlp, void *arg, 1801 void *arg, uint32_t evt)
1681 uint32_t evt)
1682{ 1802{
1683 struct lpfc_iocbq *cmdiocb, *rspiocb; 1803 struct lpfc_iocbq *cmdiocb, *rspiocb;
1684 IOCB_t *irsp; 1804 IOCB_t *irsp;
@@ -1688,28 +1808,25 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
1688 1808
1689 irsp = &rspiocb->iocb; 1809 irsp = &rspiocb->iocb;
1690 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { 1810 if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1691 lpfc_drop_node(phba, ndlp); 1811 lpfc_drop_node(vport, ndlp);
1692 return NLP_STE_FREED_NODE; 1812 return NLP_STE_FREED_NODE;
1693 } 1813 }
1694 return ndlp->nlp_state; 1814 return ndlp->nlp_state;
1695} 1815}
1696 1816
1697static uint32_t 1817static uint32_t
1698lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, 1818lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1699 struct lpfc_nodelist * ndlp, void *arg, 1819 struct lpfc_nodelist *ndlp,
1700 uint32_t evt) 1820 void *arg, uint32_t evt)
1701{ 1821{
1702 LPFC_MBOXQ_t *pmb; 1822 LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1703 MAILBOX_t *mb; 1823 MAILBOX_t *mb = &pmb->mb;
1704
1705 pmb = (LPFC_MBOXQ_t *) arg;
1706 mb = &pmb->mb;
1707 1824
1708 if (!mb->mbxStatus) 1825 if (!mb->mbxStatus)
1709 ndlp->nlp_rpi = mb->un.varWords[0]; 1826 ndlp->nlp_rpi = mb->un.varWords[0];
1710 else { 1827 else {
1711 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { 1828 if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1712 lpfc_drop_node(phba, ndlp); 1829 lpfc_drop_node(vport, ndlp);
1713 return NLP_STE_FREED_NODE; 1830 return NLP_STE_FREED_NODE;
1714 } 1831 }
1715 } 1832 }
@@ -1717,28 +1834,38 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1717} 1834}
1718 1835
1719static uint32_t 1836static uint32_t
1720lpfc_device_rm_npr_node(struct lpfc_hba * phba, 1837lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1721 struct lpfc_nodelist * ndlp, void *arg, 1838 void *arg, uint32_t evt)
1722 uint32_t evt)
1723{ 1839{
1840 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1841
1724 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { 1842 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1843 spin_lock_irq(shost->host_lock);
1725 ndlp->nlp_flag |= NLP_NODEV_REMOVE; 1844 ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1845 spin_unlock_irq(shost->host_lock);
1726 return ndlp->nlp_state; 1846 return ndlp->nlp_state;
1727 } 1847 }
1728 lpfc_drop_node(phba, ndlp); 1848 lpfc_drop_node(vport, ndlp);
1729 return NLP_STE_FREED_NODE; 1849 return NLP_STE_FREED_NODE;
1730} 1850}
1731 1851
1732static uint32_t 1852static uint32_t
1733lpfc_device_recov_npr_node(struct lpfc_hba * phba, 1853lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1734 struct lpfc_nodelist * ndlp, void *arg, 1854 void *arg, uint32_t evt)
1735 uint32_t evt)
1736{ 1855{
1737 spin_lock_irq(phba->host->host_lock); 1856 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1857
1858 /* Don't do anything that will mess up processing of the
1859 * previous RSCN.
1860 */
1861 if (vport->fc_flag & FC_RSCN_DEFERRED)
1862 return ndlp->nlp_state;
1863
1864 spin_lock_irq(shost->host_lock);
1738 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); 1865 ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1739 spin_unlock_irq(phba->host->host_lock); 1866 spin_unlock_irq(shost->host_lock);
1740 if (ndlp->nlp_flag & NLP_DELAY_TMO) { 1867 if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1741 lpfc_cancel_retry_delay_tmo(phba, ndlp); 1868 lpfc_cancel_retry_delay_tmo(vport, ndlp);
1742 } 1869 }
1743 return ndlp->nlp_state; 1870 return ndlp->nlp_state;
1744} 1871}
@@ -1801,7 +1928,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1801 */ 1928 */
1802 1929
1803static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT]) 1930static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1804 (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = { 1931 (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
1805 /* Action routine Event Current State */ 1932 /* Action routine Event Current State */
1806 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */ 1933 lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
1807 lpfc_rcv_els_unused_node, /* RCV_PRLI */ 1934 lpfc_rcv_els_unused_node, /* RCV_PRLI */
@@ -1818,7 +1945,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1818 lpfc_disc_illegal, /* DEVICE_RECOVERY */ 1945 lpfc_disc_illegal, /* DEVICE_RECOVERY */
1819 1946
1820 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */ 1947 lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
1821 lpfc_rcv_els_plogi_issue, /* RCV_PRLI */ 1948 lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
1822 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */ 1949 lpfc_rcv_logo_plogi_issue, /* RCV_LOGO */
1823 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */ 1950 lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
1824 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */ 1951 lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
@@ -1917,35 +2044,41 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1917}; 2044};
1918 2045
1919int 2046int
1920lpfc_disc_state_machine(struct lpfc_hba * phba, 2047lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1921 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) 2048 void *arg, uint32_t evt)
1922{ 2049{
2050 struct lpfc_hba *phba = vport->phba;
1923 uint32_t cur_state, rc; 2051 uint32_t cur_state, rc;
1924 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *, 2052 uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
1925 uint32_t); 2053 uint32_t);
1926 2054
1927 lpfc_nlp_get(ndlp); 2055 lpfc_nlp_get(ndlp);
1928 cur_state = ndlp->nlp_state; 2056 cur_state = ndlp->nlp_state;
1929 2057
1930 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ 2058 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
1931 lpfc_printf_log(phba, 2059 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1932 KERN_INFO, 2060 "%d (%d):0211 DSM in event x%x on NPort x%x in "
1933 LOG_DISCOVERY, 2061 "state %d Data: x%x\n",
1934 "%d:0211 DSM in event x%x on NPort x%x in state %d " 2062 phba->brd_no, vport->vpi,
1935 "Data: x%x\n",
1936 phba->brd_no,
1937 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag); 2063 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
1938 2064
2065 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2066 "DSM in: evt:%d ste:%d did:x%x",
2067 evt, cur_state, ndlp->nlp_DID);
2068
1939 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt]; 2069 func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
1940 rc = (func) (phba, ndlp, arg, evt); 2070 rc = (func) (vport, ndlp, arg, evt);
1941 2071
1942 /* DSM out state <rc> on NPort <nlp_DID> */ 2072 /* DSM out state <rc> on NPort <nlp_DID> */
1943 lpfc_printf_log(phba, 2073 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1944 KERN_INFO, 2074 "%d (%d):0212 DSM out state %d on NPort x%x "
1945 LOG_DISCOVERY, 2075 "Data: x%x\n",
1946 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n", 2076 phba->brd_no, vport->vpi,
1947 phba->brd_no, 2077 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1948 rc, ndlp->nlp_DID, ndlp->nlp_flag); 2078
2079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2080 "DSM out: ste:%d did:x%x flg:x%x",
2081 rc, ndlp->nlp_DID, ndlp->nlp_flag);
1949 2082
1950 lpfc_nlp_put(ndlp); 2083 lpfc_nlp_put(ndlp);
1951 2084
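
The vport conversion above leaves lpfc's dispatch scheme intact: one function pointer per (state, event) pair in a flat table, looked up as cur_state * NLP_EVT_MAX_EVENT + evt, with only the pointer signature moving from lpfc_hba to lpfc_vport. A minimal userspace sketch of the same table-driven dispatch follows; every name in it is an illustrative stand-in, not an lpfc symbol.

#include <stdio.h>
#include <stdint.h>

enum { ST_UNUSED, ST_PLOGI_ISSUE, ST_MAX };	/* states */
enum { EV_RCV_PLOGI, EV_RCV_LOGO, EV_MAX };	/* events */

typedef uint32_t (*action_fn)(void *ctx, uint32_t evt);

static uint32_t ignore(void *ctx, uint32_t evt) { (void)ctx; (void)evt; return ST_UNUSED; }
static uint32_t plogi(void *ctx, uint32_t evt)  { (void)ctx; (void)evt; return ST_PLOGI_ISSUE; }

/* One row per state, one column per event: index = state * EV_MAX + evt. */
static action_fn dispatch[ST_MAX * EV_MAX] = {
	plogi,  ignore,		/* UNUSED:      RCV_PLOGI, RCV_LOGO */
	ignore, ignore,		/* PLOGI_ISSUE: RCV_PLOGI, RCV_LOGO */
};

static uint32_t run_event(uint32_t state, uint32_t evt, void *ctx)
{
	return dispatch[state * EV_MAX + evt](ctx, evt);
}

int main(void)
{
	printf("next state: %u\n", (unsigned)run_event(ST_UNUSED, EV_RCV_PLOGI, NULL));
	return 0;
}
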
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9a12d05e99e4..8f45bbc42126 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -37,10 +37,158 @@
37#include "lpfc.h" 37#include "lpfc.h"
38#include "lpfc_logmsg.h" 38#include "lpfc_logmsg.h"
39#include "lpfc_crtn.h" 39#include "lpfc_crtn.h"
40#include "lpfc_vport.h"
40 41
41#define LPFC_RESET_WAIT 2 42#define LPFC_RESET_WAIT 2
42#define LPFC_ABORT_WAIT 2 43#define LPFC_ABORT_WAIT 2
43 44
45/*
46 * This function is called with no lock held when there is a resource
47 * error in driver or in firmware.
48 */
49void
50lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51{
52 unsigned long flags;
53
54 spin_lock_irqsave(&phba->hbalock, flags);
55 atomic_inc(&phba->num_rsrc_err);
56 phba->last_rsrc_error_time = jiffies;
57
58 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
59 spin_unlock_irqrestore(&phba->hbalock, flags);
60 return;
61 }
62
63 phba->last_ramp_down_time = jiffies;
64
65 spin_unlock_irqrestore(&phba->hbalock, flags);
66
67 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
68 if ((phba->pport->work_port_events &
69 WORKER_RAMP_DOWN_QUEUE) == 0) {
70 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
71 }
72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73
74 spin_lock_irqsave(&phba->hbalock, flags);
75 if (phba->work_wait)
76 wake_up(phba->work_wait);
77 spin_unlock_irqrestore(&phba->hbalock, flags);
78
79 return;
80}
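
lpfc_adjust_queue_depth rate-limits ramp-down work: it stamps the resource error under hbalock, returns early if a ramp-down already fired inside QUEUE_RAMP_DOWN_INTERVAL, and otherwise records the time, raises WORKER_RAMP_DOWN_QUEUE, and wakes the worker thread. A minimal sketch of that coalescing throttle, assuming a simple one-shot window; the interval value and names below are illustrative, not the driver's.

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

#define RAMP_DOWN_INTERVAL 2	/* seconds; stands in for the jiffies-based interval */

static time_t last_ramp_down;

/* Returns true when the caller should actually schedule ramp-down work;
 * repeated resource errors inside the window collapse into one event. */
static bool note_resource_error(void)
{
	time_t now = time(NULL);

	if (last_ramp_down + RAMP_DOWN_INTERVAL > now)
		return false;	/* throttled: a ramp-down fired recently */
	last_ramp_down = now;
	return true;		/* caller sets the work flag and wakes the worker */
}

int main(void)
{
	printf("first error schedules work: %d\n", note_resource_error());
	printf("second error inside window is coalesced: %d\n", note_resource_error());
	return 0;
}
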
81
82/*
83 * This function is called with no lock held when there is a successful
84 * SCSI command completion.
85 */
86static inline void
87lpfc_rampup_queue_depth(struct lpfc_hba *phba,
88 struct scsi_device *sdev)
89{
90 unsigned long flags;
91 atomic_inc(&phba->num_cmd_success);
92
93 if (phba->cfg_lun_queue_depth <= sdev->queue_depth)
94 return;
95
96 spin_lock_irqsave(&phba->hbalock, flags);
97 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
98 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
99 spin_unlock_irqrestore(&phba->hbalock, flags);
100 return;
101 }
102
103 phba->last_ramp_up_time = jiffies;
104 spin_unlock_irqrestore(&phba->hbalock, flags);
105
106 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
107 if ((phba->pport->work_port_events &
108 WORKER_RAMP_UP_QUEUE) == 0) {
109 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
110 }
111 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
112
113 spin_lock_irqsave(&phba->hbalock, flags);
114 if (phba->work_wait)
115 wake_up(phba->work_wait);
116 spin_unlock_irqrestore(&phba->hbalock, flags);
117}
118
119void
120lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
121{
122 struct lpfc_vport *vport;
123 struct Scsi_Host *host;
124 struct scsi_device *sdev;
125 unsigned long new_queue_depth;
126 unsigned long num_rsrc_err, num_cmd_success;
127
128 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
129 num_cmd_success = atomic_read(&phba->num_cmd_success);
130
131 spin_lock_irq(&phba->hbalock);
132 list_for_each_entry(vport, &phba->port_list, listentry) {
133 host = lpfc_shost_from_vport(vport);
134 if (!scsi_host_get(host))
135 continue;
136
137 spin_unlock_irq(&phba->hbalock);
138
139 shost_for_each_device(sdev, host) {
140 new_queue_depth = sdev->queue_depth * num_rsrc_err /
141 (num_rsrc_err + num_cmd_success);
142 if (!new_queue_depth)
143 new_queue_depth = sdev->queue_depth - 1;
144 else
145 new_queue_depth =
146 sdev->queue_depth - new_queue_depth;
147
148 if (sdev->ordered_tags)
149 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
150 new_queue_depth);
151 else
152 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
153 new_queue_depth);
154 }
155 spin_lock_irq(&phba->hbalock);
156 scsi_host_put(host);
157 }
158 spin_unlock_irq(&phba->hbalock);
159 atomic_set(&phba->num_rsrc_err, 0);
160 atomic_set(&phba->num_cmd_success, 0);
161}
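
The ramp-down handler shrinks each LUN queue in proportion to the error rate seen since the last adjustment: it subtracts depth * err / (err + success) from the current depth, and falls back to a decrement of one when the integer division rounds to zero. The arithmetic in isolation:

#include <stdio.h>

/* Mirror of the ramp-down math above; callers guarantee rsrc_err >= 1,
 * so the divisor is never zero. */
static unsigned long ramp_down_depth(unsigned long depth,
				     unsigned long rsrc_err,
				     unsigned long cmd_success)
{
	unsigned long cut = depth * rsrc_err / (rsrc_err + cmd_success);

	return cut ? depth - cut : depth - 1;	/* always back off by at least 1 */
}

int main(void)
{
	/* depth 30, 5 errors vs 95 successes: cut = 1, new depth 29 */
	printf("%lu\n", ramp_down_depth(30, 5, 95));
	/* depth 30, 10 errors vs 10 successes: cut = 15, new depth 15 */
	printf("%lu\n", ramp_down_depth(30, 10, 10));
	return 0;
}
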
162
163void
164lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
165{
166 struct lpfc_vport *vport;
167 struct Scsi_Host *host;
168 struct scsi_device *sdev;
169
170 spin_lock_irq(&phba->hbalock);
171 list_for_each_entry(vport, &phba->port_list, listentry) {
172 host = lpfc_shost_from_vport(vport);
173 if (!scsi_host_get(host))
174 continue;
175
176 spin_unlock_irq(&phba->hbalock);
177 shost_for_each_device(sdev, host) {
178 if (sdev->ordered_tags)
179 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
180 sdev->queue_depth+1);
181 else
182 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
183 sdev->queue_depth+1);
184 }
185 spin_lock_irq(&phba->hbalock);
186 scsi_host_put(host);
187 }
188 spin_unlock_irq(&phba->hbalock);
189 atomic_set(&phba->num_rsrc_err, 0);
190 atomic_set(&phba->num_cmd_success, 0);
191}
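
Both ramp handlers share a locking shape worth noting: the port list is walked under hbalock, but shost_for_each_device() can sleep, so each host is pinned with scsi_host_get(), the lock is dropped for the device walk, then retaken before scsi_host_put() and the next list step; the held reference is what keeps resuming the iteration safe. A simplified userspace sketch of that drop-and-reacquire pattern, with a pthread mutex standing in for the spinlock and a plain counter for scsi_host_get/put:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct host { int id; int refcount; };

static struct host hosts[3] = { {0, 1}, {1, 1}, {2, 1} };

static void visit_devices(struct host *h)	/* may sleep in the kernel analogue */
{
	printf("adjusting queues on host %d\n", h->id);
}

int main(void)
{
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < 3; i++) {
		struct host *h = &hosts[i];

		h->refcount++;			/* scsi_host_get(): pin h */
		pthread_mutex_unlock(&list_lock); /* drop lock: the walk may sleep */

		visit_devices(h);

		pthread_mutex_lock(&list_lock);	/* retake before touching the list */
		h->refcount--;			/* scsi_host_put() */
	}
	pthread_mutex_unlock(&list_lock);
	return 0;
}
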
44 192
45/* 193/*
46 * This routine allocates a scsi buffer, which contains all the necessary 194 * This routine allocates a scsi buffer, which contains all the necessary
@@ -51,8 +199,9 @@
51 * and the BPL BDE is setup in the IOCB. 199 * and the BPL BDE is setup in the IOCB.
52 */ 200 */
53static struct lpfc_scsi_buf * 201static struct lpfc_scsi_buf *
54lpfc_new_scsi_buf(struct lpfc_hba * phba) 202lpfc_new_scsi_buf(struct lpfc_vport *vport)
55{ 203{
204 struct lpfc_hba *phba = vport->phba;
56 struct lpfc_scsi_buf *psb; 205 struct lpfc_scsi_buf *psb;
57 struct ulp_bde64 *bpl; 206 struct ulp_bde64 *bpl;
58 IOCB_t *iocb; 207 IOCB_t *iocb;
@@ -63,7 +212,6 @@ lpfc_new_scsi_buf(struct lpfc_hba * phba)
63 if (!psb) 212 if (!psb)
64 return NULL; 213 return NULL;
65 memset(psb, 0, sizeof (struct lpfc_scsi_buf)); 214 memset(psb, 0, sizeof (struct lpfc_scsi_buf));
66 psb->scsi_hba = phba;
67 215
68 /* 216 /*
69 * Get memory from the pci pool to map the virt space to pci bus space 217 * Get memory from the pci pool to map the virt space to pci bus space
@@ -155,7 +303,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
155} 303}
156 304
157static void 305static void
158lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) 306lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
159{ 307{
160 unsigned long iflag = 0; 308 unsigned long iflag = 0;
161 309
@@ -166,7 +314,7 @@ lpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
166} 314}
167 315
168static int 316static int
169lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd) 317lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
170{ 318{
171 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 319 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
172 struct scatterlist *sgel = NULL; 320 struct scatterlist *sgel = NULL;
@@ -175,8 +323,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
175 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 323 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
176 dma_addr_t physaddr; 324 dma_addr_t physaddr;
177 uint32_t i, num_bde = 0; 325 uint32_t i, num_bde = 0;
178 int datadir = scsi_cmnd->sc_data_direction; 326 int nseg, datadir = scsi_cmnd->sc_data_direction;
179 int dma_error;
180 327
181 /* 328 /*
182 * There are three possibilities here - use scatter-gather segment, use 329 * There are three possibilities here - use scatter-gather segment, use
@@ -185,26 +332,26 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
185 * data bde entry. 332 * data bde entry.
186 */ 333 */
187 bpl += 2; 334 bpl += 2;
188 if (scsi_cmnd->use_sg) { 335 if (scsi_sg_count(scsi_cmnd)) {
189 /* 336 /*
 190 * The driver stores the segment count returned from pci_map_sg 337 * because this is a count of dma-mappings used to map the use_sg
 191 * because this is a count of dma-mappings used to map the use_sg 338 * pages. They are not guaranteed to be the same for those
192 * pages. They are not guaranteed to be the same for those 339 * pages. They are not guaranteed to be the same for those
193 * architectures that implement an IOMMU. 340 * architectures that implement an IOMMU.
194 */ 341 */
195 sgel = (struct scatterlist *)scsi_cmnd->request_buffer; 342
196 lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, 343 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
197 scsi_cmnd->use_sg, datadir); 344 scsi_sg_count(scsi_cmnd), datadir);
198 if (lpfc_cmd->seg_cnt == 0) 345 if (unlikely(!nseg))
199 return 1; 346 return 1;
200 347
348 lpfc_cmd->seg_cnt = nseg;
201 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 349 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
202 printk(KERN_ERR "%s: Too many sg segments from " 350 printk(KERN_ERR "%s: Too many sg segments from "
203 "dma_map_sg. Config %d, seg_cnt %d", 351 "dma_map_sg. Config %d, seg_cnt %d",
204 __FUNCTION__, phba->cfg_sg_seg_cnt, 352 __FUNCTION__, phba->cfg_sg_seg_cnt,
205 lpfc_cmd->seg_cnt); 353 lpfc_cmd->seg_cnt);
206 dma_unmap_sg(&phba->pcidev->dev, sgel, 354 scsi_dma_unmap(scsi_cmnd);
207 lpfc_cmd->seg_cnt, datadir);
208 return 1; 355 return 1;
209 } 356 }
210 357
@@ -214,7 +361,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
214 * single scsi command. Just run through the seg_cnt and format 361 * single scsi command. Just run through the seg_cnt and format
215 * the bde's. 362 * the bde's.
216 */ 363 */
217 for (i = 0; i < lpfc_cmd->seg_cnt; i++) { 364 scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
218 physaddr = sg_dma_address(sgel); 365 physaddr = sg_dma_address(sgel);
219 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); 366 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
220 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 367 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
@@ -225,34 +372,8 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
225 bpl->tus.f.bdeFlags = BUFF_USE_RCV; 372 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
226 bpl->tus.w = le32_to_cpu(bpl->tus.w); 373 bpl->tus.w = le32_to_cpu(bpl->tus.w);
227 bpl++; 374 bpl++;
228 sgel++;
229 num_bde++; 375 num_bde++;
230 } 376 }
231 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
232 physaddr = dma_map_single(&phba->pcidev->dev,
233 scsi_cmnd->request_buffer,
234 scsi_cmnd->request_bufflen,
235 datadir);
236 dma_error = dma_mapping_error(physaddr);
237 if (dma_error) {
238 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
239 "%d:0718 Unable to dma_map_single "
240 "request_buffer: x%x\n",
241 phba->brd_no, dma_error);
242 return 1;
243 }
244
245 lpfc_cmd->nonsg_phys = physaddr;
246 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
247 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
248 bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
249 if (datadir == DMA_TO_DEVICE)
250 bpl->tus.f.bdeFlags = 0;
251 else
252 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
253 bpl->tus.w = le32_to_cpu(bpl->tus.w);
254 num_bde = 1;
255 bpl++;
256 } 377 }
257 378
258 /* 379 /*
@@ -266,7 +387,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
266 (num_bde * sizeof (struct ulp_bde64)); 387 (num_bde * sizeof (struct ulp_bde64));
267 iocb_cmd->ulpBdeCount = 1; 388 iocb_cmd->ulpBdeCount = 1;
268 iocb_cmd->ulpLe = 1; 389 iocb_cmd->ulpLe = 1;
269 fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); 390 fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
270 return 0; 391 return 0;
271} 392}
272 393
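
This hunk is the lpfc_scsi.c half of the "data buffer accessors" conversion named in the merge log: the old branching on use_sg versus request_buffer gives way to scsi_sg_count()/scsi_sglist()/scsi_bufflen() for querying the command, scsi_for_each_sg() for the walk, and scsi_dma_unmap() for teardown. The sketch below models only the accessor shape in userspace; the struct and macro are stand-ins, not the real <scsi/scsi_cmnd.h> definitions.

#include <stdio.h>

/* Userspace stand-ins for the SCSI data buffer accessors; the real ones
 * hide how the midlayer stores the command's buffer. */
struct scatterlist { unsigned long dma_addr; unsigned int length; };

struct scsi_cmnd_sketch {
	struct scatterlist *sgl;	/* scsi_sglist()  */
	int sg_count;			/* scsi_sg_count() */
	unsigned int bufflen;		/* scsi_bufflen() */
};

#define sketch_for_each_sg(cmd, sg, n, i) \
	for ((i) = 0, (sg) = (cmd)->sgl; (i) < (n); (i)++, (sg)++)

static void build_bdes(struct scsi_cmnd_sketch *cmd)
{
	struct scatterlist *sg;
	int i;

	if (!cmd->sg_count)		/* zero-length command: nothing mapped */
		return;

	sketch_for_each_sg(cmd, sg, cmd->sg_count, i)	/* scsi_for_each_sg() */
		printf("bde %d: addr %#lx len %u\n", i, sg->dma_addr, sg->length);

	printf("fcpDl = %u\n", cmd->bufflen);		/* total transfer length */
}

int main(void)
{
	struct scatterlist sg[2] = { { 0x1000, 512 }, { 0x2000, 512 } };
	struct scsi_cmnd_sketch cmd = { sg, 2, 1024 };

	build_bdes(&cmd);
	return 0;
}
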
@@ -279,26 +400,20 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
279 * a request buffer, but did not request use_sg. There is a third 400 * a request buffer, but did not request use_sg. There is a third
280 * case, but it does not require resource deallocation. 401 * case, but it does not require resource deallocation.
281 */ 402 */
282 if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { 403 if (psb->seg_cnt > 0)
283 dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, 404 scsi_dma_unmap(psb->pCmd);
284 psb->seg_cnt, psb->pCmd->sc_data_direction);
285 } else {
286 if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
287 dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
288 psb->pCmd->request_bufflen,
289 psb->pCmd->sc_data_direction);
290 }
291 }
292} 405}
293 406
294static void 407static void
295lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) 408lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
409 struct lpfc_iocbq *rsp_iocb)
296{ 410{
297 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; 411 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
298 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; 412 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
299 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; 413 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
300 struct lpfc_hba *phba = lpfc_cmd->scsi_hba; 414 struct lpfc_hba *phba = vport->phba;
301 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm; 415 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
416 uint32_t vpi = vport->vpi;
302 uint32_t resp_info = fcprsp->rspStatus2; 417 uint32_t resp_info = fcprsp->rspStatus2;
303 uint32_t scsi_status = fcprsp->rspStatus3; 418 uint32_t scsi_status = fcprsp->rspStatus3;
304 uint32_t *lp; 419 uint32_t *lp;
@@ -331,9 +446,9 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
331 logit = LOG_FCP; 446 logit = LOG_FCP;
332 447
333 lpfc_printf_log(phba, KERN_WARNING, logit, 448 lpfc_printf_log(phba, KERN_WARNING, logit,
334 "%d:0730 FCP command x%x failed: x%x SNS x%x x%x " 449 "%d (%d):0730 FCP command x%x failed: x%x SNS x%x x%x "
335 "Data: x%x x%x x%x x%x x%x\n", 450 "Data: x%x x%x x%x x%x x%x\n",
336 phba->brd_no, cmnd->cmnd[0], scsi_status, 451 phba->brd_no, vpi, cmnd->cmnd[0], scsi_status,
337 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info, 452 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
338 be32_to_cpu(fcprsp->rspResId), 453 be32_to_cpu(fcprsp->rspResId),
339 be32_to_cpu(fcprsp->rspSnsLen), 454 be32_to_cpu(fcprsp->rspSnsLen),
@@ -349,15 +464,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
349 } 464 }
350 } 465 }
351 466
352 cmnd->resid = 0; 467 scsi_set_resid(cmnd, 0);
353 if (resp_info & RESID_UNDER) { 468 if (resp_info & RESID_UNDER) {
354 cmnd->resid = be32_to_cpu(fcprsp->rspResId); 469 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
355 470
356 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 471 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
357 "%d:0716 FCP Read Underrun, expected %d, " 472 "%d (%d):0716 FCP Read Underrun, expected %d, "
358 "residual %d Data: x%x x%x x%x\n", phba->brd_no, 473 "residual %d Data: x%x x%x x%x\n",
359 be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, 474 phba->brd_no, vpi, be32_to_cpu(fcpcmd->fcpDl),
360 fcpi_parm, cmnd->cmnd[0], cmnd->underflow); 475 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
476 cmnd->underflow);
361 477
362 /* 478 /*
363 * If there is an under run check if under run reported by 479 * If there is an under run check if under run reported by
@@ -366,15 +482,16 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
366 */ 482 */
367 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) && 483 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
368 fcpi_parm && 484 fcpi_parm &&
369 (cmnd->resid != fcpi_parm)) { 485 (scsi_get_resid(cmnd) != fcpi_parm)) {
370 lpfc_printf_log(phba, KERN_WARNING, 486 lpfc_printf_log(phba, KERN_WARNING,
371 LOG_FCP | LOG_FCP_ERROR, 487 LOG_FCP | LOG_FCP_ERROR,
372 "%d:0735 FCP Read Check Error and Underrun " 488 "%d (%d):0735 FCP Read Check Error "
373 "Data: x%x x%x x%x x%x\n", phba->brd_no, 489 "and Underrun Data: x%x x%x x%x x%x\n",
374 be32_to_cpu(fcpcmd->fcpDl), 490 phba->brd_no, vpi,
375 cmnd->resid, 491 be32_to_cpu(fcpcmd->fcpDl),
376 fcpi_parm, cmnd->cmnd[0]); 492 scsi_get_resid(cmnd), fcpi_parm,
377 cmnd->resid = cmnd->request_bufflen; 493 cmnd->cmnd[0]);
494 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
378 host_status = DID_ERROR; 495 host_status = DID_ERROR;
379 } 496 }
380 /* 497 /*
@@ -385,22 +502,23 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
385 */ 502 */
386 if (!(resp_info & SNS_LEN_VALID) && 503 if (!(resp_info & SNS_LEN_VALID) &&
387 (scsi_status == SAM_STAT_GOOD) && 504 (scsi_status == SAM_STAT_GOOD) &&
388 (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { 505 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
506 < cmnd->underflow)) {
389 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 507 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
390 "%d:0717 FCP command x%x residual " 508 "%d (%d):0717 FCP command x%x residual "
391 "underrun converted to error " 509 "underrun converted to error "
392 "Data: x%x x%x x%x\n", phba->brd_no, 510 "Data: x%x x%x x%x\n",
393 cmnd->cmnd[0], cmnd->request_bufflen, 511 phba->brd_no, vpi, cmnd->cmnd[0],
394 cmnd->resid, cmnd->underflow); 512 scsi_bufflen(cmnd),
395 513 scsi_get_resid(cmnd), cmnd->underflow);
396 host_status = DID_ERROR; 514 host_status = DID_ERROR;
397 } 515 }
398 } else if (resp_info & RESID_OVER) { 516 } else if (resp_info & RESID_OVER) {
399 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 517 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
400 "%d:0720 FCP command x%x residual " 518 "%d (%d):0720 FCP command x%x residual "
401 "overrun error. Data: x%x x%x \n", 519 "overrun error. Data: x%x x%x \n",
402 phba->brd_no, cmnd->cmnd[0], 520 phba->brd_no, vpi, cmnd->cmnd[0],
403 cmnd->request_bufflen, cmnd->resid); 521 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
404 host_status = DID_ERROR; 522 host_status = DID_ERROR;
405 523
406 /* 524 /*
@@ -410,13 +528,14 @@ lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
410 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && 528 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
411 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { 529 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
412 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, 530 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
413 "%d:0734 FCP Read Check Error Data: " 531 "%d (%d):0734 FCP Read Check Error Data: "
414 "x%x x%x x%x x%x\n", phba->brd_no, 532 "x%x x%x x%x x%x\n",
415 be32_to_cpu(fcpcmd->fcpDl), 533 phba->brd_no, vpi,
416 be32_to_cpu(fcprsp->rspResId), 534 be32_to_cpu(fcpcmd->fcpDl),
417 fcpi_parm, cmnd->cmnd[0]); 535 be32_to_cpu(fcprsp->rspResId),
536 fcpi_parm, cmnd->cmnd[0]);
418 host_status = DID_ERROR; 537 host_status = DID_ERROR;
419 cmnd->resid = cmnd->request_bufflen; 538 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
420 } 539 }
421 540
422 out: 541 out:
@@ -429,9 +548,13 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
429{ 548{
430 struct lpfc_scsi_buf *lpfc_cmd = 549 struct lpfc_scsi_buf *lpfc_cmd =
431 (struct lpfc_scsi_buf *) pIocbIn->context1; 550 (struct lpfc_scsi_buf *) pIocbIn->context1;
551 struct lpfc_vport *vport = pIocbIn->vport;
432 struct lpfc_rport_data *rdata = lpfc_cmd->rdata; 552 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
433 struct lpfc_nodelist *pnode = rdata->pnode; 553 struct lpfc_nodelist *pnode = rdata->pnode;
434 struct scsi_cmnd *cmd = lpfc_cmd->pCmd; 554 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
555 uint32_t vpi = (lpfc_cmd->cur_iocbq.vport
556 ? lpfc_cmd->cur_iocbq.vport->vpi
557 : 0);
435 int result; 558 int result;
436 struct scsi_device *sdev, *tmp_sdev; 559 struct scsi_device *sdev, *tmp_sdev;
437 int depth = 0; 560 int depth = 0;
@@ -447,22 +570,31 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
447 lpfc_cmd->status = IOSTAT_DEFAULT; 570 lpfc_cmd->status = IOSTAT_DEFAULT;
448 571
449 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 572 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
450 "%d:0729 FCP cmd x%x failed <%d/%d> status: " 573 "%d (%d):0729 FCP cmd x%x failed <%d/%d> "
451 "x%x result: x%x Data: x%x x%x\n", 574 "status: x%x result: x%x Data: x%x x%x\n",
452 phba->brd_no, cmd->cmnd[0], cmd->device->id, 575 phba->brd_no, vpi, cmd->cmnd[0],
453 cmd->device->lun, lpfc_cmd->status, 576 cmd->device ? cmd->device->id : 0xffff,
454 lpfc_cmd->result, pIocbOut->iocb.ulpContext, 577 cmd->device ? cmd->device->lun : 0xffff,
578 lpfc_cmd->status, lpfc_cmd->result,
579 pIocbOut->iocb.ulpContext,
455 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 580 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
456 581
457 switch (lpfc_cmd->status) { 582 switch (lpfc_cmd->status) {
458 case IOSTAT_FCP_RSP_ERROR: 583 case IOSTAT_FCP_RSP_ERROR:
459 /* Call FCP RSP handler to determine result */ 584 /* Call FCP RSP handler to determine result */
460 lpfc_handle_fcp_err(lpfc_cmd,pIocbOut); 585 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
461 break; 586 break;
462 case IOSTAT_NPORT_BSY: 587 case IOSTAT_NPORT_BSY:
463 case IOSTAT_FABRIC_BSY: 588 case IOSTAT_FABRIC_BSY:
464 cmd->result = ScsiResult(DID_BUS_BUSY, 0); 589 cmd->result = ScsiResult(DID_BUS_BUSY, 0);
465 break; 590 break;
591 case IOSTAT_LOCAL_REJECT:
592 if (lpfc_cmd->result == RJT_UNAVAIL_PERM ||
593 lpfc_cmd->result == IOERR_NO_RESOURCES ||
594 lpfc_cmd->result == RJT_LOGIN_REQUIRED) {
595 cmd->result = ScsiResult(DID_REQUEUE, 0);
596 break;
597 } /* else: fall through */
466 default: 598 default:
467 cmd->result = ScsiResult(DID_ERROR, 0); 599 cmd->result = ScsiResult(DID_ERROR, 0);
468 break; 600 break;
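
The new IOSTAT_LOCAL_REJECT arm upgrades three transient firmware rejections (RJT_UNAVAIL_PERM, IOERR_NO_RESOURCES, RJT_LOGIN_REQUIRED) to DID_REQUEUE so the midlayer resubmits the command, while any other local reject still falls through to DID_ERROR. A compact model of that status translation; the enum values below are placeholders, not the driver's codes.

#include <stdio.h>

enum iostat { IOSTAT_RSP_ERROR, IOSTAT_PORT_BSY, IOSTAT_LOCAL_REJ, IOSTAT_OTHER };
enum result { R_RSP_HANDLER, R_BUS_BUSY, R_REQUEUE, R_ERROR };

/* transient_reject stands in for the three reject codes checked above. */
static enum result map_status(enum iostat st, int transient_reject)
{
	switch (st) {
	case IOSTAT_RSP_ERROR:
		return R_RSP_HANDLER;	/* parse the FCP response payload */
	case IOSTAT_PORT_BSY:
		return R_BUS_BUSY;	/* DID_BUS_BUSY */
	case IOSTAT_LOCAL_REJ:
		if (transient_reject)
			return R_REQUEUE;	/* DID_REQUEUE: midlayer retries */
		/* fall through */
	default:
		return R_ERROR;		/* DID_ERROR */
	}
}

int main(void)
{
	printf("%d\n", map_status(IOSTAT_LOCAL_REJ, 1));	/* R_REQUEUE */
	printf("%d\n", map_status(IOSTAT_LOCAL_REJ, 0));	/* R_ERROR   */
	return 0;
}
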
@@ -479,11 +611,12 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
479 uint32_t *lp = (uint32_t *)cmd->sense_buffer; 611 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
480 612
481 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 613 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
482 "%d:0710 Iodone <%d/%d> cmd %p, error x%x " 614 "%d (%d):0710 Iodone <%d/%d> cmd %p, error "
483 "SNS x%x x%x Data: x%x x%x\n", 615 "x%x SNS x%x x%x Data: x%x x%x\n",
484 phba->brd_no, cmd->device->id, 616 phba->brd_no, vpi, cmd->device->id,
485 cmd->device->lun, cmd, cmd->result, 617 cmd->device->lun, cmd, cmd->result,
486 *lp, *(lp + 3), cmd->retries, cmd->resid); 618 *lp, *(lp + 3), cmd->retries,
619 scsi_get_resid(cmd));
487 } 620 }
488 621
489 result = cmd->result; 622 result = cmd->result;
@@ -496,6 +629,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
496 return; 629 return;
497 } 630 }
498 631
632
633 if (!result)
634 lpfc_rampup_queue_depth(phba, sdev);
635
499 if (!result && pnode != NULL && 636 if (!result && pnode != NULL &&
500 ((jiffies - pnode->last_ramp_up_time) > 637 ((jiffies - pnode->last_ramp_up_time) >
501 LPFC_Q_RAMP_UP_INTERVAL * HZ) && 638 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
@@ -534,7 +671,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
534 tmp_sdev->queue_depth - 1); 671 tmp_sdev->queue_depth - 1);
535 } 672 }
536 /* 673 /*
537 * The queue depth cannot be lowered any more. 674 * The queue depth cannot be lowered any more.
538 * Modify the returned error code to store 675 * Modify the returned error code to store
539 * the final depth value set by 676 * the final depth value set by
540 * scsi_track_queue_full. 677 * scsi_track_queue_full.
@@ -544,8 +681,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
544 681
545 if (depth) { 682 if (depth) {
546 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 683 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
547 "%d:0711 detected queue full - lun queue depth " 684 "%d (%d):0711 detected queue full - "
548 " adjusted to %d.\n", phba->brd_no, depth); 685 "lun queue depth adjusted to %d.\n",
686 phba->brd_no, vpi, depth);
549 } 687 }
550 } 688 }
551 689
@@ -553,9 +691,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
553} 691}
554 692
555static void 693static void
556lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd, 694lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
557 struct lpfc_nodelist *pnode) 695 struct lpfc_nodelist *pnode)
558{ 696{
697 struct lpfc_hba *phba = vport->phba;
559 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; 698 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
560 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; 699 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
561 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 700 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
@@ -592,7 +731,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
592 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first 731 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
593 * data bde entry. 732 * data bde entry.
594 */ 733 */
595 if (scsi_cmnd->use_sg) { 734 if (scsi_sg_count(scsi_cmnd)) {
596 if (datadir == DMA_TO_DEVICE) { 735 if (datadir == DMA_TO_DEVICE) {
597 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; 736 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
598 iocb_cmd->un.fcpi.fcpi_parm = 0; 737 iocb_cmd->un.fcpi.fcpi_parm = 0;
@@ -602,23 +741,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
602 } else { 741 } else {
603 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; 742 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
604 iocb_cmd->ulpPU = PARM_READ_CHECK; 743 iocb_cmd->ulpPU = PARM_READ_CHECK;
605 iocb_cmd->un.fcpi.fcpi_parm = 744 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
606 scsi_cmnd->request_bufflen;
607 fcp_cmnd->fcpCntl3 = READ_DATA;
608 phba->fc4InputRequests++;
609 }
610 } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
611 if (datadir == DMA_TO_DEVICE) {
612 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
613 iocb_cmd->un.fcpi.fcpi_parm = 0;
614 iocb_cmd->ulpPU = 0;
615 fcp_cmnd->fcpCntl3 = WRITE_DATA;
616 phba->fc4OutputRequests++;
617 } else {
618 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
619 iocb_cmd->ulpPU = PARM_READ_CHECK;
620 iocb_cmd->un.fcpi.fcpi_parm =
621 scsi_cmnd->request_bufflen;
622 fcp_cmnd->fcpCntl3 = READ_DATA; 745 fcp_cmnd->fcpCntl3 = READ_DATA;
623 phba->fc4InputRequests++; 746 phba->fc4InputRequests++;
624 } 747 }
@@ -642,15 +765,15 @@ lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
642 piocbq->context1 = lpfc_cmd; 765 piocbq->context1 = lpfc_cmd;
643 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; 766 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
644 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout; 767 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
768 piocbq->vport = vport;
645} 769}
646 770
647static int 771static int
648lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, 772lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
649 struct lpfc_scsi_buf *lpfc_cmd, 773 struct lpfc_scsi_buf *lpfc_cmd,
650 unsigned int lun, 774 unsigned int lun,
651 uint8_t task_mgmt_cmd) 775 uint8_t task_mgmt_cmd)
652{ 776{
653 struct lpfc_sli *psli;
654 struct lpfc_iocbq *piocbq; 777 struct lpfc_iocbq *piocbq;
655 IOCB_t *piocb; 778 IOCB_t *piocb;
656 struct fcp_cmnd *fcp_cmnd; 779 struct fcp_cmnd *fcp_cmnd;
@@ -661,8 +784,9 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
661 return 0; 784 return 0;
662 } 785 }
663 786
664 psli = &phba->sli;
665 piocbq = &(lpfc_cmd->cur_iocbq); 787 piocbq = &(lpfc_cmd->cur_iocbq);
788 piocbq->vport = vport;
789
666 piocb = &piocbq->iocb; 790 piocb = &piocbq->iocb;
667 791
668 fcp_cmnd = lpfc_cmd->fcp_cmnd; 792 fcp_cmnd = lpfc_cmd->fcp_cmnd;
@@ -688,7 +812,7 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
688 piocb->ulpTimeout = lpfc_cmd->timeout; 812 piocb->ulpTimeout = lpfc_cmd->timeout;
689 } 813 }
690 814
691 return (1); 815 return 1;
692} 816}
693 817
694static void 818static void
@@ -704,10 +828,11 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
704} 828}
705 829
706static int 830static int
707lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, 831lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
708 unsigned tgt_id, unsigned int lun, 832 unsigned tgt_id, unsigned int lun,
709 struct lpfc_rport_data *rdata) 833 struct lpfc_rport_data *rdata)
710{ 834{
835 struct lpfc_hba *phba = vport->phba;
711 struct lpfc_iocbq *iocbq; 836 struct lpfc_iocbq *iocbq;
712 struct lpfc_iocbq *iocbqrsp; 837 struct lpfc_iocbq *iocbqrsp;
713 int ret; 838 int ret;
@@ -716,12 +841,11 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
716 return FAILED; 841 return FAILED;
717 842
718 lpfc_cmd->rdata = rdata; 843 lpfc_cmd->rdata = rdata;
719 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, lun, 844 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
720 FCP_TARGET_RESET); 845 FCP_TARGET_RESET);
721 if (!ret) 846 if (!ret)
722 return FAILED; 847 return FAILED;
723 848
724 lpfc_cmd->scsi_hba = phba;
725 iocbq = &lpfc_cmd->cur_iocbq; 849 iocbq = &lpfc_cmd->cur_iocbq;
726 iocbqrsp = lpfc_sli_get_iocbq(phba); 850 iocbqrsp = lpfc_sli_get_iocbq(phba);
727 851
@@ -730,10 +854,10 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
730 854
731 /* Issue Target Reset to TGT <num> */ 855 /* Issue Target Reset to TGT <num> */
732 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 856 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
733 "%d:0702 Issue Target Reset to TGT %d " 857 "%d (%d):0702 Issue Target Reset to TGT %d "
734 "Data: x%x x%x\n", 858 "Data: x%x x%x\n",
735 phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, 859 phba->brd_no, vport->vpi, tgt_id,
736 rdata->pnode->nlp_flag); 860 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
737 861
738 ret = lpfc_sli_issue_iocb_wait(phba, 862 ret = lpfc_sli_issue_iocb_wait(phba,
739 &phba->sli.ring[phba->sli.fcp_ring], 863 &phba->sli.ring[phba->sli.fcp_ring],
@@ -758,7 +882,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba,
758const char * 882const char *
759lpfc_info(struct Scsi_Host *host) 883lpfc_info(struct Scsi_Host *host)
760{ 884{
761 struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata; 885 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
886 struct lpfc_hba *phba = vport->phba;
762 int len; 887 int len;
763 static char lpfcinfobuf[384]; 888 static char lpfcinfobuf[384];
764 889
@@ -800,26 +925,22 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba)
800 925
801void lpfc_poll_timeout(unsigned long ptr) 926void lpfc_poll_timeout(unsigned long ptr)
802{ 927{
803 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 928 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
804 unsigned long iflag;
805
806 spin_lock_irqsave(phba->host->host_lock, iflag);
807 929
808 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 930 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
809 lpfc_sli_poll_fcp_ring (phba); 931 lpfc_sli_poll_fcp_ring (phba);
810 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 932 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
811 lpfc_poll_rearm_timer(phba); 933 lpfc_poll_rearm_timer(phba);
812 } 934 }
813
814 spin_unlock_irqrestore(phba->host->host_lock, iflag);
815} 935}
816 936
817static int 937static int
818lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) 938lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
819{ 939{
820 struct lpfc_hba *phba = 940 struct Scsi_Host *shost = cmnd->device->host;
821 (struct lpfc_hba *) cmnd->device->host->hostdata; 941 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
822 struct lpfc_sli *psli = &phba->sli; 942 struct lpfc_hba *phba = vport->phba;
943 struct lpfc_sli *psli = &phba->sli;
823 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 944 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
824 struct lpfc_nodelist *ndlp = rdata->pnode; 945 struct lpfc_nodelist *ndlp = rdata->pnode;
825 struct lpfc_scsi_buf *lpfc_cmd; 946 struct lpfc_scsi_buf *lpfc_cmd;
@@ -840,11 +961,14 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
840 cmnd->result = ScsiResult(DID_BUS_BUSY, 0); 961 cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
841 goto out_fail_command; 962 goto out_fail_command;
842 } 963 }
843 lpfc_cmd = lpfc_get_scsi_buf (phba); 964 lpfc_cmd = lpfc_get_scsi_buf(phba);
844 if (lpfc_cmd == NULL) { 965 if (lpfc_cmd == NULL) {
966 lpfc_adjust_queue_depth(phba);
967
845 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 968 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
846 "%d:0707 driver's buffer pool is empty, " 969 "%d (%d):0707 driver's buffer pool is empty, "
847 "IO busied\n", phba->brd_no); 970 "IO busied\n",
971 phba->brd_no, vport->vpi);
848 goto out_host_busy; 972 goto out_host_busy;
849 } 973 }
850 974
@@ -862,10 +986,10 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
862 if (err) 986 if (err)
863 goto out_host_busy_free_buf; 987 goto out_host_busy_free_buf;
864 988
865 lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp); 989 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
866 990
867 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], 991 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
868 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); 992 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
869 if (err) 993 if (err)
870 goto out_host_busy_free_buf; 994 goto out_host_busy_free_buf;
871 995
@@ -907,8 +1031,9 @@ lpfc_block_error_handler(struct scsi_cmnd *cmnd)
907static int 1031static int
908lpfc_abort_handler(struct scsi_cmnd *cmnd) 1032lpfc_abort_handler(struct scsi_cmnd *cmnd)
909{ 1033{
910 struct Scsi_Host *shost = cmnd->device->host; 1034 struct Scsi_Host *shost = cmnd->device->host;
911 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1035 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1036 struct lpfc_hba *phba = vport->phba;
912 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; 1037 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
913 struct lpfc_iocbq *iocb; 1038 struct lpfc_iocbq *iocb;
914 struct lpfc_iocbq *abtsiocb; 1039 struct lpfc_iocbq *abtsiocb;
@@ -918,8 +1043,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
918 int ret = SUCCESS; 1043 int ret = SUCCESS;
919 1044
920 lpfc_block_error_handler(cmnd); 1045 lpfc_block_error_handler(cmnd);
921 spin_lock_irq(shost->host_lock);
922
923 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; 1046 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
924 BUG_ON(!lpfc_cmd); 1047 BUG_ON(!lpfc_cmd);
925 1048
@@ -956,12 +1079,13 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
956 1079
957 icmd->ulpLe = 1; 1080 icmd->ulpLe = 1;
958 icmd->ulpClass = cmd->ulpClass; 1081 icmd->ulpClass = cmd->ulpClass;
959 if (phba->hba_state >= LPFC_LINK_UP) 1082 if (lpfc_is_link_up(phba))
960 icmd->ulpCommand = CMD_ABORT_XRI_CN; 1083 icmd->ulpCommand = CMD_ABORT_XRI_CN;
961 else 1084 else
962 icmd->ulpCommand = CMD_CLOSE_XRI_CN; 1085 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
963 1086
964 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 1087 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
1088 abtsiocb->vport = vport;
965 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { 1089 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
966 lpfc_sli_release_iocbq(phba, abtsiocb); 1090 lpfc_sli_release_iocbq(phba, abtsiocb);
967 ret = FAILED; 1091 ret = FAILED;
@@ -977,9 +1101,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
977 if (phba->cfg_poll & DISABLE_FCP_RING_INT) 1101 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
978 lpfc_sli_poll_fcp_ring (phba); 1102 lpfc_sli_poll_fcp_ring (phba);
979 1103
980 spin_unlock_irq(phba->host->host_lock); 1104 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
981 schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
982 spin_lock_irq(phba->host->host_lock);
983 if (++loop_count 1105 if (++loop_count
984 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT) 1106 > (2 * phba->cfg_devloss_tmo)/LPFC_ABORT_WAIT)
985 break; 1107 break;
@@ -988,30 +1110,30 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
988 if (lpfc_cmd->pCmd == cmnd) { 1110 if (lpfc_cmd->pCmd == cmnd) {
989 ret = FAILED; 1111 ret = FAILED;
990 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1112 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
991 "%d:0748 abort handler timed out waiting for " 1113 "%d (%d):0748 abort handler timed out waiting "
992 "abort to complete: ret %#x, ID %d, LUN %d, " 1114 "for abort to complete: ret %#x, ID %d, "
993 "snum %#lx\n", 1115 "LUN %d, snum %#lx\n",
994 phba->brd_no, ret, cmnd->device->id, 1116 phba->brd_no, vport->vpi, ret,
995 cmnd->device->lun, cmnd->serial_number); 1117 cmnd->device->id, cmnd->device->lun,
1118 cmnd->serial_number);
996 } 1119 }
997 1120
998 out: 1121 out:
999 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1122 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1000 "%d:0749 SCSI Layer I/O Abort Request " 1123 "%d (%d):0749 SCSI Layer I/O Abort Request "
1001 "Status x%x ID %d LUN %d snum %#lx\n", 1124 "Status x%x ID %d LUN %d snum %#lx\n",
1002 phba->brd_no, ret, cmnd->device->id, 1125 phba->brd_no, vport->vpi, ret, cmnd->device->id,
1003 cmnd->device->lun, cmnd->serial_number); 1126 cmnd->device->lun, cmnd->serial_number);
1004 1127
1005 spin_unlock_irq(shost->host_lock);
1006
1007 return ret; 1128 return ret;
1008} 1129}
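
The abort handler's wait loop no longer brackets its sleep with host_lock acquire/release: since the handler now runs lockless, the old unlock-sleep-relock dance around schedule_timeout_uninterruptible() disappears, and with it any risk of sleeping while holding a spinlock. A userspace caricature of the surviving poll-until-the-abort-completes loop; the predicate and bounds are stand-ins.

#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>

#define ABORT_WAIT 2	/* seconds, mirroring LPFC_ABORT_WAIT */

static bool abort_completed(int polls)
{
	return polls >= 3;	/* stand-in for "lpfc_cmd->pCmd != cmnd" */
}

int main(void)
{
	int loop_count = 0;
	int max_loops = 5;	/* stand-in for (2 * devloss_tmo) / ABORT_WAIT */

	/* Poll for abort completion.  No lock is held across the sleep;
	 * the old code had to unlock, sleep, and relock on every pass. */
	while (!abort_completed(loop_count)) {
		sleep(ABORT_WAIT);
		if (++loop_count > max_loops)
			break;
	}
	printf("waited %d polls\n", loop_count);
	return 0;
}
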
1009 1130
1010static int 1131static int
1011lpfc_device_reset_handler(struct scsi_cmnd *cmnd) 1132lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1012{ 1133{
1013 struct Scsi_Host *shost = cmnd->device->host; 1134 struct Scsi_Host *shost = cmnd->device->host;
1014 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1135 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1136 struct lpfc_hba *phba = vport->phba;
1015 struct lpfc_scsi_buf *lpfc_cmd; 1137 struct lpfc_scsi_buf *lpfc_cmd;
1016 struct lpfc_iocbq *iocbq, *iocbqrsp; 1138 struct lpfc_iocbq *iocbq, *iocbqrsp;
1017 struct lpfc_rport_data *rdata = cmnd->device->hostdata; 1139 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
@@ -1022,28 +1144,26 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1022 int cnt, loopcnt; 1144 int cnt, loopcnt;
1023 1145
1024 lpfc_block_error_handler(cmnd); 1146 lpfc_block_error_handler(cmnd);
1025 spin_lock_irq(shost->host_lock);
1026 loopcnt = 0; 1147 loopcnt = 0;
1027 /* 1148 /*
1028 * If target is not in a MAPPED state, delay the reset until 1149 * If target is not in a MAPPED state, delay the reset until
1029 * target is rediscovered or devloss timeout expires. 1150 * target is rediscovered or devloss timeout expires.
1030 */ 1151 */
1031 while ( 1 ) { 1152 while (1) {
1032 if (!pnode) 1153 if (!pnode)
1033 goto out; 1154 goto out;
1034 1155
1035 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) { 1156 if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1036 spin_unlock_irq(phba->host->host_lock);
1037 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1157 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1038 spin_lock_irq(phba->host->host_lock);
1039 loopcnt++; 1158 loopcnt++;
1040 rdata = cmnd->device->hostdata; 1159 rdata = cmnd->device->hostdata;
1041 if (!rdata || 1160 if (!rdata ||
1042 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) { 1161 (loopcnt > ((phba->cfg_devloss_tmo * 2) + 1))) {
1043 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1162 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1044 "%d:0721 LUN Reset rport failure:" 1163 "%d (%d):0721 LUN Reset rport "
1045 " cnt x%x rdata x%p\n", 1164 "failure: cnt x%x rdata x%p\n",
1046 phba->brd_no, loopcnt, rdata); 1165 phba->brd_no, vport->vpi,
1166 loopcnt, rdata);
1047 goto out; 1167 goto out;
1048 } 1168 }
1049 pnode = rdata->pnode; 1169 pnode = rdata->pnode;
@@ -1054,15 +1174,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1054 break; 1174 break;
1055 } 1175 }
1056 1176
1057 lpfc_cmd = lpfc_get_scsi_buf (phba); 1177 lpfc_cmd = lpfc_get_scsi_buf(phba);
1058 if (lpfc_cmd == NULL) 1178 if (lpfc_cmd == NULL)
1059 goto out; 1179 goto out;
1060 1180
1061 lpfc_cmd->timeout = 60; 1181 lpfc_cmd->timeout = 60;
1062 lpfc_cmd->scsi_hba = phba;
1063 lpfc_cmd->rdata = rdata; 1182 lpfc_cmd->rdata = rdata;
1064 1183
1065 ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, cmnd->device->lun, 1184 ret = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, cmnd->device->lun,
1066 FCP_TARGET_RESET); 1185 FCP_TARGET_RESET);
1067 if (!ret) 1186 if (!ret)
1068 goto out_free_scsi_buf; 1187 goto out_free_scsi_buf;
@@ -1075,8 +1194,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1075 goto out_free_scsi_buf; 1194 goto out_free_scsi_buf;
1076 1195
1077 lpfc_printf_log(phba, KERN_INFO, LOG_FCP, 1196 lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
1078 "%d:0703 Issue target reset to TGT %d LUN %d rpi x%x " 1197 "%d (%d):0703 Issue target reset to TGT %d LUN %d "
1079 "nlp_flag x%x\n", phba->brd_no, cmnd->device->id, 1198 "rpi x%x nlp_flag x%x\n",
1199 phba->brd_no, vport->vpi, cmnd->device->id,
1080 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); 1200 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1081 1201
1082 iocb_status = lpfc_sli_issue_iocb_wait(phba, 1202 iocb_status = lpfc_sli_issue_iocb_wait(phba,
@@ -1111,9 +1231,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1111 0, LPFC_CTX_LUN); 1231 0, LPFC_CTX_LUN);
1112 loopcnt = 0; 1232 loopcnt = 0;
1113 while(cnt) { 1233 while(cnt) {
1114 spin_unlock_irq(phba->host->host_lock);
1115 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1234 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1116 spin_lock_irq(phba->host->host_lock);
1117 1235
1118 if (++loopcnt 1236 if (++loopcnt
1119 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) 1237 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1127,8 +1245,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1127 1245
1128 if (cnt) { 1246 if (cnt) {
1129 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1247 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1130 "%d:0719 device reset I/O flush failure: cnt x%x\n", 1248 "%d (%d):0719 device reset I/O flush failure: "
1131 phba->brd_no, cnt); 1249 "cnt x%x\n",
1250 phba->brd_no, vport->vpi, cnt);
1132 ret = FAILED; 1251 ret = FAILED;
1133 } 1252 }
1134 1253
@@ -1137,21 +1256,21 @@ out_free_scsi_buf:
1137 lpfc_release_scsi_buf(phba, lpfc_cmd); 1256 lpfc_release_scsi_buf(phba, lpfc_cmd);
1138 } 1257 }
1139 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1258 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1140 "%d:0713 SCSI layer issued device reset (%d, %d) " 1259 "%d (%d):0713 SCSI layer issued device reset (%d, %d) "
1141 "return x%x status x%x result x%x\n", 1260 "return x%x status x%x result x%x\n",
1142 phba->brd_no, cmnd->device->id, cmnd->device->lun, 1261 phba->brd_no, vport->vpi, cmnd->device->id,
1143 ret, cmd_status, cmd_result); 1262 cmnd->device->lun, ret, cmd_status, cmd_result);
1144 1263
1145out: 1264out:
1146 spin_unlock_irq(shost->host_lock);
1147 return ret; 1265 return ret;
1148} 1266}
1149 1267
1150static int 1268static int
1151lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) 1269lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1152{ 1270{
1153 struct Scsi_Host *shost = cmnd->device->host; 1271 struct Scsi_Host *shost = cmnd->device->host;
1154 struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata; 1272 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1273 struct lpfc_hba *phba = vport->phba;
1155 struct lpfc_nodelist *ndlp = NULL; 1274 struct lpfc_nodelist *ndlp = NULL;
1156 int match; 1275 int match;
1157 int ret = FAILED, i, err_count = 0; 1276 int ret = FAILED, i, err_count = 0;
@@ -1159,7 +1278,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1159 struct lpfc_scsi_buf * lpfc_cmd; 1278 struct lpfc_scsi_buf * lpfc_cmd;
1160 1279
1161 lpfc_block_error_handler(cmnd); 1280 lpfc_block_error_handler(cmnd);
1162 spin_lock_irq(shost->host_lock);
1163 1281
1164 lpfc_cmd = lpfc_get_scsi_buf(phba); 1282 lpfc_cmd = lpfc_get_scsi_buf(phba);
1165 if (lpfc_cmd == NULL) 1283 if (lpfc_cmd == NULL)
@@ -1167,7 +1285,6 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1167 1285
1168 /* The lpfc_cmd storage is reused. Set all loop invariants. */ 1286 /* The lpfc_cmd storage is reused. Set all loop invariants. */
1169 lpfc_cmd->timeout = 60; 1287 lpfc_cmd->timeout = 60;
1170 lpfc_cmd->scsi_hba = phba;
1171 1288
1172 /* 1289 /*
1173 * Since the driver manages a single bus device, reset all 1290 * Since the driver manages a single bus device, reset all
@@ -1177,7 +1294,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1177 for (i = 0; i < LPFC_MAX_TARGET; i++) { 1294 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1178 /* Search for mapped node by target ID */ 1295 /* Search for mapped node by target ID */
1179 match = 0; 1296 match = 0;
1180 list_for_each_entry(ndlp, &phba->fc_nodes, nlp_listp) { 1297 spin_lock_irq(shost->host_lock);
1298 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1181 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE && 1299 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1182 i == ndlp->nlp_sid && 1300 i == ndlp->nlp_sid &&
1183 ndlp->rport) { 1301 ndlp->rport) {
@@ -1185,15 +1303,18 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1185 break; 1303 break;
1186 } 1304 }
1187 } 1305 }
1306 spin_unlock_irq(shost->host_lock);
1188 if (!match) 1307 if (!match)
1189 continue; 1308 continue;
1190 1309
1191 ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, i, cmnd->device->lun, 1310 ret = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1311 cmnd->device->lun,
1192 ndlp->rport->dd_data); 1312 ndlp->rport->dd_data);
1193 if (ret != SUCCESS) { 1313 if (ret != SUCCESS) {
1194 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1314 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1195 "%d:0700 Bus Reset on target %d failed\n", 1315 "%d (%d):0700 Bus Reset on target %d "
1196 phba->brd_no, i); 1316 "failed\n",
1317 phba->brd_no, vport->vpi, i);
1197 err_count++; 1318 err_count++;
1198 break; 1319 break;
1199 } 1320 }
@@ -1219,9 +1340,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1219 0, 0, 0, LPFC_CTX_HOST); 1340 0, 0, 0, LPFC_CTX_HOST);
1220 loopcnt = 0; 1341 loopcnt = 0;
1221 while(cnt) { 1342 while(cnt) {
1222 spin_unlock_irq(phba->host->host_lock);
1223 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ); 1343 schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
1224 spin_lock_irq(phba->host->host_lock);
1225 1344
1226 if (++loopcnt 1345 if (++loopcnt
1227 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT) 1346 > (2 * phba->cfg_devloss_tmo)/LPFC_RESET_WAIT)
@@ -1234,25 +1353,24 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1234 1353
1235 if (cnt) { 1354 if (cnt) {
1236 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1355 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1237 "%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n", 1356 "%d (%d):0715 Bus Reset I/O flush failure: "
1238 phba->brd_no, cnt, i); 1357 "cnt x%x left x%x\n",
1358 phba->brd_no, vport->vpi, cnt, i);
1239 ret = FAILED; 1359 ret = FAILED;
1240 } 1360 }
1241 1361
1242 lpfc_printf_log(phba, 1362 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1243 KERN_ERR, 1363 "%d (%d):0714 SCSI layer issued Bus Reset Data: x%x\n",
1244 LOG_FCP, 1364 phba->brd_no, vport->vpi, ret);
1245 "%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
1246 phba->brd_no, ret);
1247out: 1365out:
1248 spin_unlock_irq(shost->host_lock);
1249 return ret; 1366 return ret;
1250} 1367}
1251 1368
1252static int 1369static int
1253lpfc_slave_alloc(struct scsi_device *sdev) 1370lpfc_slave_alloc(struct scsi_device *sdev)
1254{ 1371{
1255 struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata; 1372 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1373 struct lpfc_hba *phba = vport->phba;
1256 struct lpfc_scsi_buf *scsi_buf = NULL; 1374 struct lpfc_scsi_buf *scsi_buf = NULL;
1257 struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); 1375 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1258 uint32_t total = 0, i; 1376 uint32_t total = 0, i;
@@ -1273,27 +1391,35 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1273 */ 1391 */
1274 total = phba->total_scsi_bufs; 1392 total = phba->total_scsi_bufs;
1275 num_to_alloc = phba->cfg_lun_queue_depth + 2; 1393 num_to_alloc = phba->cfg_lun_queue_depth + 2;
1276 if (total >= phba->cfg_hba_queue_depth) { 1394
1395 /* Allow some exchanges to be available always to complete discovery */
1396 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1277 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1397 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1278 "%d:0704 At limitation of %d preallocated " 1398 "%d (%d):0704 At limitation of %d "
1279 "command buffers\n", phba->brd_no, total); 1399 "preallocated command buffers\n",
1400 phba->brd_no, vport->vpi, total);
1280 return 0; 1401 return 0;
1281 } else if (total + num_to_alloc > phba->cfg_hba_queue_depth) { 1402
1403 /* Allow some exchanges to be available always to complete discovery */
1404 } else if (total + num_to_alloc >
1405 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1282 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, 1406 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
1283 "%d:0705 Allocation request of %d command " 1407 "%d (%d):0705 Allocation request of %d "
1284 "buffers will exceed max of %d. Reducing " 1408 "command buffers will exceed max of %d. "
1285 "allocation request to %d.\n", phba->brd_no, 1409 "Reducing allocation request to %d.\n",
1286 num_to_alloc, phba->cfg_hba_queue_depth, 1410 phba->brd_no, vport->vpi, num_to_alloc,
1411 phba->cfg_hba_queue_depth,
1287 (phba->cfg_hba_queue_depth - total)); 1412 (phba->cfg_hba_queue_depth - total));
1288 num_to_alloc = phba->cfg_hba_queue_depth - total; 1413 num_to_alloc = phba->cfg_hba_queue_depth - total;
1289 } 1414 }
1290 1415
1291 for (i = 0; i < num_to_alloc; i++) { 1416 for (i = 0; i < num_to_alloc; i++) {
1292 scsi_buf = lpfc_new_scsi_buf(phba); 1417 scsi_buf = lpfc_new_scsi_buf(vport);
1293 if (!scsi_buf) { 1418 if (!scsi_buf) {
1294 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 1419 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1295 "%d:0706 Failed to allocate command " 1420 "%d (%d):0706 Failed to allocate "
1296 "buffer\n", phba->brd_no); 1421 "command buffer\n",
1422 phba->brd_no, vport->vpi);
1297 break; 1423 break;
1298 } 1424 }
1299 1425
@@ -1308,8 +1434,9 @@ lpfc_slave_alloc(struct scsi_device *sdev)
1308static int 1434static int
1309lpfc_slave_configure(struct scsi_device *sdev) 1435lpfc_slave_configure(struct scsi_device *sdev)
1310{ 1436{
1311 struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata; 1437 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1312 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1438 struct lpfc_hba *phba = vport->phba;
1439 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1313 1440
1314 if (sdev->tagged_supported) 1441 if (sdev->tagged_supported)
1315 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth); 1442 scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
@@ -1340,6 +1467,7 @@ lpfc_slave_destroy(struct scsi_device *sdev)
1340 return; 1467 return;
1341} 1468}
1342 1469
1470
1343struct scsi_host_template lpfc_template = { 1471struct scsi_host_template lpfc_template = {
1344 .module = THIS_MODULE, 1472 .module = THIS_MODULE,
1345 .name = LPFC_DRIVER_NAME, 1473 .name = LPFC_DRIVER_NAME,
@@ -1352,11 +1480,10 @@ struct scsi_host_template lpfc_template = {
1352 .slave_configure = lpfc_slave_configure, 1480 .slave_configure = lpfc_slave_configure,
1353 .slave_destroy = lpfc_slave_destroy, 1481 .slave_destroy = lpfc_slave_destroy,
1354 .scan_finished = lpfc_scan_finished, 1482 .scan_finished = lpfc_scan_finished,
1355 .scan_start = lpfc_scan_start,
1356 .this_id = -1, 1483 .this_id = -1,
1357 .sg_tablesize = LPFC_SG_SEG_CNT, 1484 .sg_tablesize = LPFC_SG_SEG_CNT,
1358 .cmd_per_lun = LPFC_CMD_PER_LUN, 1485 .cmd_per_lun = LPFC_CMD_PER_LUN,
1359 .use_clustering = ENABLE_CLUSTERING, 1486 .use_clustering = ENABLE_CLUSTERING,
1360 .shost_attrs = lpfc_host_attrs, 1487 .shost_attrs = lpfc_hba_attrs,
1361 .max_sectors = 0xFFFF, 1488 .max_sectors = 0xFFFF,
1362}; 1489};
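
The template above is the driver's whole contract with the SCSI midlayer: entry points and sizing limits published as one static struct scsi_host_template. A toy model of that callbacks-in-a-struct registration style follows; the struct here only mimics the real template's shape and is not kernel API.

#include <stdio.h>

/* Miniature model of a scsi_host_template: the driver publishes one static
 * table of callbacks and limits, and the core calls through it. */
struct host_template {
	const char *name;
	int (*queuecommand)(int tag);
	int (*abort_handler)(int tag);
	int sg_tablesize;
	int cmd_per_lun;
};

static int my_queuecommand(int tag) { printf("queued %d\n", tag); return 0; }
static int my_abort(int tag)        { printf("abort %d\n", tag);  return 0; }

static struct host_template my_template = {
	.name          = "sketch",
	.queuecommand  = my_queuecommand,
	.abort_handler = my_abort,
	.sg_tablesize  = 64,
	.cmd_per_lun   = 3,
};

int main(void)
{
	my_template.queuecommand(7);	/* the midlayer would make these calls */
	my_template.abort_handler(7);
	return 0;
}
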
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index cdcd2535803f..31787bb6d53e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2005 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -110,7 +110,6 @@ struct fcp_cmnd {
110struct lpfc_scsi_buf { 110struct lpfc_scsi_buf {
111 struct list_head list; 111 struct list_head list;
112 struct scsi_cmnd *pCmd; 112 struct scsi_cmnd *pCmd;
113 struct lpfc_hba *scsi_hba;
114 struct lpfc_rport_data *rdata; 113 struct lpfc_rport_data *rdata;
115 114
116 uint32_t timeout; 115 uint32_t timeout;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a1e721459e2b..f4d5a6b00fde 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -38,23 +38,25 @@
38#include "lpfc_crtn.h" 38#include "lpfc_crtn.h"
39#include "lpfc_logmsg.h" 39#include "lpfc_logmsg.h"
40#include "lpfc_compat.h" 40#include "lpfc_compat.h"
41#include "lpfc_debugfs.h"
41 42
42/* 43/*
43 * Define macro to log: Mailbox command x%x cannot issue Data 44 * Define macro to log: Mailbox command x%x cannot issue Data
44 * This allows multiple uses of lpfc_msgBlk0311 45 * This allows multiple uses of lpfc_msgBlk0311
45 * w/o perturbing log msg utility. 46 * w/o perturbing log msg utility.
46 */ 47 */
47#define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \ 48#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
48 lpfc_printf_log(phba, \ 49 lpfc_printf_log(phba, \
49 KERN_INFO, \ 50 KERN_INFO, \
50 LOG_MBOX | LOG_SLI, \ 51 LOG_MBOX | LOG_SLI, \
51 "%d:0311 Mailbox command x%x cannot issue " \ 52 "%d (%d):0311 Mailbox command x%x cannot " \
52 "Data: x%x x%x x%x\n", \ 53 "issue Data: x%x x%x x%x\n", \
53 phba->brd_no, \ 54 phba->brd_no, \
54 mb->mbxCommand, \ 55 pmbox->vport ? pmbox->vport->vpi : 0, \
55 phba->hba_state, \ 56 pmbox->mb.mbxCommand, \
57 phba->pport->port_state, \
56 psli->sli_flag, \ 58 psli->sli_flag, \
57 flag); 59 flag)
58 60
59 61
60/* There are only four IOCB completion types. */ 62/* There are only four IOCB completion types. */
@@ -65,8 +67,26 @@ typedef enum _lpfc_iocb_type {
65 LPFC_ABORT_IOCB 67 LPFC_ABORT_IOCB
66} lpfc_iocb_type; 68} lpfc_iocb_type;
67 69
68struct lpfc_iocbq * 70 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer
69lpfc_sli_get_iocbq(struct lpfc_hba * phba) 71 * to the start of the ring, and the slot number of the
72 * desired iocb entry, calc a pointer to that entry.
73 */
74static inline IOCB_t *
75lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
76{
77 return (IOCB_t *) (((char *) pring->cmdringaddr) +
78 pring->cmdidx * phba->iocb_cmd_size);
79}
80
81static inline IOCB_t *
82lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
83{
84 return (IOCB_t *) (((char *) pring->rspringaddr) +
85 pring->rspidx * phba->iocb_rsp_size);
86}
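
lpfc_cmd_iocb()/lpfc_resp_iocb() centralize ring indexing that can no longer be plain array arithmetic: SLI-2 and SLI-3 use different IOCB entry sizes, so a slot's address must be computed as base plus index times a runtime entry size, in bytes. The lookup in isolation:

#include <stdio.h>
#include <stdlib.h>

/* Variable-stride ring lookup: entries differ in size by SLI revision,
 * so index in bytes from the ring base rather than by C array indexing. */
static void *ring_entry(void *base, unsigned int idx, size_t entry_size)
{
	return (char *)base + (size_t)idx * entry_size;
}

int main(void)
{
	size_t entry_size = 128;	/* illustrative SLI-3-style entry size */
	char *ring = calloc(16, entry_size);

	printf("slot 0 at %p, slot 3 at %p\n",
	       ring_entry(ring, 0, entry_size), ring_entry(ring, 3, entry_size));
	free(ring);
	return 0;
}
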
87
88static struct lpfc_iocbq *
89__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
70{ 90{
71 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 91 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 struct lpfc_iocbq * iocbq = NULL; 92 struct lpfc_iocbq * iocbq = NULL;
@@ -75,10 +95,22 @@ lpfc_sli_get_iocbq(struct lpfc_hba * phba)
75 return iocbq; 95 return iocbq;
76} 96}
77 97
98struct lpfc_iocbq *
99lpfc_sli_get_iocbq(struct lpfc_hba *phba)
100{
101 struct lpfc_iocbq * iocbq = NULL;
102 unsigned long iflags;
103
104 spin_lock_irqsave(&phba->hbalock, iflags);
105 iocbq = __lpfc_sli_get_iocbq(phba);
106 spin_unlock_irqrestore(&phba->hbalock, iflags);
107 return iocbq;
108}
109
78void 110void
79lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 111__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
80{ 112{
81 size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb); 113 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
82 114
83 /* 115 /*
84 * Clean all volatile data fields, preserve iotag and node struct. 116 * Clean all volatile data fields, preserve iotag and node struct.
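
The start_clean change swaps a hand-rolled null-pointer cast for the standard offsetof() macro: both yield the byte offset of the iocb member, so everything before it (iotag, list linkage) survives the reset of the tail, but offsetof is well-defined C while the NULL-deref idiom is formally undefined behavior. A short demonstration on a stand-in struct:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct iocbq_sketch {
	int iotag;		/* preserved across reuse */
	char iocb[32];		/* wiped before the object is recycled */
};

int main(void)
{
	struct iocbq_sketch q = { .iotag = 42 };
	size_t start_clean = offsetof(struct iocbq_sketch, iocb);

	/* Clear only the volatile tail, keeping iotag intact. */
	memset((char *)&q + start_clean, 0, sizeof(q) - start_clean);
	printf("iotag survives: %d\n", q.iotag);
	return 0;
}
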
@@ -87,6 +119,19 @@ lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
87 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 119 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
88} 120}
89 121
122void
123lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
124{
125 unsigned long iflags;
126
127 /*
128 * Clean all volatile data fields, preserve iotag and node struct.
129 */
130 spin_lock_irqsave(&phba->hbalock, iflags);
131 __lpfc_sli_release_iocbq(phba, iocbq);
132 spin_unlock_irqrestore(&phba->hbalock, iflags);
133}
134
90/* 135/*
91 * Translate the iocb command to an iocb command type used to decide the final 136 * Translate the iocb command to an iocb command type used to decide the final
92 * disposition of each completed IOCB. 137 * disposition of each completed IOCB.
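
The hunk above introduces the convention used throughout this patch: a __-prefixed variant that requires hbalock to be held by the caller, plus a thin wrapper that acquires and releases the lock itself, so hot paths already under the lock avoid a second acquire. A userspace pthread sketch of the same convention (all names illustrative):

#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_count;

/* __pool_get: caller must already hold pool_lock. */
static int __pool_get(void)
{
	return pool_count > 0 ? pool_count-- : 0;
}

/* pool_get: standalone entry point; takes and releases the lock. */
static int pool_get(void)
{
	int got;

	pthread_mutex_lock(&pool_lock);
	got = __pool_get();
	pthread_mutex_unlock(&pool_lock);
	return got;
}

int main(void)
{
	pool_count = 4;
	return pool_get() ? 0 : 1;
}
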
@@ -155,6 +200,9 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
155 case CMD_RCV_ELS_REQ_CX: 200 case CMD_RCV_ELS_REQ_CX:
156 case CMD_RCV_SEQUENCE64_CX: 201 case CMD_RCV_SEQUENCE64_CX:
157 case CMD_RCV_ELS_REQ64_CX: 202 case CMD_RCV_ELS_REQ64_CX:
203 case CMD_IOCB_RCV_SEQ64_CX:
204 case CMD_IOCB_RCV_ELS64_CX:
205 case CMD_IOCB_RCV_CONT64_CX:
158 type = LPFC_UNSOL_IOCB; 206 type = LPFC_UNSOL_IOCB;
159 break; 207 break;
160 default: 208 default:
@@ -166,73 +214,77 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
166} 214}
167 215
168static int 216static int
169lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb) 217lpfc_sli_ring_map(struct lpfc_hba *phba)
170{ 218{
171 struct lpfc_sli *psli = &phba->sli; 219 struct lpfc_sli *psli = &phba->sli;
172 MAILBOX_t *pmbox = &pmb->mb; 220 LPFC_MBOXQ_t *pmb;
173 int i, rc; 221 MAILBOX_t *pmbox;
222 int i, rc, ret = 0;
174 223
224 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
225 if (!pmb)
226 return -ENOMEM;
227 pmbox = &pmb->mb;
228 phba->link_state = LPFC_INIT_MBX_CMDS;
175 for (i = 0; i < psli->num_rings; i++) { 229 for (i = 0; i < psli->num_rings; i++) {
176 phba->hba_state = LPFC_INIT_MBX_CMDS;
177 lpfc_config_ring(phba, i, pmb); 230 lpfc_config_ring(phba, i, pmb);
178 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
179 if (rc != MBX_SUCCESS) { 232 if (rc != MBX_SUCCESS) {
180 lpfc_printf_log(phba, 233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
181 KERN_ERR, 234 "%d:0446 Adapter failed to init (%d), "
182 LOG_INIT,
183 "%d:0446 Adapter failed to init, "
184 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 235 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
185 "ring %d\n", 236 "ring %d\n",
186 phba->brd_no, 237 phba->brd_no, rc,
187 pmbox->mbxCommand, 238 pmbox->mbxCommand,
188 pmbox->mbxStatus, 239 pmbox->mbxStatus,
189 i); 240 i);
190 phba->hba_state = LPFC_HBA_ERROR; 241 phba->link_state = LPFC_HBA_ERROR;
191 return -ENXIO; 242 ret = -ENXIO;
243 break;
192 } 244 }
193 } 245 }
194 return 0; 246 mempool_free(pmb, phba->mbox_mem_pool);
247 return ret;
195} 248}
196 249
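
The reworked lpfc_sli_ring_map() above allocates its mailbox once, converts the early `return -ENXIO` into a `break`, and frees on a single exit path, so the mempool buffer cannot leak on error. The shape of that allocate-once/one-exit pattern, sketched standalone (try_cmd() is only a stand-in for issuing one CFG_RING mailbox command):

#include <stdlib.h>

static int try_cmd(void *buf, int ring) { (void)buf; (void)ring; return 0; }

static int map_rings(int num_rings)
{
	void *buf = malloc(128);
	int i, ret = 0;

	if (!buf)
		return -1;		/* -ENOMEM analogue */

	for (i = 0; i < num_rings; i++) {
		if (try_cmd(buf, i) != 0) {
			ret = -2;	/* record the error ... */
			break;		/* ... but fall through to the free */
		}
	}
	free(buf);			/* single cleanup point for all paths */
	return ret;
}

int main(void) { return map_rings(4); }
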
197static int 250static int
198lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba, 251lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
199 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb) 252 struct lpfc_iocbq *piocb)
200{ 253{
201 list_add_tail(&piocb->list, &pring->txcmplq); 254 list_add_tail(&piocb->list, &pring->txcmplq);
202 pring->txcmplq_cnt++; 255 pring->txcmplq_cnt++;
203 if (unlikely(pring->ringno == LPFC_ELS_RING)) 256 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
204 mod_timer(&phba->els_tmofunc, 257 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
205 jiffies + HZ * (phba->fc_ratov << 1)); 258 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
259 if (!piocb->vport)
260 BUG();
261 else
262 mod_timer(&piocb->vport->els_tmofunc,
263 jiffies + HZ * (phba->fc_ratov << 1));
264 }
206 265
207 return (0); 266
267 return 0;
208} 268}
209 269
210static struct lpfc_iocbq * 270static struct lpfc_iocbq *
211lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 271lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
212{ 272{
213 struct list_head *dlp;
214 struct lpfc_iocbq *cmd_iocb; 273 struct lpfc_iocbq *cmd_iocb;
215 274
216 dlp = &pring->txq; 275 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
217 cmd_iocb = NULL; 276 if (cmd_iocb != NULL)
218 list_remove_head((&pring->txq), cmd_iocb,
219 struct lpfc_iocbq,
220 list);
221 if (cmd_iocb) {
222 /* If the first ptr is not equal to the list header,
223 * dequeue the IOCBQ_t and return it.
224 */
225 pring->txq_cnt--; 277 pring->txq_cnt--;
226 } 278 return cmd_iocb;
227 return (cmd_iocb);
228} 279}
229 280
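
The simplified lpfc_sli_ringtx_get() above leans entirely on a remove-head helper whose contract is: unlink the first element, or leave the out-pointer NULL when the queue is empty. list_remove_head() itself is an lpfc macro over struct list_head; this sketch only models its contract with a plain singly linked list:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; int id; };

/* Detach and return the head, or NULL when the queue is empty. */
static struct node *pop_head(struct node **head)
{
	struct node *n = *head;

	if (n)
		*head = n->next;
	return n;
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };
	struct node *q = &a;
	struct node *n = pop_head(&q);

	printf("%d\n", n ? n->id : -1);	/* 1; q now points at b */
	return 0;
}
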
230static IOCB_t * 281static IOCB_t *
231lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 282lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
232{ 283{
233 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 284 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
285 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
286 &phba->slim2p->mbx.us.s2.port[pring->ringno];
234 uint32_t max_cmd_idx = pring->numCiocb; 287 uint32_t max_cmd_idx = pring->numCiocb;
235 IOCB_t *iocb = NULL;
236 288
237 if ((pring->next_cmdidx == pring->cmdidx) && 289 if ((pring->next_cmdidx == pring->cmdidx) &&
238 (++pring->next_cmdidx >= max_cmd_idx)) 290 (++pring->next_cmdidx >= max_cmd_idx))
@@ -249,15 +301,17 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
249 phba->brd_no, pring->ringno, 301 phba->brd_no, pring->ringno,
250 pring->local_getidx, max_cmd_idx); 302 pring->local_getidx, max_cmd_idx);
251 303
252 phba->hba_state = LPFC_HBA_ERROR; 304 phba->link_state = LPFC_HBA_ERROR;
253 /* 305 /*
254 * All error attention handlers are posted to 306 * All error attention handlers are posted to
255 * worker thread 307 * worker thread
256 */ 308 */
257 phba->work_ha |= HA_ERATT; 309 phba->work_ha |= HA_ERATT;
258 phba->work_hs = HS_FFER3; 310 phba->work_hs = HS_FFER3;
311
312 /* hbalock should already be held */
259 if (phba->work_wait) 313 if (phba->work_wait)
260 wake_up(phba->work_wait); 314 lpfc_worker_wake_up(phba);
261 315
262 return NULL; 316 return NULL;
263 } 317 }
@@ -266,39 +320,34 @@ lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
266 return NULL; 320 return NULL;
267 } 321 }
268 322
269 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx); 323 return lpfc_cmd_iocb(phba, pring);
270
271 return iocb;
272} 324}
273 325
274uint16_t 326uint16_t
275lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq) 327lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
276{ 328{
277 struct lpfc_iocbq ** new_arr; 329 struct lpfc_iocbq **new_arr;
278 struct lpfc_iocbq ** old_arr; 330 struct lpfc_iocbq **old_arr;
279 size_t new_len; 331 size_t new_len;
280 struct lpfc_sli *psli = &phba->sli; 332 struct lpfc_sli *psli = &phba->sli;
281 uint16_t iotag; 333 uint16_t iotag;
282 334
283 spin_lock_irq(phba->host->host_lock); 335 spin_lock_irq(&phba->hbalock);
284 iotag = psli->last_iotag; 336 iotag = psli->last_iotag;
285 if(++iotag < psli->iocbq_lookup_len) { 337 if(++iotag < psli->iocbq_lookup_len) {
286 psli->last_iotag = iotag; 338 psli->last_iotag = iotag;
287 psli->iocbq_lookup[iotag] = iocbq; 339 psli->iocbq_lookup[iotag] = iocbq;
288 spin_unlock_irq(phba->host->host_lock); 340 spin_unlock_irq(&phba->hbalock);
289 iocbq->iotag = iotag; 341 iocbq->iotag = iotag;
290 return iotag; 342 return iotag;
291 } 343 } else if (psli->iocbq_lookup_len < (0xffff
292 else if (psli->iocbq_lookup_len < (0xffff
293 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 344 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
294 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 345 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
295 spin_unlock_irq(phba->host->host_lock); 346 spin_unlock_irq(&phba->hbalock);
296 new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *), 347 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
297 GFP_KERNEL); 348 GFP_KERNEL);
298 if (new_arr) { 349 if (new_arr) {
299 memset((char *)new_arr, 0, 350 spin_lock_irq(&phba->hbalock);
300 new_len * sizeof (struct lpfc_iocbq *));
301 spin_lock_irq(phba->host->host_lock);
302 old_arr = psli->iocbq_lookup; 351 old_arr = psli->iocbq_lookup;
303 if (new_len <= psli->iocbq_lookup_len) { 352 if (new_len <= psli->iocbq_lookup_len) {
304 /* highly improbable case */ 353 /* highly improbable case */
@@ -307,11 +356,11 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
307 if(++iotag < psli->iocbq_lookup_len) { 356 if(++iotag < psli->iocbq_lookup_len) {
308 psli->last_iotag = iotag; 357 psli->last_iotag = iotag;
309 psli->iocbq_lookup[iotag] = iocbq; 358 psli->iocbq_lookup[iotag] = iocbq;
310 spin_unlock_irq(phba->host->host_lock); 359 spin_unlock_irq(&phba->hbalock);
311 iocbq->iotag = iotag; 360 iocbq->iotag = iotag;
312 return iotag; 361 return iotag;
313 } 362 }
314 spin_unlock_irq(phba->host->host_lock); 363 spin_unlock_irq(&phba->hbalock);
315 return 0; 364 return 0;
316 } 365 }
317 if (psli->iocbq_lookup) 366 if (psli->iocbq_lookup)
@@ -322,13 +371,13 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
322 psli->iocbq_lookup_len = new_len; 371 psli->iocbq_lookup_len = new_len;
323 psli->last_iotag = iotag; 372 psli->last_iotag = iotag;
324 psli->iocbq_lookup[iotag] = iocbq; 373 psli->iocbq_lookup[iotag] = iocbq;
325 spin_unlock_irq(phba->host->host_lock); 374 spin_unlock_irq(&phba->hbalock);
326 iocbq->iotag = iotag; 375 iocbq->iotag = iotag;
327 kfree(old_arr); 376 kfree(old_arr);
328 return iotag; 377 return iotag;
329 } 378 }
330 } else 379 } else
331 spin_unlock_irq(phba->host->host_lock); 380 spin_unlock_irq(&phba->hbalock);
332 381
333 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 382 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
334 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n", 383 "%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -349,7 +398,7 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
349 /* 398 /*
350 * Issue iocb command to adapter 399 * Issue iocb command to adapter
351 */ 400 */
352 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t)); 401 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
353 wmb(); 402 wmb();
354 pring->stats.iocb_cmd++; 403 pring->stats.iocb_cmd++;
355 404
@@ -361,20 +410,18 @@ lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
361 if (nextiocb->iocb_cmpl) 410 if (nextiocb->iocb_cmpl)
362 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 411 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
363 else 412 else
364 lpfc_sli_release_iocbq(phba, nextiocb); 413 __lpfc_sli_release_iocbq(phba, nextiocb);
365 414
366 /* 415 /*
367 * Let the HBA know what IOCB slot will be the next one the 416 * Let the HBA know what IOCB slot will be the next one the
368 * driver will put a command into. 417 * driver will put a command into.
369 */ 418 */
370 pring->cmdidx = pring->next_cmdidx; 419 pring->cmdidx = pring->next_cmdidx;
371 writel(pring->cmdidx, phba->MBslimaddr 420 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
372 + (SLIMOFF + (pring->ringno * 2)) * 4);
373} 421}
374 422
375static void 423static void
376lpfc_sli_update_full_ring(struct lpfc_hba * phba, 424lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
377 struct lpfc_sli_ring *pring)
378{ 425{
379 int ringno = pring->ringno; 426 int ringno = pring->ringno;
380 427
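
lpfc_sli_submit_iocb() above copies the IOCB into the ring, issues wmb(), and only then publishes the new cmdidx (now through the mapped host_gp area rather than a computed SLIM offset). Real MMIO needs writel() and barriers that userspace cannot reproduce; this C11-atomics sketch models only the same publish-then-doorbell ordering on shared memory:

#include <stdatomic.h>
#include <string.h>

#define SLOTS 8

static char ring[SLOTS][64];
static _Atomic unsigned int put_idx;	/* the consumer polls this */

static void submit(unsigned int idx, const void *cmd, size_t len)
{
	memcpy(ring[idx], cmd, len);	/* 1. fill the slot */

	/* 2. release store: the slot contents become visible no later
	 * than the new index, mirroring the wmb() + writel() pair. */
	atomic_store_explicit(&put_idx, (idx + 1) % SLOTS,
			      memory_order_release);
}

int main(void)
{
	char cmd[64] = "FCP command payload";

	submit(0, cmd, sizeof(cmd));
	return 0;
}
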
@@ -393,8 +440,7 @@ lpfc_sli_update_full_ring(struct lpfc_hba * phba,
393} 440}
394 441
395static void 442static void
396lpfc_sli_update_ring(struct lpfc_hba * phba, 443lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
397 struct lpfc_sli_ring *pring)
398{ 444{
399 int ringno = pring->ringno; 445 int ringno = pring->ringno;
400 446
@@ -407,7 +453,7 @@ lpfc_sli_update_ring(struct lpfc_hba * phba,
407} 453}
408 454
409static void 455static void
410lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring) 456lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
411{ 457{
412 IOCB_t *iocb; 458 IOCB_t *iocb;
413 struct lpfc_iocbq *nextiocb; 459 struct lpfc_iocbq *nextiocb;
@@ -420,7 +466,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
420 * (d) IOCB processing is not blocked by the outstanding mbox command. 466 * (d) IOCB processing is not blocked by the outstanding mbox command.
421 */ 467 */
422 if (pring->txq_cnt && 468 if (pring->txq_cnt &&
423 (phba->hba_state > LPFC_LINK_DOWN) && 469 lpfc_is_link_up(phba) &&
424 (pring->ringno != phba->sli.fcp_ring || 470 (pring->ringno != phba->sli.fcp_ring ||
425 phba->sli.sli_flag & LPFC_PROCESS_LA) && 471 phba->sli.sli_flag & LPFC_PROCESS_LA) &&
426 !(pring->flag & LPFC_STOP_IOCB_MBX)) { 472 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
@@ -440,11 +486,15 @@ lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
440 486
441/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */ 487/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
442static void 488static void
443lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno) 489lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
444{ 490{
445 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno]; 491 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
492 &phba->slim2p->mbx.us.s3_pgp.port[ringno] :
493 &phba->slim2p->mbx.us.s2.port[ringno];
494 unsigned long iflags;
446 495
447 /* If the ring is active, flag it */ 496 /* If the ring is active, flag it */
497 spin_lock_irqsave(&phba->hbalock, iflags);
448 if (phba->sli.ring[ringno].cmdringaddr) { 498 if (phba->sli.ring[ringno].cmdringaddr) {
449 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) { 499 if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
450 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX; 500 phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
@@ -453,11 +503,176 @@ lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
453 */ 503 */
454 phba->sli.ring[ringno].local_getidx 504 phba->sli.ring[ringno].local_getidx
455 = le32_to_cpu(pgp->cmdGetInx); 505 = le32_to_cpu(pgp->cmdGetInx);
456 spin_lock_irq(phba->host->host_lock);
457 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]); 506 lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
458 spin_unlock_irq(phba->host->host_lock);
459 } 507 }
460 } 508 }
509 spin_unlock_irqrestore(&phba->hbalock, iflags);
510}
511
512struct lpfc_hbq_entry *
513lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
514{
515 struct hbq_s *hbqp = &phba->hbqs[hbqno];
516
517 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
518 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
519 hbqp->next_hbqPutIdx = 0;
520
521 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
522 uint32_t raw_index = phba->hbq_get[hbqno];
523 uint32_t getidx = le32_to_cpu(raw_index);
524
525 hbqp->local_hbqGetIdx = getidx;
526
527 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
528 lpfc_printf_log(phba, KERN_ERR,
529 LOG_SLI | LOG_VPORT,
530 "%d:1802 HBQ %d: local_hbqGetIdx "
531 "%u is > than hbqp->entry_count %u\n",
532 phba->brd_no, hbqno,
533 hbqp->local_hbqGetIdx,
534 hbqp->entry_count);
535
536 phba->link_state = LPFC_HBA_ERROR;
537 return NULL;
538 }
539
540 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
541 return NULL;
542 }
543
544 return (struct lpfc_hbq_entry *) phba->hbqslimp.virt + hbqp->hbqPutIdx;
545}
546
547void
548lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
549{
550 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
551 struct hbq_dmabuf *hbq_buf;
552
553 /* Return all memory used by all HBQs */
554 list_for_each_entry_safe(dmabuf, next_dmabuf,
555 &phba->hbq_buffer_list, list) {
556 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
557 list_del(&hbq_buf->dbuf.list);
558 lpfc_hbq_free(phba, hbq_buf->dbuf.virt, hbq_buf->dbuf.phys);
559 kfree(hbq_buf);
560 }
561}
562
563static void
564lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
565 struct hbq_dmabuf *hbq_buf)
566{
567 struct lpfc_hbq_entry *hbqe;
568 dma_addr_t physaddr = hbq_buf->dbuf.phys;
569
570 /* Get next HBQ entry slot to use */
571 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
572 if (hbqe) {
573 struct hbq_s *hbqp = &phba->hbqs[hbqno];
574
575 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
576 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
577 hbqe->bde.tus.f.bdeSize = FCELSSIZE;
578 hbqe->bde.tus.f.bdeFlags = 0;
579 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
580 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
581 /* Sync SLIM */
582 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
583 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
584 /* flush */
585 readl(phba->hbq_put + hbqno);
586 list_add_tail(&hbq_buf->dbuf.list, &phba->hbq_buffer_list);
587 }
588}
589
590static struct lpfc_hbq_init lpfc_els_hbq = {
591 .rn = 1,
592 .entry_count = 200,
593 .mask_count = 0,
594 .profile = 0,
595 .ring_mask = 1 << LPFC_ELS_RING,
596 .buffer_count = 0,
597 .init_count = 20,
598 .add_count = 5,
599};
600
601static struct lpfc_hbq_init *lpfc_hbq_defs[] = {
602 &lpfc_els_hbq,
603};
604
605int
606lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
607{
608 uint32_t i, start, end;
609 struct hbq_dmabuf *hbq_buffer;
610
611 start = lpfc_hbq_defs[hbqno]->buffer_count;
612 end = count + lpfc_hbq_defs[hbqno]->buffer_count;
613 if (end > lpfc_hbq_defs[hbqno]->entry_count) {
614 end = lpfc_hbq_defs[hbqno]->entry_count;
615 }
616
617 /* Populate HBQ entries */
618 for (i = start; i < end; i++) {
619 hbq_buffer = kmalloc(sizeof(struct hbq_dmabuf),
620 GFP_KERNEL);
621 if (!hbq_buffer)
622 return 1;
623 hbq_buffer->dbuf.virt = lpfc_hbq_alloc(phba, MEM_PRI,
624 &hbq_buffer->dbuf.phys);
625 if (hbq_buffer->dbuf.virt == NULL)
626 return 1;
627 hbq_buffer->tag = (i | (hbqno << 16));
628 lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer);
629 lpfc_hbq_defs[hbqno]->buffer_count++;
630 }
631 return 0;
632}
633
634int
635lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
636{
637 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
638 lpfc_hbq_defs[qno]->add_count));
639}
640
641int
642lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
643{
644 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
645 lpfc_hbq_defs[qno]->init_count));
646}
647
648struct hbq_dmabuf *
649lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
650{
651 struct lpfc_dmabuf *d_buf;
652 struct hbq_dmabuf *hbq_buf;
653
654 list_for_each_entry(d_buf, &phba->hbq_buffer_list, list) {
655 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
656 if ((hbq_buf->tag & 0xffff) == tag) {
657 return hbq_buf;
658 }
659 }
660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
661 "%d:1803 Bad hbq tag. Data: x%x x%x\n",
662 phba->brd_no, tag,
663 lpfc_hbq_defs[tag >> 16]->buffer_count);
664 return NULL;
665}
666
667void
668lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *sp)
669{
670 uint32_t hbqno;
671
672 if (sp) {
673 hbqno = sp->tag >> 16;
674 lpfc_sli_hbq_to_firmware(phba, hbqno, sp);
675 }
461} 676}
462 677
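
lpfc_sli_next_hbq_slot() above advances a put index with wraparound and reports "full" when the next put would collide with the get index, i.e. one slot is always left open so full and empty states stay distinguishable. The bare index math, sketched without the SLIM resync the driver also performs:

#include <stdio.h>

struct hbq { unsigned int put, get, count; };

/* Return the slot to fill next, or -1 when the queue is full.
 * One entry is always left unused so put == get means "empty". */
static int next_slot(struct hbq *q)
{
	unsigned int next = (q->put + 1) % q->count;

	if (next == q->get)
		return -1;	/* full: adding one more would look empty */
	return (int)q->put;
}

int main(void)
{
	struct hbq q = { .put = 0, .get = 0, .count = 4 };

	printf("%d\n", next_slot(&q));	/* 0 */
	return 0;
}
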
463static int 678static int
@@ -511,32 +726,38 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
511 case MBX_FLASH_WR_ULA: 726 case MBX_FLASH_WR_ULA:
512 case MBX_SET_DEBUG: 727 case MBX_SET_DEBUG:
513 case MBX_LOAD_EXP_ROM: 728 case MBX_LOAD_EXP_ROM:
729 case MBX_REG_VPI:
730 case MBX_UNREG_VPI:
731 case MBX_HEARTBEAT:
514 ret = mbxCommand; 732 ret = mbxCommand;
515 break; 733 break;
516 default: 734 default:
517 ret = MBX_SHUTDOWN; 735 ret = MBX_SHUTDOWN;
518 break; 736 break;
519 } 737 }
520 return (ret); 738 return ret;
521} 739}
522static void 740static void
523lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) 741lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
524{ 742{
525 wait_queue_head_t *pdone_q; 743 wait_queue_head_t *pdone_q;
744 unsigned long drvr_flag;
526 745
527 /* 746 /*
528 * If pdone_q is empty, the driver thread gave up waiting and 747 * If pdone_q is empty, the driver thread gave up waiting and
529 * continued running. 748 * continued running.
530 */ 749 */
531 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 750 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
751 spin_lock_irqsave(&phba->hbalock, drvr_flag);
532 pdone_q = (wait_queue_head_t *) pmboxq->context1; 752 pdone_q = (wait_queue_head_t *) pmboxq->context1;
533 if (pdone_q) 753 if (pdone_q)
534 wake_up_interruptible(pdone_q); 754 wake_up_interruptible(pdone_q);
755 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
535 return; 756 return;
536} 757}
537 758
538void 759void
539lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 760lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
540{ 761{
541 struct lpfc_dmabuf *mp; 762 struct lpfc_dmabuf *mp;
542 uint16_t rpi; 763 uint16_t rpi;
@@ -553,79 +774,64 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
553 * If a REG_LOGIN succeeded after node is destroyed or node 774 * If a REG_LOGIN succeeded after node is destroyed or node
554 * is in re-discovery driver need to cleanup the RPI. 775 * is in re-discovery driver need to cleanup the RPI.
555 */ 776 */
556 if (!(phba->fc_flag & FC_UNLOADING) && 777 if (!(phba->pport->load_flag & FC_UNLOADING) &&
557 (pmb->mb.mbxCommand == MBX_REG_LOGIN64) && 778 pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
558 (!pmb->mb.mbxStatus)) { 779 !pmb->mb.mbxStatus) {
559 780
560 rpi = pmb->mb.un.varWords[0]; 781 rpi = pmb->mb.un.varWords[0];
561 lpfc_unreg_login(phba, rpi, pmb); 782 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
562 pmb->mbox_cmpl=lpfc_sli_def_mbox_cmpl; 783 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
563 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 784 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
564 if (rc != MBX_NOT_FINISHED) 785 if (rc != MBX_NOT_FINISHED)
565 return; 786 return;
566 } 787 }
567 788
568 mempool_free( pmb, phba->mbox_mem_pool); 789 mempool_free(pmb, phba->mbox_mem_pool);
569 return; 790 return;
570} 791}
571 792
572int 793int
573lpfc_sli_handle_mb_event(struct lpfc_hba * phba) 794lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
574{ 795{
575 MAILBOX_t *mbox;
576 MAILBOX_t *pmbox; 796 MAILBOX_t *pmbox;
577 LPFC_MBOXQ_t *pmb; 797 LPFC_MBOXQ_t *pmb;
578 struct lpfc_sli *psli; 798 int rc;
579 int i, rc; 799 LIST_HEAD(cmplq);
580 uint32_t process_next;
581
582 psli = &phba->sli;
583 /* We should only get here if we are in SLI2 mode */
584 if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
585 return (1);
586 }
587 800
588 phba->sli.slistat.mbox_event++; 801 phba->sli.slistat.mbox_event++;
589 802
803 /* Get all completed mailbox buffers into the cmplq */
804 spin_lock_irq(&phba->hbalock);
805 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
806 spin_unlock_irq(&phba->hbalock);
807
590 /* Get a Mailbox buffer to setup mailbox commands for callback */ 808 /* Get a Mailbox buffer to setup mailbox commands for callback */
591 if ((pmb = phba->sli.mbox_active)) { 809 do {
592 pmbox = &pmb->mb; 810 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
593 mbox = &phba->slim2p->mbx; 811 if (pmb == NULL)
812 break;
594 813
595 /* First check out the status word */ 814 pmbox = &pmb->mb;
596 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
597 815
598 /* Sanity check to ensure the host owns the mailbox */ 816 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
599 if (pmbox->mbxOwner != OWN_HOST) { 817 if (pmb->vport) {
600 /* Lets try for a while */ 818 lpfc_debugfs_disc_trc(pmb->vport,
601 for (i = 0; i < 10240; i++) { 819 LPFC_DISC_TRC_MBOX_VPORT,
602 /* First copy command data */ 820 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
603 lpfc_sli_pcimem_bcopy(mbox, pmbox, 821 (uint32_t)pmbox->mbxCommand,
604 sizeof (uint32_t)); 822 pmbox->un.varWords[0],
605 if (pmbox->mbxOwner == OWN_HOST) 823 pmbox->un.varWords[1]);
606 goto mbout; 824 }
825 else {
826 lpfc_debugfs_disc_trc(phba->pport,
827 LPFC_DISC_TRC_MBOX,
828 "MBOX cmpl: cmd:x%x mb:x%x x%x",
829 (uint32_t)pmbox->mbxCommand,
830 pmbox->un.varWords[0],
831 pmbox->un.varWords[1]);
607 } 832 }
608 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
609 <status> */
610 lpfc_printf_log(phba,
611 KERN_WARNING,
612 LOG_MBOX | LOG_SLI,
613 "%d:0304 Stray Mailbox Interrupt "
614 "mbxCommand x%x mbxStatus x%x\n",
615 phba->brd_no,
616 pmbox->mbxCommand,
617 pmbox->mbxStatus);
618
619 spin_lock_irq(phba->host->host_lock);
620 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
621 spin_unlock_irq(phba->host->host_lock);
622 return (1);
623 } 833 }
624 834
625 mbout:
626 del_timer_sync(&phba->sli.mbox_tmo);
627 phba->work_hba_events &= ~WORKER_MBOX_TMO;
628
629 /* 835 /*
630 * It is a fatal error if unknown mbox command completion. 836 * It is a fatal error if unknown mbox command completion.
631 */ 837 */
@@ -633,51 +839,50 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
633 MBX_SHUTDOWN) { 839 MBX_SHUTDOWN) {
634 840
635 /* Unknown mailbox command compl */ 841 /* Unknown mailbox command compl */
636 lpfc_printf_log(phba, 842 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
637 KERN_ERR, 843 "%d (%d):0323 Unknown Mailbox command "
638 LOG_MBOX | LOG_SLI, 844 "%x Cmpl\n",
639 "%d:0323 Unknown Mailbox command %x Cmpl\n", 845 phba->brd_no,
640 phba->brd_no, 846 pmb->vport ? pmb->vport->vpi : 0,
641 pmbox->mbxCommand); 847 pmbox->mbxCommand);
642 phba->hba_state = LPFC_HBA_ERROR; 848 phba->link_state = LPFC_HBA_ERROR;
643 phba->work_hs = HS_FFER3; 849 phba->work_hs = HS_FFER3;
644 lpfc_handle_eratt(phba); 850 lpfc_handle_eratt(phba);
645 return (0); 851 continue;
646 } 852 }
647 853
648 phba->sli.mbox_active = NULL;
649 if (pmbox->mbxStatus) { 854 if (pmbox->mbxStatus) {
650 phba->sli.slistat.mbox_stat_err++; 855 phba->sli.slistat.mbox_stat_err++;
651 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 856 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
652 /* Mbox cmd cmpl error - RETRYing */ 857 /* Mbox cmd cmpl error - RETRYing */
653 lpfc_printf_log(phba, 858 lpfc_printf_log(phba, KERN_INFO,
654 KERN_INFO, 859 LOG_MBOX | LOG_SLI,
655 LOG_MBOX | LOG_SLI, 860 "%d (%d):0305 Mbox cmd cmpl "
656 "%d:0305 Mbox cmd cmpl error - " 861 "error - RETRYing Data: x%x "
657 "RETRYing Data: x%x x%x x%x x%x\n", 862 "x%x x%x x%x\n",
658 phba->brd_no, 863 phba->brd_no,
659 pmbox->mbxCommand, 864 pmb->vport ? pmb->vport->vpi :0,
660 pmbox->mbxStatus, 865 pmbox->mbxCommand,
661 pmbox->un.varWords[0], 866 pmbox->mbxStatus,
662 phba->hba_state); 867 pmbox->un.varWords[0],
868 pmb->vport->port_state);
663 pmbox->mbxStatus = 0; 869 pmbox->mbxStatus = 0;
664 pmbox->mbxOwner = OWN_HOST; 870 pmbox->mbxOwner = OWN_HOST;
665 spin_lock_irq(phba->host->host_lock); 871 spin_lock_irq(&phba->hbalock);
666 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 872 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
667 spin_unlock_irq(phba->host->host_lock); 873 spin_unlock_irq(&phba->hbalock);
668 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 874 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
669 if (rc == MBX_SUCCESS) 875 if (rc == MBX_SUCCESS)
670 return (0); 876 continue;
671 } 877 }
672 } 878 }
673 879
674 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 880 /* Mailbox cmd <cmd> Cmpl <cmpl> */
675 lpfc_printf_log(phba, 881 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
676 KERN_INFO, 882 "%d (%d):0307 Mailbox cmd x%x Cmpl x%p "
677 LOG_MBOX | LOG_SLI,
678 "%d:0307 Mailbox cmd x%x Cmpl x%p "
679 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 883 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
680 phba->brd_no, 884 phba->brd_no,
885 pmb->vport ? pmb->vport->vpi : 0,
681 pmbox->mbxCommand, 886 pmbox->mbxCommand,
682 pmb->mbox_cmpl, 887 pmb->mbox_cmpl,
683 *((uint32_t *) pmbox), 888 *((uint32_t *) pmbox),
@@ -690,39 +895,35 @@ lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
690 pmbox->un.varWords[6], 895 pmbox->un.varWords[6],
691 pmbox->un.varWords[7]); 896 pmbox->un.varWords[7]);
692 897
693 if (pmb->mbox_cmpl) { 898 if (pmb->mbox_cmpl)
694 lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
695 pmb->mbox_cmpl(phba,pmb); 899 pmb->mbox_cmpl(phba,pmb);
696 } 900 } while (1);
697 } 901 return 0;
698 902}
699
700 do {
701 process_next = 0; /* by default don't loop */
702 spin_lock_irq(phba->host->host_lock);
703 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
704
705 /* Process next mailbox command if there is one */
706 if ((pmb = lpfc_mbox_get(phba))) {
707 spin_unlock_irq(phba->host->host_lock);
708 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
709 if (rc == MBX_NOT_FINISHED) {
710 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
711 pmb->mbox_cmpl(phba,pmb);
712 process_next = 1;
713 continue; /* loop back */
714 }
715 } else {
716 spin_unlock_irq(phba->host->host_lock);
717 /* Turn on IOCB processing */
718 for (i = 0; i < phba->sli.num_rings; i++)
719 lpfc_sli_turn_on_ring(phba, i);
720 }
721
722 } while (process_next);
723 903
724 return (0); 904static struct lpfc_dmabuf *
905lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
906{
907 struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
908
909 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
910 if (hbq_entry == NULL)
911 return NULL;
912 list_del(&hbq_entry->dbuf.list);
913 new_hbq_entry = kmalloc(sizeof(struct hbq_dmabuf), GFP_ATOMIC);
914 if (new_hbq_entry == NULL)
915 return &hbq_entry->dbuf;
916 new_hbq_entry->dbuf = hbq_entry->dbuf;
917 new_hbq_entry->tag = -1;
918 hbq_entry->dbuf.virt = lpfc_hbq_alloc(phba, 0, &hbq_entry->dbuf.phys);
919 if (hbq_entry->dbuf.virt == NULL) {
920 kfree(new_hbq_entry);
921 return &hbq_entry->dbuf;
922 }
923 lpfc_sli_free_hbq(phba, hbq_entry);
924 return &new_hbq_entry->dbuf;
725} 925}
926
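
lpfc_sli_replace_hbqbuff() above always hands the filled DMA buffer up to the caller and tries to backfill the hardware queue with a fresh allocation; only when both allocations succeed does the queue stay full. A standalone sketch of that take-and-backfill decision (malloc stands in for lpfc_hbq_alloc):

#include <stdlib.h>

struct buf { void *data; };

/* The caller always receives the filled buffer; the queue slot is
 * backfilled with a fresh buffer only when allocation succeeds. */
static struct buf *take_and_backfill(struct buf **queue_slot, struct buf *cur)
{
	struct buf *fresh = calloc(1, sizeof(*fresh));

	if (fresh && (fresh->data = malloc(2048)) != NULL) {
		*queue_slot = fresh;	/* hardware queue stays full */
	} else {
		free(fresh);
		*queue_slot = NULL;	/* queue runs one buffer short */
	}
	return cur;
}

int main(void)
{
	struct buf rx = { .data = NULL };  /* buffer just filled by "hardware" */
	struct buf *slot = &rx;
	struct buf *filled = take_and_backfill(&slot, &rx);

	return filled == &rx ? 0 : 1;	/* caller always gets the data */
}
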
726static int 927static int
727lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 928lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
728 struct lpfc_iocbq *saveq) 929 struct lpfc_iocbq *saveq)
@@ -735,7 +936,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
735 match = 0; 936 match = 0;
736 irsp = &(saveq->iocb); 937 irsp = &(saveq->iocb);
737 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) 938 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
738 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) { 939 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
940 || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
941 || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
739 Rctl = FC_ELS_REQ; 942 Rctl = FC_ELS_REQ;
740 Type = FC_ELS_DATA; 943 Type = FC_ELS_DATA;
741 } else { 944 } else {
@@ -747,13 +950,24 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
747 950
748 /* Firmware Workaround */ 951 /* Firmware Workaround */
749 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 952 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
750 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) { 953 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
954 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
751 Rctl = FC_ELS_REQ; 955 Rctl = FC_ELS_REQ;
752 Type = FC_ELS_DATA; 956 Type = FC_ELS_DATA;
753 w5p->hcsw.Rctl = Rctl; 957 w5p->hcsw.Rctl = Rctl;
754 w5p->hcsw.Type = Type; 958 w5p->hcsw.Type = Type;
755 } 959 }
756 } 960 }
961
962 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
963 if (irsp->ulpBdeCount != 0)
964 saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
965 irsp->un.ulpWord[3]);
966 if (irsp->ulpBdeCount == 2)
967 saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
968 irsp->un.ulpWord[15]);
969 }
970
757 /* unSolicited Responses */ 971 /* unSolicited Responses */
758 if (pring->prt[0].profile) { 972 if (pring->prt[0].profile) {
759 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 973 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -781,23 +995,21 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
781 /* Unexpected Rctl / Type received */ 995 /* Unexpected Rctl / Type received */
782 /* Ring <ringno> handler: unexpected 996 /* Ring <ringno> handler: unexpected
783 Rctl <Rctl> Type <Type> received */ 997 Rctl <Rctl> Type <Type> received */
784 lpfc_printf_log(phba, 998 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
785 KERN_WARNING,
786 LOG_SLI,
787 "%d:0313 Ring %d handler: unexpected Rctl x%x " 999 "%d:0313 Ring %d handler: unexpected Rctl x%x "
788 "Type x%x received \n", 1000 "Type x%x received\n",
789 phba->brd_no, 1001 phba->brd_no,
790 pring->ringno, 1002 pring->ringno,
791 Rctl, 1003 Rctl,
792 Type); 1004 Type);
793 } 1005 }
794 return(1); 1006 return 1;
795} 1007}
796 1008
797static struct lpfc_iocbq * 1009static struct lpfc_iocbq *
798lpfc_sli_iocbq_lookup(struct lpfc_hba * phba, 1010lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
799 struct lpfc_sli_ring * pring, 1011 struct lpfc_sli_ring *pring,
800 struct lpfc_iocbq * prspiocb) 1012 struct lpfc_iocbq *prspiocb)
801{ 1013{
802 struct lpfc_iocbq *cmd_iocb = NULL; 1014 struct lpfc_iocbq *cmd_iocb = NULL;
803 uint16_t iotag; 1015 uint16_t iotag;
@@ -806,7 +1018,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
806 1018
807 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 1019 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
808 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 1020 cmd_iocb = phba->sli.iocbq_lookup[iotag];
809 list_del(&cmd_iocb->list); 1021 list_del_init(&cmd_iocb->list);
810 pring->txcmplq_cnt--; 1022 pring->txcmplq_cnt--;
811 return cmd_iocb; 1023 return cmd_iocb;
812 } 1024 }
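
lpfc_sli_iocbq_lookup() above resolves a completion to its command by indexing iocbq_lookup with the response's iotag, an O(1) lookup guarded by a bounds check; the switch to list_del_init() also leaves the unlinked node in a state that is safe to re-link later. The lookup itself, sketched standalone:

#include <stdio.h>

#define MAX_TAGS 64

static void *lookup[MAX_TAGS];
static unsigned int last_tag;

/* Resolve an iotag from a response to the original command, or NULL
 * for zero or out-of-range tags (corrupt or stale responses). */
static void *find_cmd(unsigned int iotag)
{
	if (iotag != 0 && iotag <= last_tag)
		return lookup[iotag];
	return NULL;
}

int main(void)
{
	int cmd = 42;

	last_tag = 5;
	lookup[3] = &cmd;
	printf("%s\n", find_cmd(3) ? "hit" : "miss");	/* hit */
	return 0;
}
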
@@ -821,16 +1033,18 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
821} 1033}
822 1034
823static int 1035static int
824lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 1036lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
825 struct lpfc_iocbq *saveq) 1037 struct lpfc_iocbq *saveq)
826{ 1038{
827 struct lpfc_iocbq * cmdiocbp; 1039 struct lpfc_iocbq *cmdiocbp;
828 int rc = 1; 1040 int rc = 1;
829 unsigned long iflag; 1041 unsigned long iflag;
830 1042
831 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 1043 /* Based on the iotag field, get the cmd IOCB from the txcmplq */
832 spin_lock_irqsave(phba->host->host_lock, iflag); 1044 spin_lock_irqsave(&phba->hbalock, iflag);
833 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 1045 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
1046 spin_unlock_irqrestore(&phba->hbalock, iflag);
1047
834 if (cmdiocbp) { 1048 if (cmdiocbp) {
835 if (cmdiocbp->iocb_cmpl) { 1049 if (cmdiocbp->iocb_cmpl) {
836 /* 1050 /*
@@ -846,17 +1060,8 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
846 saveq->iocb.un.ulpWord[4] = 1060 saveq->iocb.un.ulpWord[4] =
847 IOERR_SLI_ABORTED; 1061 IOERR_SLI_ABORTED;
848 } 1062 }
849 spin_unlock_irqrestore(phba->host->host_lock,
850 iflag);
851 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
852 spin_lock_irqsave(phba->host->host_lock, iflag);
853 }
854 else {
855 spin_unlock_irqrestore(phba->host->host_lock,
856 iflag);
857 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
858 spin_lock_irqsave(phba->host->host_lock, iflag);
859 } 1063 }
1064 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
860 } else 1065 } else
861 lpfc_sli_release_iocbq(phba, cmdiocbp); 1066 lpfc_sli_release_iocbq(phba, cmdiocbp);
862 } else { 1067 } else {
@@ -870,29 +1075,30 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
870 * Ring <ringno> handler: unexpected completion IoTag 1075 * Ring <ringno> handler: unexpected completion IoTag
871 * <IoTag> 1076 * <IoTag>
872 */ 1077 */
873 lpfc_printf_log(phba, 1078 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
874 KERN_WARNING, 1079 "%d (%d):0322 Ring %d handler: "
875 LOG_SLI, 1080 "unexpected completion IoTag x%x "
876 "%d:0322 Ring %d handler: unexpected " 1081 "Data: x%x x%x x%x x%x\n",
877 "completion IoTag x%x Data: x%x x%x x%x x%x\n", 1082 phba->brd_no,
878 phba->brd_no, 1083 cmdiocbp->vport->vpi,
879 pring->ringno, 1084 pring->ringno,
880 saveq->iocb.ulpIoTag, 1085 saveq->iocb.ulpIoTag,
881 saveq->iocb.ulpStatus, 1086 saveq->iocb.ulpStatus,
882 saveq->iocb.un.ulpWord[4], 1087 saveq->iocb.un.ulpWord[4],
883 saveq->iocb.ulpCommand, 1088 saveq->iocb.ulpCommand,
884 saveq->iocb.ulpContext); 1089 saveq->iocb.ulpContext);
885 } 1090 }
886 } 1091 }
887 1092
888 spin_unlock_irqrestore(phba->host->host_lock, iflag);
889 return rc; 1093 return rc;
890} 1094}
891 1095
892static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba, 1096static void
893 struct lpfc_sli_ring * pring) 1097lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
894{ 1098{
895 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1099 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1100 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1101 &phba->slim2p->mbx.us.s2.port[pring->ringno];
896 /* 1102 /*
897 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 1103 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
898 * rsp ring <portRspMax> 1104 * rsp ring <portRspMax>
@@ -904,7 +1110,7 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
904 le32_to_cpu(pgp->rspPutInx), 1110 le32_to_cpu(pgp->rspPutInx),
905 pring->numRiocb); 1111 pring->numRiocb);
906 1112
907 phba->hba_state = LPFC_HBA_ERROR; 1113 phba->link_state = LPFC_HBA_ERROR;
908 1114
909 /* 1115 /*
910 * All error attention handlers are posted to 1116 * All error attention handlers are posted to
@@ -912,16 +1118,18 @@ static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
912 */ 1118 */
913 phba->work_ha |= HA_ERATT; 1119 phba->work_ha |= HA_ERATT;
914 phba->work_hs = HS_FFER3; 1120 phba->work_hs = HS_FFER3;
1121
1122 /* hbalock should already be held */
915 if (phba->work_wait) 1123 if (phba->work_wait)
916 wake_up(phba->work_wait); 1124 lpfc_worker_wake_up(phba);
917 1125
918 return; 1126 return;
919} 1127}
920 1128
921void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba) 1129void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
922{ 1130{
923 struct lpfc_sli * psli = &phba->sli; 1131 struct lpfc_sli *psli = &phba->sli;
924 struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING]; 1132 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
925 IOCB_t *irsp = NULL; 1133 IOCB_t *irsp = NULL;
926 IOCB_t *entry = NULL; 1134 IOCB_t *entry = NULL;
927 struct lpfc_iocbq *cmdiocbq = NULL; 1135 struct lpfc_iocbq *cmdiocbq = NULL;
@@ -931,13 +1139,15 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
931 uint32_t portRspPut, portRspMax; 1139 uint32_t portRspPut, portRspMax;
932 int type; 1140 int type;
933 uint32_t rsp_cmpl = 0; 1141 uint32_t rsp_cmpl = 0;
934 void __iomem *to_slim;
935 uint32_t ha_copy; 1142 uint32_t ha_copy;
1143 unsigned long iflags;
936 1144
937 pring->stats.iocb_event++; 1145 pring->stats.iocb_event++;
938 1146
939 /* The driver assumes SLI-2 mode */ 1147 pgp = (phba->sli_rev == 3) ?
940 pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1148 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1149 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1150
941 1151
942 /* 1152 /*
943 * The next available response entry should never exceed the maximum 1153 * The next available response entry should never exceed the maximum
@@ -952,15 +1162,13 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
952 1162
953 rmb(); 1163 rmb();
954 while (pring->rspidx != portRspPut) { 1164 while (pring->rspidx != portRspPut) {
955 1165 entry = lpfc_resp_iocb(phba, pring);
956 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
957
958 if (++pring->rspidx >= portRspMax) 1166 if (++pring->rspidx >= portRspMax)
959 pring->rspidx = 0; 1167 pring->rspidx = 0;
960 1168
961 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1169 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
962 (uint32_t *) &rspiocbq.iocb, 1170 (uint32_t *) &rspiocbq.iocb,
963 sizeof (IOCB_t)); 1171 phba->iocb_rsp_size);
964 irsp = &rspiocbq.iocb; 1172 irsp = &rspiocbq.iocb;
965 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1173 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
966 pring->stats.iocb_rsp++; 1174 pring->stats.iocb_rsp++;
@@ -998,8 +1206,10 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
998 break; 1206 break;
999 } 1207 }
1000 1208
1209 spin_lock_irqsave(&phba->hbalock, iflags);
1001 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1210 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1002 &rspiocbq); 1211 &rspiocbq);
1212 spin_unlock_irqrestore(&phba->hbalock, iflags);
1003 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1213 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1004 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1214 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1005 &rspiocbq); 1215 &rspiocbq);
@@ -1033,9 +1243,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1033 * been updated, sync the pgp->rspPutInx and fetch the new port 1243 * been updated, sync the pgp->rspPutInx and fetch the new port
1034 * response put pointer. 1244 * response put pointer.
1035 */ 1245 */
1036 to_slim = phba->MBslimaddr + 1246 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1037 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1038 writeb(pring->rspidx, to_slim);
1039 1247
1040 if (pring->rspidx == portRspPut) 1248 if (pring->rspidx == portRspPut)
1041 portRspPut = le32_to_cpu(pgp->rspPutInx); 1249 portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1045,13 +1253,16 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1045 ha_copy >>= (LPFC_FCP_RING * 4); 1253 ha_copy >>= (LPFC_FCP_RING * 4);
1046 1254
1047 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1255 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1256 spin_lock_irqsave(&phba->hbalock, iflags);
1048 pring->stats.iocb_rsp_full++; 1257 pring->stats.iocb_rsp_full++;
1049 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1258 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1050 writel(status, phba->CAregaddr); 1259 writel(status, phba->CAregaddr);
1051 readl(phba->CAregaddr); 1260 readl(phba->CAregaddr);
1261 spin_unlock_irqrestore(&phba->hbalock, iflags);
1052 } 1262 }
1053 if ((ha_copy & HA_R0CE_RSP) && 1263 if ((ha_copy & HA_R0CE_RSP) &&
1054 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1264 (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1265 spin_lock_irqsave(&phba->hbalock, iflags);
1055 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1266 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1056 pring->stats.iocb_cmd_empty++; 1267 pring->stats.iocb_cmd_empty++;
1057 1268
@@ -1062,6 +1273,7 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1062 if ((pring->lpfc_sli_cmd_available)) 1273 if ((pring->lpfc_sli_cmd_available))
1063 (pring->lpfc_sli_cmd_available) (phba, pring); 1274 (pring->lpfc_sli_cmd_available) (phba, pring);
1064 1275
1276 spin_unlock_irqrestore(&phba->hbalock, iflags);
1065 } 1277 }
1066 1278
1067 return; 1279 return;
@@ -1072,10 +1284,12 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
1072 * to check it explicitly. 1284 * to check it explicitly.
1073 */ 1285 */
1074static int 1286static int
1075lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba, 1287lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
1076 struct lpfc_sli_ring * pring, uint32_t mask) 1288 struct lpfc_sli_ring *pring, uint32_t mask)
1077{ 1289{
1078 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1290 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1291 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1292 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1079 IOCB_t *irsp = NULL; 1293 IOCB_t *irsp = NULL;
1080 IOCB_t *entry = NULL; 1294 IOCB_t *entry = NULL;
1081 struct lpfc_iocbq *cmdiocbq = NULL; 1295 struct lpfc_iocbq *cmdiocbq = NULL;
@@ -1086,9 +1300,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1086 lpfc_iocb_type type; 1300 lpfc_iocb_type type;
1087 unsigned long iflag; 1301 unsigned long iflag;
1088 uint32_t rsp_cmpl = 0; 1302 uint32_t rsp_cmpl = 0;
1089 void __iomem *to_slim;
1090 1303
1091 spin_lock_irqsave(phba->host->host_lock, iflag); 1304 spin_lock_irqsave(&phba->hbalock, iflag);
1092 pring->stats.iocb_event++; 1305 pring->stats.iocb_event++;
1093 1306
1094 /* 1307 /*
@@ -1099,7 +1312,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1099 portRspPut = le32_to_cpu(pgp->rspPutInx); 1312 portRspPut = le32_to_cpu(pgp->rspPutInx);
1100 if (unlikely(portRspPut >= portRspMax)) { 1313 if (unlikely(portRspPut >= portRspMax)) {
1101 lpfc_sli_rsp_pointers_error(phba, pring); 1314 lpfc_sli_rsp_pointers_error(phba, pring);
1102 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1315 spin_unlock_irqrestore(&phba->hbalock, iflag);
1103 return 1; 1316 return 1;
1104 } 1317 }
1105 1318
@@ -1110,14 +1323,15 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1110 * structure. The copy involves a byte-swap since the 1323 * structure. The copy involves a byte-swap since the
1111 * network byte order and pci byte orders are different. 1324 * network byte order and pci byte orders are different.
1112 */ 1325 */
1113 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1326 entry = lpfc_resp_iocb(phba, pring);
1327 phba->last_completion_time = jiffies;
1114 1328
1115 if (++pring->rspidx >= portRspMax) 1329 if (++pring->rspidx >= portRspMax)
1116 pring->rspidx = 0; 1330 pring->rspidx = 0;
1117 1331
1118 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1332 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1119 (uint32_t *) &rspiocbq.iocb, 1333 (uint32_t *) &rspiocbq.iocb,
1120 sizeof (IOCB_t)); 1334 phba->iocb_rsp_size);
1121 INIT_LIST_HEAD(&(rspiocbq.list)); 1335 INIT_LIST_HEAD(&(rspiocbq.list));
1122 irsp = &rspiocbq.iocb; 1336 irsp = &rspiocbq.iocb;
1123 1337
@@ -1126,16 +1340,30 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1126 rsp_cmpl++; 1340 rsp_cmpl++;
1127 1341
1128 if (unlikely(irsp->ulpStatus)) { 1342 if (unlikely(irsp->ulpStatus)) {
1343 /*
1344 * If resource errors are reported from the HBA, reduce
1345 * the queue depths of the SCSI devices.
1346 */
1347 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1348 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1349 spin_unlock_irqrestore(&phba->hbalock, iflag);
1350 lpfc_adjust_queue_depth(phba);
1351 spin_lock_irqsave(&phba->hbalock, iflag);
1352 }
1353
1129 /* Rsp ring <ringno> error: IOCB */ 1354 /* Rsp ring <ringno> error: IOCB */
1130 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1355 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1131 "%d:0336 Rsp Ring %d error: IOCB Data: " 1356 "%d:0336 Rsp Ring %d error: IOCB Data: "
1132 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1357 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1133 phba->brd_no, pring->ringno, 1358 phba->brd_no, pring->ringno,
1134 irsp->un.ulpWord[0], irsp->un.ulpWord[1], 1359 irsp->un.ulpWord[0],
1135 irsp->un.ulpWord[2], irsp->un.ulpWord[3], 1360 irsp->un.ulpWord[1],
1136 irsp->un.ulpWord[4], irsp->un.ulpWord[5], 1361 irsp->un.ulpWord[2],
1137 *(((uint32_t *) irsp) + 6), 1362 irsp->un.ulpWord[3],
1138 *(((uint32_t *) irsp) + 7)); 1363 irsp->un.ulpWord[4],
1364 irsp->un.ulpWord[5],
1365 *(((uint32_t *) irsp) + 6),
1366 *(((uint32_t *) irsp) + 7));
1139 } 1367 }
1140 1368
1141 switch (type) { 1369 switch (type) {
@@ -1149,7 +1377,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1149 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1377 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1150 "%d:0333 IOCB cmd 0x%x" 1378 "%d:0333 IOCB cmd 0x%x"
1151 " processed. Skipping" 1379 " processed. Skipping"
1152 " completion\n", phba->brd_no, 1380 " completion\n",
1381 phba->brd_no,
1153 irsp->ulpCommand); 1382 irsp->ulpCommand);
1154 break; 1383 break;
1155 } 1384 }
@@ -1161,19 +1390,19 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1161 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1390 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1162 &rspiocbq); 1391 &rspiocbq);
1163 } else { 1392 } else {
1164 spin_unlock_irqrestore( 1393 spin_unlock_irqrestore(&phba->hbalock,
1165 phba->host->host_lock, iflag); 1394 iflag);
1166 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1395 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1167 &rspiocbq); 1396 &rspiocbq);
1168 spin_lock_irqsave(phba->host->host_lock, 1397 spin_lock_irqsave(&phba->hbalock,
1169 iflag); 1398 iflag);
1170 } 1399 }
1171 } 1400 }
1172 break; 1401 break;
1173 case LPFC_UNSOL_IOCB: 1402 case LPFC_UNSOL_IOCB:
1174 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1403 spin_unlock_irqrestore(&phba->hbalock, iflag);
1175 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 1404 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1176 spin_lock_irqsave(phba->host->host_lock, iflag); 1405 spin_lock_irqsave(&phba->hbalock, iflag);
1177 break; 1406 break;
1178 default: 1407 default:
1179 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1408 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
@@ -1186,11 +1415,13 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1186 } else { 1415 } else {
1187 /* Unknown IOCB command */ 1416 /* Unknown IOCB command */
1188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1417 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1189 "%d:0334 Unknown IOCB command " 1418 "%d:0334 Unknown IOCB command "
1190 "Data: x%x, x%x x%x x%x x%x\n", 1419 "Data: x%x, x%x x%x x%x x%x\n",
1191 phba->brd_no, type, irsp->ulpCommand, 1420 phba->brd_no, type,
1192 irsp->ulpStatus, irsp->ulpIoTag, 1421 irsp->ulpCommand,
1193 irsp->ulpContext); 1422 irsp->ulpStatus,
1423 irsp->ulpIoTag,
1424 irsp->ulpContext);
1194 } 1425 }
1195 break; 1426 break;
1196 } 1427 }
@@ -1201,9 +1432,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1201 * been updated, sync the pgp->rspPutInx and fetch the new port 1432 * been updated, sync the pgp->rspPutInx and fetch the new port
1202 * response put pointer. 1433 * response put pointer.
1203 */ 1434 */
1204 to_slim = phba->MBslimaddr + 1435 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1205 (SLIMOFF + (pring->ringno * 2) + 1) * 4;
1206 writel(pring->rspidx, to_slim);
1207 1436
1208 if (pring->rspidx == portRspPut) 1437 if (pring->rspidx == portRspPut)
1209 portRspPut = le32_to_cpu(pgp->rspPutInx); 1438 portRspPut = le32_to_cpu(pgp->rspPutInx);
@@ -1228,31 +1457,31 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1228 1457
1229 } 1458 }
1230 1459
1231 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1460 spin_unlock_irqrestore(&phba->hbalock, iflag);
1232 return rc; 1461 return rc;
1233} 1462}
1234 1463
1235
1236int 1464int
1237lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba, 1465lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1238 struct lpfc_sli_ring * pring, uint32_t mask) 1466 struct lpfc_sli_ring *pring, uint32_t mask)
1239{ 1467{
1468 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
1469 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
1470 &phba->slim2p->mbx.us.s2.port[pring->ringno];
1240 IOCB_t *entry; 1471 IOCB_t *entry;
1241 IOCB_t *irsp = NULL; 1472 IOCB_t *irsp = NULL;
1242 struct lpfc_iocbq *rspiocbp = NULL; 1473 struct lpfc_iocbq *rspiocbp = NULL;
1243 struct lpfc_iocbq *next_iocb; 1474 struct lpfc_iocbq *next_iocb;
1244 struct lpfc_iocbq *cmdiocbp; 1475 struct lpfc_iocbq *cmdiocbp;
1245 struct lpfc_iocbq *saveq; 1476 struct lpfc_iocbq *saveq;
1246 struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1247 uint8_t iocb_cmd_type; 1477 uint8_t iocb_cmd_type;
1248 lpfc_iocb_type type; 1478 lpfc_iocb_type type;
1249 uint32_t status, free_saveq; 1479 uint32_t status, free_saveq;
1250 uint32_t portRspPut, portRspMax; 1480 uint32_t portRspPut, portRspMax;
1251 int rc = 1; 1481 int rc = 1;
1252 unsigned long iflag; 1482 unsigned long iflag;
1253 void __iomem *to_slim;
1254 1483
1255 spin_lock_irqsave(phba->host->host_lock, iflag); 1484 spin_lock_irqsave(&phba->hbalock, iflag);
1256 pring->stats.iocb_event++; 1485 pring->stats.iocb_event++;
1257 1486
1258 /* 1487 /*
@@ -1266,16 +1495,14 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1266 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 1495 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1267 * rsp ring <portRspMax> 1496 * rsp ring <portRspMax>
1268 */ 1497 */
1269 lpfc_printf_log(phba, 1498 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1270 KERN_ERR,
1271 LOG_SLI,
1272 "%d:0303 Ring %d handler: portRspPut %d " 1499 "%d:0303 Ring %d handler: portRspPut %d "
1273 "is bigger then rsp ring %d\n", 1500 "is bigger then rsp ring %d\n",
1274 phba->brd_no, 1501 phba->brd_no, pring->ringno, portRspPut,
1275 pring->ringno, portRspPut, portRspMax); 1502 portRspMax);
1276 1503
1277 phba->hba_state = LPFC_HBA_ERROR; 1504 phba->link_state = LPFC_HBA_ERROR;
1278 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1505 spin_unlock_irqrestore(&phba->hbalock, iflag);
1279 1506
1280 phba->work_hs = HS_FFER3; 1507 phba->work_hs = HS_FFER3;
1281 lpfc_handle_eratt(phba); 1508 lpfc_handle_eratt(phba);
@@ -1298,23 +1525,24 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1298 * the ulpLe field is set, the entire Command has been 1525 * the ulpLe field is set, the entire Command has been
1299 * received. 1526 * received.
1300 */ 1527 */
1301 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx); 1528 entry = lpfc_resp_iocb(phba, pring);
1302 rspiocbp = lpfc_sli_get_iocbq(phba); 1529
1530 phba->last_completion_time = jiffies;
1531 rspiocbp = __lpfc_sli_get_iocbq(phba);
1303 if (rspiocbp == NULL) { 1532 if (rspiocbp == NULL) {
1304 printk(KERN_ERR "%s: out of buffers! Failing " 1533 printk(KERN_ERR "%s: out of buffers! Failing "
1305 "completion.\n", __FUNCTION__); 1534 "completion.\n", __FUNCTION__);
1306 break; 1535 break;
1307 } 1536 }
1308 1537
1309 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t)); 1538 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
1539 phba->iocb_rsp_size);
1310 irsp = &rspiocbp->iocb; 1540 irsp = &rspiocbp->iocb;
1311 1541
1312 if (++pring->rspidx >= portRspMax) 1542 if (++pring->rspidx >= portRspMax)
1313 pring->rspidx = 0; 1543 pring->rspidx = 0;
1314 1544
1315 to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2) 1545 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
1316 + 1) * 4;
1317 writel(pring->rspidx, to_slim);
1318 1546
1319 if (list_empty(&(pring->iocb_continueq))) { 1547 if (list_empty(&(pring->iocb_continueq))) {
1320 list_add(&rspiocbp->list, &(pring->iocb_continueq)); 1548 list_add(&rspiocbp->list, &(pring->iocb_continueq));
@@ -1338,23 +1566,44 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1338 1566
1339 pring->stats.iocb_rsp++; 1567 pring->stats.iocb_rsp++;
1340 1568
1569 /*
1570 * If resource errors are reported from the HBA, reduce
1571 * the queue depths of the SCSI devices.
1572 */
1573 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1574 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
1575 spin_unlock_irqrestore(&phba->hbalock, iflag);
1576 lpfc_adjust_queue_depth(phba);
1577 spin_lock_irqsave(&phba->hbalock, iflag);
1578 }
1579
1341 if (irsp->ulpStatus) { 1580 if (irsp->ulpStatus) {
1342 /* Rsp ring <ringno> error: IOCB */ 1581 /* Rsp ring <ringno> error: IOCB */
1343 lpfc_printf_log(phba, 1582 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1344 KERN_WARNING, 1583 "%d:0328 Rsp Ring %d error: "
1345 LOG_SLI, 1584 "IOCB Data: "
1346 "%d:0328 Rsp Ring %d error: IOCB Data: " 1585 "x%x x%x x%x x%x "
1347 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1586 "x%x x%x x%x x%x "
1348 phba->brd_no, 1587 "x%x x%x x%x x%x "
1349 pring->ringno, 1588 "x%x x%x x%x x%x\n",
1350 irsp->un.ulpWord[0], 1589 phba->brd_no,
1351 irsp->un.ulpWord[1], 1590 pring->ringno,
1352 irsp->un.ulpWord[2], 1591 irsp->un.ulpWord[0],
1353 irsp->un.ulpWord[3], 1592 irsp->un.ulpWord[1],
1354 irsp->un.ulpWord[4], 1593 irsp->un.ulpWord[2],
1355 irsp->un.ulpWord[5], 1594 irsp->un.ulpWord[3],
1356 *(((uint32_t *) irsp) + 6), 1595 irsp->un.ulpWord[4],
1357 *(((uint32_t *) irsp) + 7)); 1596 irsp->un.ulpWord[5],
1597 *(((uint32_t *) irsp) + 6),
1598 *(((uint32_t *) irsp) + 7),
1599 *(((uint32_t *) irsp) + 8),
1600 *(((uint32_t *) irsp) + 9),
1601 *(((uint32_t *) irsp) + 10),
1602 *(((uint32_t *) irsp) + 11),
1603 *(((uint32_t *) irsp) + 12),
1604 *(((uint32_t *) irsp) + 13),
1605 *(((uint32_t *) irsp) + 14),
1606 *(((uint32_t *) irsp) + 15));
1358 } 1607 }
1359 1608
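
Both ring handlers now drop hbalock before calling lpfc_adjust_queue_depth() (and before completion callbacks), then retake it; any state cached from under the lock must be re-read afterwards. A pthread sketch of that call-out pattern, assuming a callee that may sleep or take other locks:

#include <pthread.h>

static pthread_mutex_t hlock = PTHREAD_MUTEX_INITIALIZER;
static int ring_flag;

/* Stand-in for lpfc_adjust_queue_depth(): may block or take other locks. */
static void adjust_queue_depth(void)
{
}

/* Called with hlock held; returns with hlock held. */
static void handle_resource_error(void)
{
	pthread_mutex_unlock(&hlock);	/* never call out with the lock held */
	adjust_queue_depth();
	pthread_mutex_lock(&hlock);

	/* Protected state may have changed while unlocked: re-read it
	 * rather than trusting anything cached from before the drop. */
	if (ring_flag)
		ring_flag = 0;
}

int main(void)
{
	pthread_mutex_lock(&hlock);
	handle_resource_error();
	pthread_mutex_unlock(&hlock);
	return 0;
}
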
1360 /* 1609 /*
@@ -1366,17 +1615,17 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1366 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 1615 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1367 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 1616 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1368 if (type == LPFC_SOL_IOCB) { 1617 if (type == LPFC_SOL_IOCB) {
1369 spin_unlock_irqrestore(phba->host->host_lock, 1618 spin_unlock_irqrestore(&phba->hbalock,
1370 iflag); 1619 iflag);
1371 rc = lpfc_sli_process_sol_iocb(phba, pring, 1620 rc = lpfc_sli_process_sol_iocb(phba, pring,
1372 saveq); 1621 saveq);
1373 spin_lock_irqsave(phba->host->host_lock, iflag); 1622 spin_lock_irqsave(&phba->hbalock, iflag);
1374 } else if (type == LPFC_UNSOL_IOCB) { 1623 } else if (type == LPFC_UNSOL_IOCB) {
1375 spin_unlock_irqrestore(phba->host->host_lock, 1624 spin_unlock_irqrestore(&phba->hbalock,
1376 iflag); 1625 iflag);
1377 rc = lpfc_sli_process_unsol_iocb(phba, pring, 1626 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1378 saveq); 1627 saveq);
1379 spin_lock_irqsave(phba->host->host_lock, iflag); 1628 spin_lock_irqsave(&phba->hbalock, iflag);
1380 } else if (type == LPFC_ABORT_IOCB) { 1629 } else if (type == LPFC_ABORT_IOCB) {
1381 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1630 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1382 ((cmdiocbp = 1631 ((cmdiocbp =
@@ -1386,15 +1635,15 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1386 routine */ 1635 routine */
1387 if (cmdiocbp->iocb_cmpl) { 1636 if (cmdiocbp->iocb_cmpl) {
1388 spin_unlock_irqrestore( 1637 spin_unlock_irqrestore(
1389 phba->host->host_lock, 1638 &phba->hbalock,
1390 iflag); 1639 iflag);
1391 (cmdiocbp->iocb_cmpl) (phba, 1640 (cmdiocbp->iocb_cmpl) (phba,
1392 cmdiocbp, saveq); 1641 cmdiocbp, saveq);
1393 spin_lock_irqsave( 1642 spin_lock_irqsave(
1394 phba->host->host_lock, 1643 &phba->hbalock,
1395 iflag); 1644 iflag);
1396 } else 1645 } else
1397 lpfc_sli_release_iocbq(phba, 1646 __lpfc_sli_release_iocbq(phba,
1398 cmdiocbp); 1647 cmdiocbp);
1399 } 1648 }
1400 } else if (type == LPFC_UNKNOWN_IOCB) { 1649 } else if (type == LPFC_UNKNOWN_IOCB) {
@@ -1411,32 +1660,28 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1411 phba->brd_no, adaptermsg); 1660 phba->brd_no, adaptermsg);
1412 } else { 1661 } else {
1413 /* Unknown IOCB command */ 1662 /* Unknown IOCB command */
1414 lpfc_printf_log(phba, 1663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1415 KERN_ERR, 1664 "%d:0335 Unknown IOCB "
1416 LOG_SLI, 1665 "command Data: x%x "
1417 "%d:0335 Unknown IOCB command " 1666 "x%x x%x x%x\n",
1418 "Data: x%x x%x x%x x%x\n", 1667 phba->brd_no,
1419 phba->brd_no, 1668 irsp->ulpCommand,
1420 irsp->ulpCommand, 1669 irsp->ulpStatus,
1421 irsp->ulpStatus, 1670 irsp->ulpIoTag,
1422 irsp->ulpIoTag, 1671 irsp->ulpContext);
1423 irsp->ulpContext);
1424 } 1672 }
1425 } 1673 }
1426 1674
1427 if (free_saveq) { 1675 if (free_saveq) {
1428 if (!list_empty(&saveq->list)) { 1676 list_for_each_entry_safe(rspiocbp, next_iocb,
1429 list_for_each_entry_safe(rspiocbp, 1677 &saveq->list, list) {
1430 next_iocb, 1678 list_del(&rspiocbp->list);
1431 &saveq->list, 1679 __lpfc_sli_release_iocbq(phba,
1432 list) { 1680 rspiocbp);
1433 list_del(&rspiocbp->list);
1434 lpfc_sli_release_iocbq(phba,
1435 rspiocbp);
1436 }
1437 } 1681 }
1438 lpfc_sli_release_iocbq(phba, saveq); 1682 __lpfc_sli_release_iocbq(phba, saveq);
1439 } 1683 }
1684 rspiocbp = NULL;
1440 } 1685 }
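The rewritten free_saveq block is the standard safe-iteration teardown: list_for_each_entry_safe() caches the successor node, so list_del() on the current node cannot derail the walk. The idiom in isolation:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct rsp_node {
        struct list_head list;
        /* payload */
    };

    static void drain_list(struct list_head *head)
    {
        struct rsp_node *pos, *tmp;

        /* 'tmp' holds the next node, so deleting 'pos' is safe. */
        list_for_each_entry_safe(pos, tmp, head, list) {
            list_del(&pos->list);
            kfree(pos);
        }
    }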
1441 1686
1442 /* 1687 /*
@@ -1449,7 +1694,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1449 } 1694 }
1450 } /* while (pring->rspidx != portRspPut) */ 1695 } /* while (pring->rspidx != portRspPut) */
1451 1696
1452 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) { 1697 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1453 /* At least one response entry has been freed */ 1698 /* At least one response entry has been freed */
1454 pring->stats.iocb_rsp_full++; 1699 pring->stats.iocb_rsp_full++;
1455 /* SET RxRE_RSP in Chip Att register */ 1700 /* SET RxRE_RSP in Chip Att register */
@@ -1470,24 +1715,25 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1470 1715
1471 } 1716 }
1472 1717
1473 spin_unlock_irqrestore(phba->host->host_lock, iflag); 1718 spin_unlock_irqrestore(&phba->hbalock, iflag);
1474 return rc; 1719 return rc;
1475} 1720}
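Running through every hunk in this file is the same conversion: phba->host->host_lock, the SCSI midlayer's per-host lock, becomes phba->hbalock, a lock the driver owns outright, so slow-ring processing no longer serializes against queuecommand traffic. Reduced to a sketch (struct hba here is hypothetical, not the lpfc definition):

    #include <linux/spinlock.h>

    struct hba {
        spinlock_t hbalock;   /* driver-private, replaces host_lock */
        /* rings, mailbox state, link state, ... */
    };

    static void hba_state_update(struct hba *phba)
    {
        unsigned long iflag;

        spin_lock_irqsave(&phba->hbalock, iflag);
        /* touch HBA-internal state; the SCSI core is not blocked */
        spin_unlock_irqrestore(&phba->hbalock, iflag);
    }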
1476 1721
1477int 1722void
1478lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1723lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1479{ 1724{
1480 LIST_HEAD(completions); 1725 LIST_HEAD(completions);
1481 struct lpfc_iocbq *iocb, *next_iocb; 1726 struct lpfc_iocbq *iocb, *next_iocb;
1482 IOCB_t *cmd = NULL; 1727 IOCB_t *cmd = NULL;
1483 int errcnt;
1484 1728
1485 errcnt = 0; 1729 if (pring->ringno == LPFC_ELS_RING) {
1730 lpfc_fabric_abort_hba(phba);
1731 }
1486 1732
1487 /* Error everything on txq and txcmplq 1733 /* Error everything on txq and txcmplq
1488 * First do the txq. 1734 * First do the txq.
1489 */ 1735 */
1490 spin_lock_irq(phba->host->host_lock); 1736 spin_lock_irq(&phba->hbalock);
1491 list_splice_init(&pring->txq, &completions); 1737 list_splice_init(&pring->txq, &completions);
1492 pring->txq_cnt = 0; 1738 pring->txq_cnt = 0;
1493 1739
@@ -1495,26 +1741,25 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1495 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 1741 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
1496 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 1742 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1497 1743
1498 spin_unlock_irq(phba->host->host_lock); 1744 spin_unlock_irq(&phba->hbalock);
1499 1745
1500 while (!list_empty(&completions)) { 1746 while (!list_empty(&completions)) {
1501 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1747 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
1502 cmd = &iocb->iocb; 1748 cmd = &iocb->iocb;
1503 list_del(&iocb->list); 1749 list_del_init(&iocb->list);
1504 1750
1505 if (iocb->iocb_cmpl) { 1751 if (!iocb->iocb_cmpl)
1752 lpfc_sli_release_iocbq(phba, iocb);
1753 else {
1506 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1754 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1507 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1755 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1508 (iocb->iocb_cmpl) (phba, iocb, iocb); 1756 (iocb->iocb_cmpl) (phba, iocb, iocb);
1509 } else 1757 }
1510 lpfc_sli_release_iocbq(phba, iocb);
1511 } 1758 }
1512
1513 return errcnt;
1514} 1759}
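lpfc_sli_abort_iocb_ring(), now void since no caller used the old error count, shows the splice-then-complete pattern: the txq is detached onto a private list while hbalock is held, and the completion callbacks, which may re-enter the driver, run only after the lock is dropped. A generic sketch; the types and the -ECANCELED status are stand-ins for lpfc's IOSTAT/IOERR codes:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct xfer {
        struct list_head list;
        void (*done)(struct xfer *x, int status);
    };

    static void abort_ring(spinlock_t *lock, struct list_head *txq)
    {
        LIST_HEAD(completions);
        struct xfer *x, *n;

        spin_lock_irq(lock);
        list_splice_init(txq, &completions);  /* atomic detach */
        spin_unlock_irq(lock);

        /* Complete outside the lock: callbacks may re-enter. */
        list_for_each_entry_safe(x, n, &completions, list) {
            list_del_init(&x->list);
            x->done(x, -ECANCELED);
        }
    }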
1515 1760
1516int 1761int
1517lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask) 1762lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1518{ 1763{
1519 uint32_t status; 1764 uint32_t status;
1520 int i = 0; 1765 int i = 0;
@@ -1541,7 +1786,8 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1541 msleep(2500); 1786 msleep(2500);
1542 1787
1543 if (i == 15) { 1788 if (i == 15) {
1544 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 1789 /* Do post */
1790 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1545 lpfc_sli_brdrestart(phba); 1791 lpfc_sli_brdrestart(phba);
1546 } 1792 }
1547 /* Read the HBA Host Status Register */ 1793 /* Read the HBA Host Status Register */
@@ -1550,7 +1796,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1550 1796
1551 /* Check to see if any errors occurred during init */ 1797 /* Check to see if any errors occurred during init */
1552 if ((status & HS_FFERM) || (i >= 20)) { 1798 if ((status & HS_FFERM) || (i >= 20)) {
1553 phba->hba_state = LPFC_HBA_ERROR; 1799 phba->link_state = LPFC_HBA_ERROR;
1554 retval = 1; 1800 retval = 1;
1555 } 1801 }
1556 1802
@@ -1559,7 +1805,7 @@ lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1559 1805
1560#define BARRIER_TEST_PATTERN (0xdeadbeef) 1806#define BARRIER_TEST_PATTERN (0xdeadbeef)
1561 1807
1562void lpfc_reset_barrier(struct lpfc_hba * phba) 1808void lpfc_reset_barrier(struct lpfc_hba *phba)
1563{ 1809{
1564 uint32_t __iomem *resp_buf; 1810 uint32_t __iomem *resp_buf;
1565 uint32_t __iomem *mbox_buf; 1811 uint32_t __iomem *mbox_buf;
@@ -1584,12 +1830,12 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1584 hc_copy = readl(phba->HCregaddr); 1830 hc_copy = readl(phba->HCregaddr);
1585 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 1831 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1586 readl(phba->HCregaddr); /* flush */ 1832 readl(phba->HCregaddr); /* flush */
1587 phba->fc_flag |= FC_IGNORE_ERATT; 1833 phba->link_flag |= LS_IGNORE_ERATT;
1588 1834
1589 if (readl(phba->HAregaddr) & HA_ERATT) { 1835 if (readl(phba->HAregaddr) & HA_ERATT) {
1590 /* Clear Chip error bit */ 1836 /* Clear Chip error bit */
1591 writel(HA_ERATT, phba->HAregaddr); 1837 writel(HA_ERATT, phba->HAregaddr);
1592 phba->stopped = 1; 1838 phba->pport->stopped = 1;
1593 } 1839 }
1594 1840
1595 mbox = 0; 1841 mbox = 0;
@@ -1606,7 +1852,7 @@ void lpfc_reset_barrier(struct lpfc_hba * phba)
1606 1852
1607 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 1853 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1608 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 1854 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1609 phba->stopped) 1855 phba->pport->stopped)
1610 goto restore_hc; 1856 goto restore_hc;
1611 else 1857 else
1612 goto clear_errat; 1858 goto clear_errat;
@@ -1623,17 +1869,17 @@ clear_errat:
1623 1869
1624 if (readl(phba->HAregaddr) & HA_ERATT) { 1870 if (readl(phba->HAregaddr) & HA_ERATT) {
1625 writel(HA_ERATT, phba->HAregaddr); 1871 writel(HA_ERATT, phba->HAregaddr);
1626 phba->stopped = 1; 1872 phba->pport->stopped = 1;
1627 } 1873 }
1628 1874
1629restore_hc: 1875restore_hc:
1630 phba->fc_flag &= ~FC_IGNORE_ERATT; 1876 phba->link_flag &= ~LS_IGNORE_ERATT;
1631 writel(hc_copy, phba->HCregaddr); 1877 writel(hc_copy, phba->HCregaddr);
1632 readl(phba->HCregaddr); /* flush */ 1878 readl(phba->HCregaddr); /* flush */
1633} 1879}
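Note the readl() of HCregaddr immediately after each writel(): PCI posts memory writes, and the read back forces the write out to the device before the code depends on its effect. The minimal form of the idiom:

    #include <linux/io.h>

    /* Mask an interrupt-enable bit and flush the posted write so
     * the device has observed it before we proceed. The caller
     * restores 'old' later.
     */
    static u32 mask_and_flush(void __iomem *reg, u32 bit)
    {
        u32 old = readl(reg);

        writel(old & ~bit, reg);
        readl(reg);             /* flush posted PCI write */
        return old;
    }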
1634 1880
1635int 1881int
1636lpfc_sli_brdkill(struct lpfc_hba * phba) 1882lpfc_sli_brdkill(struct lpfc_hba *phba)
1637{ 1883{
1638 struct lpfc_sli *psli; 1884 struct lpfc_sli *psli;
1639 LPFC_MBOXQ_t *pmb; 1885 LPFC_MBOXQ_t *pmb;
@@ -1645,26 +1891,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1645 psli = &phba->sli; 1891 psli = &phba->sli;
1646 1892
1647 /* Kill HBA */ 1893 /* Kill HBA */
1648 lpfc_printf_log(phba, 1894 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1649 KERN_INFO, 1895 "%d:0329 Kill HBA Data: x%x x%x\n",
1650 LOG_SLI, 1896 phba->brd_no, phba->pport->port_state, psli->sli_flag);
1651 "%d:0329 Kill HBA Data: x%x x%x\n",
1652 phba->brd_no,
1653 phba->hba_state,
1654 psli->sli_flag);
1655 1897
1656 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, 1898 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1657 GFP_KERNEL)) == 0) 1899 GFP_KERNEL)) == 0)
1658 return 1; 1900 return 1;
1659 1901
1660 /* Disable the error attention */ 1902 /* Disable the error attention */
1661 spin_lock_irq(phba->host->host_lock); 1903 spin_lock_irq(&phba->hbalock);
1662 status = readl(phba->HCregaddr); 1904 status = readl(phba->HCregaddr);
1663 status &= ~HC_ERINT_ENA; 1905 status &= ~HC_ERINT_ENA;
1664 writel(status, phba->HCregaddr); 1906 writel(status, phba->HCregaddr);
1665 readl(phba->HCregaddr); /* flush */ 1907 readl(phba->HCregaddr); /* flush */
1666 phba->fc_flag |= FC_IGNORE_ERATT; 1908 phba->link_flag |= LS_IGNORE_ERATT;
1667 spin_unlock_irq(phba->host->host_lock); 1909 spin_unlock_irq(&phba->hbalock);
1668 1910
1669 lpfc_kill_board(phba, pmb); 1911 lpfc_kill_board(phba, pmb);
1670 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 1912 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -1673,9 +1915,9 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1673 if (retval != MBX_SUCCESS) { 1915 if (retval != MBX_SUCCESS) {
1674 if (retval != MBX_BUSY) 1916 if (retval != MBX_BUSY)
1675 mempool_free(pmb, phba->mbox_mem_pool); 1917 mempool_free(pmb, phba->mbox_mem_pool);
1676 spin_lock_irq(phba->host->host_lock); 1918 spin_lock_irq(&phba->hbalock);
1677 phba->fc_flag &= ~FC_IGNORE_ERATT; 1919 phba->link_flag &= ~LS_IGNORE_ERATT;
1678 spin_unlock_irq(phba->host->host_lock); 1920 spin_unlock_irq(&phba->hbalock);
1679 return 1; 1921 return 1;
1680 } 1922 }
1681 1923
@@ -1698,22 +1940,22 @@ lpfc_sli_brdkill(struct lpfc_hba * phba)
1698 del_timer_sync(&psli->mbox_tmo); 1940 del_timer_sync(&psli->mbox_tmo);
1699 if (ha_copy & HA_ERATT) { 1941 if (ha_copy & HA_ERATT) {
1700 writel(HA_ERATT, phba->HAregaddr); 1942 writel(HA_ERATT, phba->HAregaddr);
1701 phba->stopped = 1; 1943 phba->pport->stopped = 1;
1702 } 1944 }
1703 spin_lock_irq(phba->host->host_lock); 1945 spin_lock_irq(&phba->hbalock);
1704 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 1946 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1705 phba->fc_flag &= ~FC_IGNORE_ERATT; 1947 phba->link_flag &= ~LS_IGNORE_ERATT;
1706 spin_unlock_irq(phba->host->host_lock); 1948 spin_unlock_irq(&phba->hbalock);
1707 1949
1708 psli->mbox_active = NULL; 1950 psli->mbox_active = NULL;
1709 lpfc_hba_down_post(phba); 1951 lpfc_hba_down_post(phba);
1710 phba->hba_state = LPFC_HBA_ERROR; 1952 phba->link_state = LPFC_HBA_ERROR;
1711 1953
1712 return (ha_copy & HA_ERATT ? 0 : 1); 1954 return ha_copy & HA_ERATT ? 0 : 1;
1713} 1955}
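Mailbox commands are carved from phba->mbox_mem_pool, a mempool, so the issue paths make progress even under allocation pressure; note above that MBX_BUSY keeps ownership with the queue, so the buffer is freed only on outright failure. The ownership rule, sketched with issue() and the status codes as stand-ins:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mempool.h>

    #define ST_OK   0
    #define ST_BUSY 1   /* queued; the queue now owns the buffer */

    static int send_cmd(mempool_t *pool, int (*issue)(void *cmd))
    {
        void *cmd = mempool_alloc(pool, GFP_KERNEL);
        int rc;

        if (!cmd)
            return -ENOMEM;

        rc = issue(cmd);
        if (rc != ST_OK && rc != ST_BUSY)
            mempool_free(cmd, pool);   /* still ours: it failed */
        return rc;
    }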
1714 1956
1715int 1957int
1716lpfc_sli_brdreset(struct lpfc_hba * phba) 1958lpfc_sli_brdreset(struct lpfc_hba *phba)
1717{ 1959{
1718 struct lpfc_sli *psli; 1960 struct lpfc_sli *psli;
1719 struct lpfc_sli_ring *pring; 1961 struct lpfc_sli_ring *pring;
@@ -1725,12 +1967,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
1725 /* Reset HBA */ 1967 /* Reset HBA */
1726 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1968 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1727 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no, 1969 "%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1728 phba->hba_state, psli->sli_flag); 1970 phba->pport->port_state, psli->sli_flag);
1729 1971
1730 /* perform board reset */ 1972 /* perform board reset */
1731 phba->fc_eventTag = 0; 1973 phba->fc_eventTag = 0;
1732 phba->fc_myDID = 0; 1974 phba->pport->fc_myDID = 0;
1733 phba->fc_prevDID = 0; 1975 phba->pport->fc_prevDID = 0;
1734 1976
1735 /* Turn off parity checking and serr during the physical reset */ 1977 /* Turn off parity checking and serr during the physical reset */
1736 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 1978 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
@@ -1760,12 +2002,12 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
1760 pring->missbufcnt = 0; 2002 pring->missbufcnt = 0;
1761 } 2003 }
1762 2004
1763 phba->hba_state = LPFC_WARM_START; 2005 phba->link_state = LPFC_WARM_START;
1764 return 0; 2006 return 0;
1765} 2007}
1766 2008
1767int 2009int
1768lpfc_sli_brdrestart(struct lpfc_hba * phba) 2010lpfc_sli_brdrestart(struct lpfc_hba *phba)
1769{ 2011{
1770 MAILBOX_t *mb; 2012 MAILBOX_t *mb;
1771 struct lpfc_sli *psli; 2013 struct lpfc_sli *psli;
@@ -1773,14 +2015,14 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1773 volatile uint32_t word0; 2015 volatile uint32_t word0;
1774 void __iomem *to_slim; 2016 void __iomem *to_slim;
1775 2017
1776 spin_lock_irq(phba->host->host_lock); 2018 spin_lock_irq(&phba->hbalock);
1777 2019
1778 psli = &phba->sli; 2020 psli = &phba->sli;
1779 2021
1780 /* Restart HBA */ 2022 /* Restart HBA */
1781 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2023 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1782 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no, 2024 "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
1783 phba->hba_state, psli->sli_flag); 2025 phba->pport->port_state, psli->sli_flag);
1784 2026
1785 word0 = 0; 2027 word0 = 0;
1786 mb = (MAILBOX_t *) &word0; 2028 mb = (MAILBOX_t *) &word0;
@@ -1794,7 +2036,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1794 readl(to_slim); /* flush */ 2036 readl(to_slim); /* flush */
1795 2037
1796 /* Only skip post after fc_ffinit is completed */ 2038 /* Only skip post after fc_ffinit is completed */
1797 if (phba->hba_state) { 2039 if (phba->pport->port_state) {
1798 skip_post = 1; 2040 skip_post = 1;
1799 word0 = 1; /* This is really setting up word1 */ 2041 word0 = 1; /* This is really setting up word1 */
1800 } else { 2042 } else {
@@ -1806,10 +2048,10 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
1806 readl(to_slim); /* flush */ 2048 readl(to_slim); /* flush */
1807 2049
1808 lpfc_sli_brdreset(phba); 2050 lpfc_sli_brdreset(phba);
1809 phba->stopped = 0; 2051 phba->pport->stopped = 0;
1810 phba->hba_state = LPFC_INIT_START; 2052 phba->link_state = LPFC_INIT_START;
1811 2053
1812 spin_unlock_irq(phba->host->host_lock); 2054 spin_unlock_irq(&phba->hbalock);
1813 2055
1814 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2056 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1815 psli->stats_start = get_seconds(); 2057 psli->stats_start = get_seconds();
@@ -1843,14 +2085,11 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1843 if (i++ >= 20) { 2085 if (i++ >= 20) {
1844 /* Adapter failed to init, timeout, status reg 2086 /* Adapter failed to init, timeout, status reg
1845 <status> */ 2087 <status> */
1846 lpfc_printf_log(phba, 2088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1847 KERN_ERR,
1848 LOG_INIT,
1849 "%d:0436 Adapter failed to init, " 2089 "%d:0436 Adapter failed to init, "
1850 "timeout, status reg x%x\n", 2090 "timeout, status reg x%x\n",
1851 phba->brd_no, 2091 phba->brd_no, status);
1852 status); 2092 phba->link_state = LPFC_HBA_ERROR;
1853 phba->hba_state = LPFC_HBA_ERROR;
1854 return -ETIMEDOUT; 2093 return -ETIMEDOUT;
1855 } 2094 }
1856 2095
@@ -1859,14 +2098,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1859 /* ERROR: During chipset initialization */ 2098 /* ERROR: During chipset initialization */
1860 /* Adapter failed to init, chipset, status reg 2099 /* Adapter failed to init, chipset, status reg
1861 <status> */ 2100 <status> */
1862 lpfc_printf_log(phba, 2101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1863 KERN_ERR,
1864 LOG_INIT,
1865 "%d:0437 Adapter failed to init, " 2102 "%d:0437 Adapter failed to init, "
1866 "chipset, status reg x%x\n", 2103 "chipset, status reg x%x\n",
1867 phba->brd_no, 2104 phba->brd_no,
1868 status); 2105 status);
1869 phba->hba_state = LPFC_HBA_ERROR; 2106 phba->link_state = LPFC_HBA_ERROR;
1870 return -EIO; 2107 return -EIO;
1871 } 2108 }
1872 2109
@@ -1879,7 +2116,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1879 } 2116 }
1880 2117
1881 if (i == 15) { 2118 if (i == 15) {
1882 phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */ 2119 /* Do post */
2120 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1883 lpfc_sli_brdrestart(phba); 2121 lpfc_sli_brdrestart(phba);
1884 } 2122 }
1885 /* Read the HBA Host Status Register */ 2123 /* Read the HBA Host Status Register */
@@ -1890,14 +2128,12 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1890 if (status & HS_FFERM) { 2128 if (status & HS_FFERM) {
1891 /* ERROR: During chipset initialization */ 2129 /* ERROR: During chipset initialization */
1892 /* Adapter failed to init, chipset, status reg <status> */ 2130 /* Adapter failed to init, chipset, status reg <status> */
1893 lpfc_printf_log(phba, 2131 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1894 KERN_ERR,
1895 LOG_INIT,
1896 "%d:0438 Adapter failed to init, chipset, " 2132 "%d:0438 Adapter failed to init, chipset, "
1897 "status reg x%x\n", 2133 "status reg x%x\n",
1898 phba->brd_no, 2134 phba->brd_no,
1899 status); 2135 status);
1900 phba->hba_state = LPFC_HBA_ERROR; 2136 phba->link_state = LPFC_HBA_ERROR;
1901 return -EIO; 2137 return -EIO;
1902 } 2138 }
1903 2139
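lpfc_sli_chipset_init() and lpfc_sli_brdready() both poll the host status register with short sleeps first and longer ones later, a retry cap, and an error bit that aborts early. Condensed into one loop; the register bits are hypothetical, not the lpfc HS_* values:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    #define HS_READY 0x1
    #define HS_ERROR 0x2

    static int wait_ready(void __iomem *hsreg)
    {
        int i;

        for (i = 0; i < 20; i++) {
            u32 status = readl(hsreg);

            if (status & HS_ERROR)
                return -EIO;        /* latched error: give up now */
            if (status & HS_READY)
                return 0;
            msleep(i < 5 ? 10 : 500);   /* back off over time */
        }
        return -ETIMEDOUT;
    }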
@@ -1911,80 +2147,253 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
1911 return 0; 2147 return 0;
1912} 2148}
1913 2149
2150static int
2151lpfc_sli_hbq_count(void)
2152{
2153 return ARRAY_SIZE(lpfc_hbq_defs);
2154}
2155
2156static int
2157lpfc_sli_hbq_entry_count(void)
2158{
2159 int hbq_count = lpfc_sli_hbq_count();
2160 int count = 0;
2161 int i;
2162
2163 for (i = 0; i < hbq_count; ++i)
2164 count += lpfc_hbq_defs[i]->entry_count;
2165 return count;
2166}
2167
1914int 2168int
1915lpfc_sli_hba_setup(struct lpfc_hba * phba) 2169lpfc_sli_hbq_size(void)
2170{
2171 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2172}
2173
2174static int
2175lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2176{
2177 int hbq_count = lpfc_sli_hbq_count();
2178 LPFC_MBOXQ_t *pmb;
2179 MAILBOX_t *pmbox;
2180 uint32_t hbqno;
2181 uint32_t hbq_entry_index;
2182
2183 /* Get a Mailbox buffer to setup mailbox
2184 * commands for HBA initialization
2185 */
2186 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2187
2188 if (!pmb)
2189 return -ENOMEM;
2190
2191 pmbox = &pmb->mb;
2192
2193 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2194 phba->link_state = LPFC_INIT_MBX_CMDS;
2195
2196 hbq_entry_index = 0;
2197 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2198 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2199 phba->hbqs[hbqno].hbqPutIdx = 0;
2200 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2201 phba->hbqs[hbqno].entry_count =
2202 lpfc_hbq_defs[hbqno]->entry_count;
2203 lpfc_config_hbq(phba, lpfc_hbq_defs[hbqno], hbq_entry_index,
2204 pmb);
2205 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2206
2207 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2208 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2209 mbxStatus <status>, ring <num> */
2210
2211 lpfc_printf_log(phba, KERN_ERR,
2212 LOG_SLI | LOG_VPORT,
2213 "%d:1805 Adapter failed to init. "
2214 "Data: x%x x%x x%x\n",
2215 phba->brd_no, pmbox->mbxCommand,
2216 pmbox->mbxStatus, hbqno);
2217
2218 phba->link_state = LPFC_HBA_ERROR;
2219 mempool_free(pmb, phba->mbox_mem_pool);
2220 return -ENXIO;
2221 }
2222 }
2223 phba->hbq_count = hbq_count;
2224
2225 mempool_free(pmb, phba->mbox_mem_pool);
2226
2227 /* Initially populate or replenish the HBQs */
2228 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2229 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2230 return -ENOMEM;
2231 }
2232 return 0;
2233}
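HBQ setup is table-driven: lpfc_hbq_defs[] supplies per-queue entry counts, ARRAY_SIZE() gives the queue count, and hbq_entry_index accumulates across queues so all HBQs share one contiguous entry area. The table idiom in isolation, with invented names and counts:

    #include <linux/kernel.h>

    struct hbq_def {
        const char *name;
        int entry_count;
    };

    static const struct hbq_def hbq_defs[] = {
        { "els_buffers",   256 },
        { "extra_buffers",  64 },
    };

    /* Assign each queue its slice of one shared entry array. */
    static void layout_hbqs(int first_entry[ARRAY_SIZE(hbq_defs)])
    {
        int i, index = 0;

        for (i = 0; i < ARRAY_SIZE(hbq_defs); i++) {
            first_entry[i] = index;
            index += hbq_defs[i].entry_count;
        }
    }

Adding a queue then means adding a table entry; none of the setup code changes.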
2234
2235static int
2236lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
1916{ 2237{
1917 LPFC_MBOXQ_t *pmb; 2238 LPFC_MBOXQ_t *pmb;
1918 uint32_t resetcount = 0, rc = 0, done = 0; 2239 uint32_t resetcount = 0, rc = 0, done = 0;
1919 2240
1920 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2241 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1921 if (!pmb) { 2242 if (!pmb) {
1922 phba->hba_state = LPFC_HBA_ERROR; 2243 phba->link_state = LPFC_HBA_ERROR;
1923 return -ENOMEM; 2244 return -ENOMEM;
1924 } 2245 }
1925 2246
2247 phba->sli_rev = sli_mode;
1926 while (resetcount < 2 && !done) { 2248 while (resetcount < 2 && !done) {
1927 spin_lock_irq(phba->host->host_lock); 2249 spin_lock_irq(&phba->hbalock);
1928 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2250 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1929 spin_unlock_irq(phba->host->host_lock); 2251 spin_unlock_irq(&phba->hbalock);
1930 phba->hba_state = LPFC_STATE_UNKNOWN; 2252 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1931 lpfc_sli_brdrestart(phba); 2253 lpfc_sli_brdrestart(phba);
1932 msleep(2500); 2254 msleep(2500);
1933 rc = lpfc_sli_chipset_init(phba); 2255 rc = lpfc_sli_chipset_init(phba);
1934 if (rc) 2256 if (rc)
1935 break; 2257 break;
1936 2258
1937 spin_lock_irq(phba->host->host_lock); 2259 spin_lock_irq(&phba->hbalock);
1938 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2260 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1939 spin_unlock_irq(phba->host->host_lock); 2261 spin_unlock_irq(&phba->hbalock);
1940 resetcount++; 2262 resetcount++;
1941 2263
1942 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0 2264 /* Call pre CONFIG_PORT mailbox command initialization. A
1943 * means the call was successful. Any other nonzero value is a failure, 2265 * value of 0 means the call was successful. Any other
1944 * but if ERESTART is returned, the driver may reset the HBA and try 2266 * nonzero value is a failure, but if ERESTART is returned,
1945 * again. 2267 * the driver may reset the HBA and try again.
1946 */ 2268 */
1947 rc = lpfc_config_port_prep(phba); 2269 rc = lpfc_config_port_prep(phba);
1948 if (rc == -ERESTART) { 2270 if (rc == -ERESTART) {
1949 phba->hba_state = 0; 2271 phba->link_state = LPFC_LINK_UNKNOWN;
1950 continue; 2272 continue;
1951 } else if (rc) { 2273 } else if (rc) {
1952 break; 2274 break;
1953 } 2275 }
1954 2276
1955 phba->hba_state = LPFC_INIT_MBX_CMDS; 2277 phba->link_state = LPFC_INIT_MBX_CMDS;
1956 lpfc_config_port(phba, pmb); 2278 lpfc_config_port(phba, pmb);
1957 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2279 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1958 if (rc == MBX_SUCCESS) 2280 if (rc != MBX_SUCCESS) {
1959 done = 1;
1960 else {
1961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1962 "%d:0442 Adapter failed to init, mbxCmd x%x " 2282 "%d:0442 Adapter failed to init, mbxCmd x%x "
1963 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 2283 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1964 phba->brd_no, pmb->mb.mbxCommand, 2284 phba->brd_no, pmb->mb.mbxCommand,
1965 pmb->mb.mbxStatus, 0); 2285 pmb->mb.mbxStatus, 0);
2286 spin_lock_irq(&phba->hbalock);
1966 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 2287 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2288 spin_unlock_irq(&phba->hbalock);
2289 rc = -ENXIO;
2290 } else {
2291 done = 1;
2292 phba->max_vpi = (phba->max_vpi &&
2293 pmb->mb.un.varCfgPort.gmv) != 0
2294 ? pmb->mb.un.varCfgPort.max_vpi
2295 : 0;
2296 }
2297 }
2298
2299 if (!done) {
2300 rc = -EINVAL;
2301 goto do_prep_failed;
2302 }
2303
2304 if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2305 (!pmb->mb.un.varCfgPort.cMA)) {
2306 rc = -ENXIO;
2307 goto do_prep_failed;
2308 }
2309 /* success: fall through so the mailbox buffer is freed too */
2310
2311do_prep_failed:
2312 mempool_free(pmb, phba->mbox_mem_pool);
2313 return rc;
2314}
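lpfc_do_config_port() funnels every exit through a single label that releases the mailbox buffer, the kernel's usual goto-unwind convention. The shape, with config_step() as a hypothetical stand-in for the real mailbox work:

    #include <linux/errno.h>
    #include <linux/slab.h>

    static int config_step(void *buf)
    {
        return 0;   /* stand-in for the real work */
    }

    static int do_config(void)
    {
        void *buf = kmalloc(128, GFP_KERNEL);
        int rc;

        if (!buf)
            return -ENOMEM;

        rc = config_step(buf);
        if (rc)
            goto out_free;

        /* further steps would go here, each with 'goto out_free' */
    out_free:
        kfree(buf);     /* single cleanup point for all paths */
        return rc;
    }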
2315
2316int
2317lpfc_sli_hba_setup(struct lpfc_hba *phba)
2318{
2319 uint32_t rc;
2320 int mode = 3;
2321
2322 switch (lpfc_sli_mode) {
2323 case 2:
2324 if (phba->cfg_npiv_enable) {
2325 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2326 "%d:1824 NPIV enabled: Override lpfc_sli_mode "
2327 "parameter (%d) to auto (0).\n",
2328 phba->brd_no, lpfc_sli_mode);
2329 break;
1967 } 2330 }
2331 mode = 2;
2332 break;
2333 case 0:
2334 case 3:
2335 break;
2336 default:
2337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2338 "%d:1819 Unrecognized lpfc_sli_mode "
2339 "parameter: %d.\n",
2340 phba->brd_no, lpfc_sli_mode);
2341
2342 break;
1968 } 2343 }
1969 if (!done) 2344
2345 rc = lpfc_do_config_port(phba, mode);
2346 if (rc && lpfc_sli_mode == 3)
2347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2348 "%d:1820 Unable to select SLI-3. "
2349 "Not supported by adapter.\n",
2350 phba->brd_no);
2351 if (rc && mode != 2)
2352 rc = lpfc_do_config_port(phba, 2);
2353 if (rc)
1970 goto lpfc_sli_hba_setup_error; 2354 goto lpfc_sli_hba_setup_error;
1971 2355
1972 rc = lpfc_sli_ring_map(phba, pmb); 2356 if (phba->sli_rev == 3) {
2357 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2358 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2359 phba->sli3_options |= LPFC_SLI3_ENABLED;
2360 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2361
2362 } else {
2363 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2364 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2365 phba->sli3_options = 0;
2366 }
2367
2368 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2369 "%d:0444 Firmware in SLI %x mode. Max_vpi %d\n",
2370 phba->brd_no, phba->sli_rev, phba->max_vpi);
2371 rc = lpfc_sli_ring_map(phba);
1973 2372
1974 if (rc) 2373 if (rc)
1975 goto lpfc_sli_hba_setup_error; 2374 goto lpfc_sli_hba_setup_error;
1976 2375
2376 /* Init HBQs */
2377
2378 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2379 rc = lpfc_sli_hbq_setup(phba);
2380 if (rc)
2381 goto lpfc_sli_hba_setup_error;
2382 }
2383
1977 phba->sli.sli_flag |= LPFC_PROCESS_LA; 2384 phba->sli.sli_flag |= LPFC_PROCESS_LA;
1978 2385
1979 rc = lpfc_config_port_post(phba); 2386 rc = lpfc_config_port_post(phba);
1980 if (rc) 2387 if (rc)
1981 goto lpfc_sli_hba_setup_error; 2388 goto lpfc_sli_hba_setup_error;
1982 2389
1983 goto lpfc_sli_hba_setup_exit; 2390 return rc;
2391
1984lpfc_sli_hba_setup_error: 2392lpfc_sli_hba_setup_error:
1985 phba->hba_state = LPFC_HBA_ERROR; 2393 phba->link_state = LPFC_HBA_ERROR;
1986lpfc_sli_hba_setup_exit: 2394 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1987 mempool_free(pmb, phba->mbox_mem_pool); 2395 "%d:0445 Firmware initialization failed\n",
2396 phba->brd_no);
1988 return rc; 2397 return rc;
1989} 2398}
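lpfc_sli_hba_setup() now negotiates the interface revision: try SLI-3 first (needed for NPIV and HBQs), and if the firmware refuses, reconfigure for SLI-2 and size the IOCBs accordingly. Reduced to its control flow, with try_mode() standing in for lpfc_do_config_port():

    static int negotiate_sli(int (*try_mode)(int mode), int *rev)
    {
        int rc = try_mode(3);           /* prefer SLI-3 */

        if (rc) {
            rc = try_mode(2);           /* older firmware: SLI-2 */
            *rev = rc ? 0 : 2;
        } else {
            *rev = 3;
        }
        return rc;
    }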
1990 2399
@@ -2004,56 +2413,58 @@ lpfc_sli_hba_setup_exit:
2004void 2413void
2005lpfc_mbox_timeout(unsigned long ptr) 2414lpfc_mbox_timeout(unsigned long ptr)
2006{ 2415{
2007 struct lpfc_hba *phba; 2416 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2008 unsigned long iflag; 2417 unsigned long iflag;
2418 uint32_t tmo_posted;
2009 2419
2010 phba = (struct lpfc_hba *)ptr; 2420 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2011 spin_lock_irqsave(phba->host->host_lock, iflag); 2421 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2012 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) { 2422 if (!tmo_posted)
2013 phba->work_hba_events |= WORKER_MBOX_TMO; 2423 phba->pport->work_port_events |= WORKER_MBOX_TMO;
2424 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2425
2426 if (!tmo_posted) {
2427 spin_lock_irqsave(&phba->hbalock, iflag);
2014 if (phba->work_wait) 2428 if (phba->work_wait)
2015 wake_up(phba->work_wait); 2429 lpfc_worker_wake_up(phba);
2430 spin_unlock_irqrestore(&phba->hbalock, iflag);
2016 } 2431 }
2017 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2018} 2432}
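The reworked timer handler latches the WORKER_MBOX_TMO bit under work_port_lock and wakes the worker only when this call actually posted the event, so repeated timer fires do not generate redundant wakeups. The test-and-post shape, with wake_up() standing in for lpfc_worker_wake_up():

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    #define WORKER_MBOX_TMO 0x1

    static void mbox_timeout(spinlock_t *lock, unsigned long *events,
                             wait_queue_head_t *wq)
    {
        unsigned long flags;
        bool already;

        spin_lock_irqsave(lock, flags);
        already = *events & WORKER_MBOX_TMO;
        *events |= WORKER_MBOX_TMO;
        spin_unlock_irqrestore(lock, flags);

        if (!already)
            wake_up(wq);    /* only the first poster wakes the worker */
    }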
2019 2433
2020void 2434void
2021lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 2435lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2022{ 2436{
2023 LPFC_MBOXQ_t *pmbox; 2437 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2024 MAILBOX_t *mb; 2438 MAILBOX_t *mb = &pmbox->mb;
2025 struct lpfc_sli *psli = &phba->sli; 2439 struct lpfc_sli *psli = &phba->sli;
2026 struct lpfc_sli_ring *pring; 2440 struct lpfc_sli_ring *pring;
2027 2441
2028 spin_lock_irq(phba->host->host_lock); 2442 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2029 if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2030 spin_unlock_irq(phba->host->host_lock);
2031 return; 2443 return;
2032 } 2444 }
2033 2445
2034 pmbox = phba->sli.mbox_active;
2035 mb = &pmbox->mb;
2036
2037 /* Mbox cmd <mbxCommand> timeout */ 2446 /* Mbox cmd <mbxCommand> timeout */
2038 lpfc_printf_log(phba, 2447 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2039 KERN_ERR, 2448 "%d:0310 Mailbox command x%x timeout Data: x%x x%x "
2040 LOG_MBOX | LOG_SLI, 2449 "x%p\n",
2041 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2450 phba->brd_no,
2042 phba->brd_no, 2451 mb->mbxCommand,
2043 mb->mbxCommand, 2452 phba->pport->port_state,
2044 phba->hba_state, 2453 phba->sli.sli_flag,
2045 phba->sli.sli_flag, 2454 phba->sli.mbox_active);
2046 phba->sli.mbox_active);
2047 2455
2048 /* Setting state unknown so lpfc_sli_abort_iocb_ring 2456 /* Setting state unknown so lpfc_sli_abort_iocb_ring
2049 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 2457 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2050 * it to fail all outstanding SCSI IO. 2458 * it to fail all outstanding SCSI IO.
2051 */ 2459 */
2052 phba->hba_state = LPFC_STATE_UNKNOWN; 2460 spin_lock_irq(&phba->pport->work_port_lock);
2053 phba->work_hba_events &= ~WORKER_MBOX_TMO; 2461 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2054 phba->fc_flag |= FC_ESTABLISH_LINK; 2462 spin_unlock_irq(&phba->pport->work_port_lock);
2463 spin_lock_irq(&phba->hbalock);
2464 phba->link_state = LPFC_LINK_UNKNOWN;
2465 phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2055 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2466 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2056 spin_unlock_irq(phba->host->host_lock); 2467 spin_unlock_irq(&phba->hbalock);
2057 2468
2058 pring = &psli->ring[psli->fcp_ring]; 2469 pring = &psli->ring[psli->fcp_ring];
2059 lpfc_sli_abort_iocb_ring(phba, pring); 2470 lpfc_sli_abort_iocb_ring(phba, pring);
@@ -2075,10 +2486,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2075} 2486}
2076 2487
2077int 2488int
2078lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag) 2489lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2079{ 2490{
2080 MAILBOX_t *mb; 2491 MAILBOX_t *mb;
2081 struct lpfc_sli *psli; 2492 struct lpfc_sli *psli = &phba->sli;
2082 uint32_t status, evtctr; 2493 uint32_t status, evtctr;
2083 uint32_t ha_copy; 2494 uint32_t ha_copy;
2084 int i; 2495 int i;
@@ -2086,31 +2497,44 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2086 volatile uint32_t word0, ldata; 2497 volatile uint32_t word0, ldata;
2087 void __iomem *to_slim; 2498 void __iomem *to_slim;
2088 2499
2500 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2501 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2502 if(!pmbox->vport) {
2503 lpfc_printf_log(phba, KERN_ERR,
2504 LOG_MBOX | LOG_VPORT,
2505 "%d:1806 Mbox x%x failed. No vport\n",
2506 phba->brd_no,
2507 pmbox->mb.mbxCommand);
2508 dump_stack();
2509 return MBXERR_ERROR;
2510 }
2511 }
2512
2513
2089 /* If the PCI channel is in offline state, do not post mbox. */ 2514 /* If the PCI channel is in offline state, do not post mbox. */
2090 if (unlikely(pci_channel_offline(phba->pcidev))) 2515 if (unlikely(pci_channel_offline(phba->pcidev)))
2091 return MBX_NOT_FINISHED; 2516 return MBX_NOT_FINISHED;
2092 2517
2518 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2093 psli = &phba->sli; 2519 psli = &phba->sli;
2094 2520
2095 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2096
2097 2521
2098 mb = &pmbox->mb; 2522 mb = &pmbox->mb;
2099 status = MBX_SUCCESS; 2523 status = MBX_SUCCESS;
2100 2524
2101 if (phba->hba_state == LPFC_HBA_ERROR) { 2525 if (phba->link_state == LPFC_HBA_ERROR) {
2102 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2526 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2103 2527
2104 /* Mbox command <mbxCommand> cannot issue */ 2528 /* Mbox command <mbxCommand> cannot issue */
2105 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2529 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2106 return (MBX_NOT_FINISHED); 2530 return MBX_NOT_FINISHED;
2107 } 2531 }
2108 2532
2109 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2533 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2110 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2534 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2111 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2535 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2112 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2536 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag)
2113 return (MBX_NOT_FINISHED); 2537 return MBX_NOT_FINISHED;
2114 } 2538 }
2115 2539
2116 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2540 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
@@ -2120,20 +2544,18 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2120 */ 2544 */
2121 2545
2122 if (flag & MBX_POLL) { 2546 if (flag & MBX_POLL) {
2123 spin_unlock_irqrestore(phba->host->host_lock, 2547 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2124 drvr_flag);
2125 2548
2126 /* Mbox command <mbxCommand> cannot issue */ 2549 /* Mbox command <mbxCommand> cannot issue */
2127 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2550 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2128 return (MBX_NOT_FINISHED); 2551 return MBX_NOT_FINISHED;
2129 } 2552 }
2130 2553
2131 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2554 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2132 spin_unlock_irqrestore(phba->host->host_lock, 2555 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2133 drvr_flag);
2134 /* Mbox command <mbxCommand> cannot issue */ 2556 /* Mbox command <mbxCommand> cannot issue */
2135 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) 2557 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2136 return (MBX_NOT_FINISHED); 2558 return MBX_NOT_FINISHED;
2137 } 2559 }
2138 2560
2139 /* Handle STOP IOCB processing flag. This is only meaningful 2561 /* Handle STOP IOCB processing flag. This is only meaningful
@@ -2157,21 +2579,33 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2157 lpfc_mbox_put(phba, pmbox); 2579 lpfc_mbox_put(phba, pmbox);
2158 2580
2159 /* Mbox cmd issue - BUSY */ 2581 /* Mbox cmd issue - BUSY */
2160 lpfc_printf_log(phba, 2582 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2161 KERN_INFO, 2583 "%d (%d):0308 Mbox cmd issue - BUSY Data: "
2162 LOG_MBOX | LOG_SLI, 2584 "x%x x%x x%x x%x\n",
2163 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2585 phba->brd_no,
2164 phba->brd_no, 2586 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2165 mb->mbxCommand, 2587 mb->mbxCommand, phba->pport->port_state,
2166 phba->hba_state, 2588 psli->sli_flag, flag);
2167 psli->sli_flag,
2168 flag);
2169 2589
2170 psli->slistat.mbox_busy++; 2590 psli->slistat.mbox_busy++;
2171 spin_unlock_irqrestore(phba->host->host_lock, 2591 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2172 drvr_flag); 2592
2593 if (pmbox->vport) {
2594 lpfc_debugfs_disc_trc(pmbox->vport,
2595 LPFC_DISC_TRC_MBOX_VPORT,
2596 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
2597 (uint32_t)mb->mbxCommand,
2598 mb->un.varWords[0], mb->un.varWords[1]);
2599 }
2600 else {
2601 lpfc_debugfs_disc_trc(phba->pport,
2602 LPFC_DISC_TRC_MBOX,
2603 "MBOX Bsy: cmd:x%x mb:x%x x%x",
2604 (uint32_t)mb->mbxCommand,
2605 mb->un.varWords[0], mb->un.varWords[1]);
2606 }
2173 2607
2174 return (MBX_BUSY); 2608 return MBX_BUSY;
2175 } 2609 }
2176 2610
2177 /* Handle STOP IOCB processing flag. This is only meaningful 2611 /* Handle STOP IOCB processing flag. This is only meaningful
@@ -2198,11 +2632,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2198 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2632 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2199 (mb->mbxCommand != MBX_KILL_BOARD)) { 2633 (mb->mbxCommand != MBX_KILL_BOARD)) {
2200 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2634 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2201 spin_unlock_irqrestore(phba->host->host_lock, 2635 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2202 drvr_flag);
2203 /* Mbox command <mbxCommand> cannot issue */ 2636 /* Mbox command <mbxCommand> cannot issue */
2204 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2637 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2205 return (MBX_NOT_FINISHED); 2638 return MBX_NOT_FINISHED;
2206 } 2639 }
2207 /* timeout active mbox command */ 2640 /* timeout active mbox command */
2208 mod_timer(&psli->mbox_tmo, (jiffies + 2641 mod_timer(&psli->mbox_tmo, (jiffies +
@@ -2210,15 +2643,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2210 } 2643 }
2211 2644
2212 /* Mailbox cmd <cmd> issue */ 2645 /* Mailbox cmd <cmd> issue */
2213 lpfc_printf_log(phba, 2646 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2214 KERN_INFO, 2647 "%d (%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2215 LOG_MBOX | LOG_SLI, 2648 "x%x\n",
2216 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2649 phba->brd_no, pmbox->vport ? pmbox->vport->vpi : 0,
2217 phba->brd_no, 2650 mb->mbxCommand, phba->pport->port_state,
2218 mb->mbxCommand, 2651 psli->sli_flag, flag);
2219 phba->hba_state, 2652
2220 psli->sli_flag, 2653 if (mb->mbxCommand != MBX_HEARTBEAT) {
2221 flag); 2654 if (pmbox->vport) {
2655 lpfc_debugfs_disc_trc(pmbox->vport,
2656 LPFC_DISC_TRC_MBOX_VPORT,
2657 "MBOX Send vport: cmd:x%x mb:x%x x%x",
2658 (uint32_t)mb->mbxCommand,
2659 mb->un.varWords[0], mb->un.varWords[1]);
2660 }
2661 else {
2662 lpfc_debugfs_disc_trc(phba->pport,
2663 LPFC_DISC_TRC_MBOX,
2664 "MBOX Send: cmd:x%x mb:x%x x%x",
2665 (uint32_t)mb->mbxCommand,
2666 mb->un.varWords[0], mb->un.varWords[1]);
2667 }
2668 }
2222 2669
2223 psli->slistat.mbox_cmd++; 2670 psli->slistat.mbox_cmd++;
2224 evtctr = psli->slistat.mbox_event; 2671 evtctr = psli->slistat.mbox_event;
@@ -2233,7 +2680,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2233 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2680 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2234 /* copy command data into host mbox for cmpl */ 2681 /* copy command data into host mbox for cmpl */
2235 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2682 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2236 MAILBOX_CMD_SIZE); 2683 MAILBOX_CMD_SIZE);
2237 } 2684 }
2238 2685
2239 /* First copy mbox command data to HBA SLIM, skip past first 2686 /* First copy mbox command data to HBA SLIM, skip past first
@@ -2285,12 +2732,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2285 /* Wait for command to complete */ 2732 /* Wait for command to complete */
2286 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2733 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2287 (!(ha_copy & HA_MBATT) && 2734 (!(ha_copy & HA_MBATT) &&
2288 (phba->hba_state > LPFC_WARM_START))) { 2735 (phba->link_state > LPFC_WARM_START))) {
2289 if (i-- <= 0) { 2736 if (i-- <= 0) {
2290 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2737 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2291 spin_unlock_irqrestore(phba->host->host_lock, 2738 spin_unlock_irqrestore(&phba->hbalock,
2292 drvr_flag); 2739 drvr_flag);
2293 return (MBX_NOT_FINISHED); 2740 return MBX_NOT_FINISHED;
2294 } 2741 }
2295 2742
2296 /* Check if we took a mbox interrupt while we were 2743 /* Check if we took a mbox interrupt while we were
@@ -2299,12 +2746,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2299 && (evtctr != psli->slistat.mbox_event)) 2746 && (evtctr != psli->slistat.mbox_event))
2300 break; 2747 break;
2301 2748
2302 spin_unlock_irqrestore(phba->host->host_lock, 2749 spin_unlock_irqrestore(&phba->hbalock,
2303 drvr_flag); 2750 drvr_flag);
2304 2751
2305 msleep(1); 2752 msleep(1);
2306 2753
2307 spin_lock_irqsave(phba->host->host_lock, drvr_flag); 2754 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2308 2755
2309 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2756 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2310 /* First copy command data */ 2757 /* First copy command data */
@@ -2335,7 +2782,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2335 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2782 if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2336 /* copy results back to user */ 2783 /* copy results back to user */
2337 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2784 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2338 MAILBOX_CMD_SIZE); 2785 MAILBOX_CMD_SIZE);
2339 } else { 2786 } else {
2340 /* First copy command data */ 2787 /* First copy command data */
2341 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2788 lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
@@ -2355,23 +2802,25 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2355 status = mb->mbxStatus; 2802 status = mb->mbxStatus;
2356 } 2803 }
2357 2804
2358 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag); 2805 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2359 return (status); 2806 return status;
2360} 2807}
2361 2808
2362static int 2809/*
2363lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 2810 * Caller needs to hold lock.
2364 struct lpfc_iocbq * piocb) 2811 */
2812static void
2813__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2814 struct lpfc_iocbq *piocb)
2365{ 2815{
2366 /* Insert the caller's iocb in the txq tail for later processing. */ 2816 /* Insert the caller's iocb in the txq tail for later processing. */
2367 list_add_tail(&piocb->list, &pring->txq); 2817 list_add_tail(&piocb->list, &pring->txq);
2368 pring->txq_cnt++; 2818 pring->txq_cnt++;
2369 return (0);
2370} 2819}
2371 2820
2372static struct lpfc_iocbq * 2821static struct lpfc_iocbq *
2373lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2822lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2374 struct lpfc_iocbq ** piocb) 2823 struct lpfc_iocbq **piocb)
2375{ 2824{
2376 struct lpfc_iocbq * nextiocb; 2825 struct lpfc_iocbq * nextiocb;
2377 2826
@@ -2384,13 +2833,29 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2384 return nextiocb; 2833 return nextiocb;
2385} 2834}
2386 2835
2836/*
2837 * Lockless version of lpfc_sli_issue_iocb.
2838 */
2387int 2839int
2388lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2840__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2389 struct lpfc_iocbq *piocb, uint32_t flag) 2841 struct lpfc_iocbq *piocb, uint32_t flag)
2390{ 2842{
2391 struct lpfc_iocbq *nextiocb; 2843 struct lpfc_iocbq *nextiocb;
2392 IOCB_t *iocb; 2844 IOCB_t *iocb;
2393 2845
2846 if (piocb->iocb_cmpl && (!piocb->vport) &&
2847 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2848 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2849 lpfc_printf_log(phba, KERN_ERR,
2850 LOG_SLI | LOG_VPORT,
2851 "%d:1807 IOCB x%x failed. No vport\n",
2852 phba->brd_no,
2853 piocb->iocb.ulpCommand);
2854 dump_stack();
2855 return IOCB_ERROR;
2856 }
2857
2858
2394 /* If the PCI channel is in offline state, do not post iocbs. */ 2859 /* If the PCI channel is in offline state, do not post iocbs. */
2395 if (unlikely(pci_channel_offline(phba->pcidev))) 2860 if (unlikely(pci_channel_offline(phba->pcidev)))
2396 return IOCB_ERROR; 2861 return IOCB_ERROR;
@@ -2398,7 +2863,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2398 /* 2863 /*
2399 * We should never get an IOCB if we are in a < LINK_DOWN state 2864 * We should never get an IOCB if we are in a < LINK_DOWN state
2400 */ 2865 */
2401 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 2866 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2402 return IOCB_ERROR; 2867 return IOCB_ERROR;
2403 2868
2404 /* 2869 /*
@@ -2408,7 +2873,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2408 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX)) 2873 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2409 goto iocb_busy; 2874 goto iocb_busy;
2410 2875
2411 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) { 2876 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2412 /* 2877 /*
2413 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 2878 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2414 * can be issued if the link is not up. 2879 * can be issued if the link is not up.
@@ -2436,8 +2901,9 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2436 * attention events. 2901 * attention events.
2437 */ 2902 */
2438 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2903 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2439 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) 2904 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2440 goto iocb_busy; 2905 goto iocb_busy;
2906 }
2441 2907
2442 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2908 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2443 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2909 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
@@ -2459,13 +2925,28 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2459 out_busy: 2925 out_busy:
2460 2926
2461 if (!(flag & SLI_IOCB_RET_IOCB)) { 2927 if (!(flag & SLI_IOCB_RET_IOCB)) {
2462 lpfc_sli_ringtx_put(phba, pring, piocb); 2928 __lpfc_sli_ringtx_put(phba, pring, piocb);
2463 return IOCB_SUCCESS; 2929 return IOCB_SUCCESS;
2464 } 2930 }
2465 2931
2466 return IOCB_BUSY; 2932 return IOCB_BUSY;
2467} 2933}
2468 2934
2935
2936int
2937lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2938 struct lpfc_iocbq *piocb, uint32_t flag)
2939{
2940 unsigned long iflags;
2941 int rc;
2942
2943 spin_lock_irqsave(&phba->hbalock, iflags);
2944 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2945 spin_unlock_irqrestore(&phba->hbalock, iflags);
2946
2947 return rc;
2948}
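The __lpfc_sli_issue_iocb / lpfc_sli_issue_iocb split follows the kernel's double-underscore convention: the underscored function assumes hbalock is already held, and the public wrapper merely brackets it with the lock, so paths that are already inside a locked region can call the __ form without self-deadlocking. The pattern, with hypothetical types:

    #include <linux/spinlock.h>

    struct ring {
        spinlock_t lock;
        /* txq, indices, ... */
    };

    /* Caller must hold ring->lock. */
    static int __ring_submit(struct ring *r, void *req)
    {
        /* manipulate r's queues; the lock is already held */
        return 0;
    }

    static int ring_submit(struct ring *r, void *req)
    {
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&r->lock, flags);
        rc = __ring_submit(r, req);
        spin_unlock_irqrestore(&r->lock, flags);
        return rc;
    }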
2949
2469static int 2950static int
2470lpfc_extra_ring_setup( struct lpfc_hba *phba) 2951lpfc_extra_ring_setup( struct lpfc_hba *phba)
2471{ 2952{
@@ -2504,7 +2985,7 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
2504int 2985int
2505lpfc_sli_setup(struct lpfc_hba *phba) 2986lpfc_sli_setup(struct lpfc_hba *phba)
2506{ 2987{
2507 int i, totiocb = 0; 2988 int i, totiocbsize = 0;
2508 struct lpfc_sli *psli = &phba->sli; 2989 struct lpfc_sli *psli = &phba->sli;
2509 struct lpfc_sli_ring *pring; 2990 struct lpfc_sli_ring *pring;
2510 2991
@@ -2529,6 +3010,12 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2529 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3010 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2530 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3011 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2531 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3012 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3013 pring->sizeCiocb = (phba->sli_rev == 3) ?
3014 SLI3_IOCB_CMD_SIZE :
3015 SLI2_IOCB_CMD_SIZE;
3016 pring->sizeRiocb = (phba->sli_rev == 3) ?
3017 SLI3_IOCB_RSP_SIZE :
3018 SLI2_IOCB_RSP_SIZE;
2532 pring->iotag_ctr = 0; 3019 pring->iotag_ctr = 0;
2533 pring->iotag_max = 3020 pring->iotag_max =
2534 (phba->cfg_hba_queue_depth * 2); 3021 (phba->cfg_hba_queue_depth * 2);
@@ -2539,12 +3026,25 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2539 /* numCiocb and numRiocb are used in config_port */ 3026 /* numCiocb and numRiocb are used in config_port */
2540 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 3027 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2541 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 3028 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3029 pring->sizeCiocb = (phba->sli_rev == 3) ?
3030 SLI3_IOCB_CMD_SIZE :
3031 SLI2_IOCB_CMD_SIZE;
3032 pring->sizeRiocb = (phba->sli_rev == 3) ?
3033 SLI3_IOCB_RSP_SIZE :
3034 SLI2_IOCB_RSP_SIZE;
3035 pring->iotag_max = phba->cfg_hba_queue_depth;
2542 pring->num_mask = 0; 3036 pring->num_mask = 0;
2543 break; 3037 break;
2544 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 3038 case LPFC_ELS_RING: /* ring 2 - ELS / CT */
2545 /* numCiocb and numRiocb are used in config_port */ 3039 /* numCiocb and numRiocb are used in config_port */
2546 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 3040 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2547 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 3041 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3042 pring->sizeCiocb = (phba->sli_rev == 3) ?
3043 SLI3_IOCB_CMD_SIZE :
3044 SLI2_IOCB_CMD_SIZE;
3045 pring->sizeRiocb = (phba->sli_rev == 3) ?
3046 SLI3_IOCB_RSP_SIZE :
3047 SLI2_IOCB_RSP_SIZE;
2548 pring->fast_iotag = 0; 3048 pring->fast_iotag = 0;
2549 pring->iotag_ctr = 0; 3049 pring->iotag_ctr = 0;
2550 pring->iotag_max = 4096; 3050 pring->iotag_max = 4096;
@@ -2575,14 +3075,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2575 lpfc_ct_unsol_event; 3075 lpfc_ct_unsol_event;
2576 break; 3076 break;
2577 } 3077 }
2578 totiocb += (pring->numCiocb + pring->numRiocb); 3078 totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3079 (pring->numRiocb * pring->sizeRiocb);
2579 } 3080 }
2580 if (totiocb > MAX_SLI2_IOCB) { 3081 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
2581 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3082 /* Too many cmd / rsp ring entries in SLI2 SLIM */
2582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2583 "%d:0462 Too many cmd / rsp ring entries in " 3084 "%d:0462 Too many cmd / rsp ring entries in "
2584 "SLI2 SLIM Data: x%x x%x\n", 3085 "SLI2 SLIM Data: x%x x%lx\n",
2585 phba->brd_no, totiocb, MAX_SLI2_IOCB); 3086 phba->brd_no, totiocbsize,
3087 (unsigned long) MAX_SLIM_IOCB_SIZE);
2586 } 3088 }
2587 if (phba->cfg_multi_ring_support == 2) 3089 if (phba->cfg_multi_ring_support == 2)
2588 lpfc_extra_ring_setup(phba); 3090 lpfc_extra_ring_setup(phba);
@@ -2591,15 +3093,16 @@ lpfc_sli_setup(struct lpfc_hba *phba)
2591} 3093}
2592 3094
2593int 3095int
2594lpfc_sli_queue_setup(struct lpfc_hba * phba) 3096lpfc_sli_queue_setup(struct lpfc_hba *phba)
2595{ 3097{
2596 struct lpfc_sli *psli; 3098 struct lpfc_sli *psli;
2597 struct lpfc_sli_ring *pring; 3099 struct lpfc_sli_ring *pring;
2598 int i; 3100 int i;
2599 3101
2600 psli = &phba->sli; 3102 psli = &phba->sli;
2601 spin_lock_irq(phba->host->host_lock); 3103 spin_lock_irq(&phba->hbalock);
2602 INIT_LIST_HEAD(&psli->mboxq); 3104 INIT_LIST_HEAD(&psli->mboxq);
3105 INIT_LIST_HEAD(&psli->mboxq_cmpl);
2603 /* Initialize list headers for txq and txcmplq as double linked lists */ 3106 /* Initialize list headers for txq and txcmplq as double linked lists */
2604 for (i = 0; i < psli->num_rings; i++) { 3107 for (i = 0; i < psli->num_rings; i++) {
2605 pring = &psli->ring[i]; 3108 pring = &psli->ring[i];
@@ -2612,15 +3115,73 @@ lpfc_sli_queue_setup(struct lpfc_hba * phba)
2612 INIT_LIST_HEAD(&pring->iocb_continueq); 3115 INIT_LIST_HEAD(&pring->iocb_continueq);
2613 INIT_LIST_HEAD(&pring->postbufq); 3116 INIT_LIST_HEAD(&pring->postbufq);
2614 } 3117 }
2615 spin_unlock_irq(phba->host->host_lock); 3118 spin_unlock_irq(&phba->hbalock);
2616 return (1); 3119 return 1;
2617} 3120}
2618 3121
2619int 3122int
2620lpfc_sli_hba_down(struct lpfc_hba * phba) 3123lpfc_sli_host_down(struct lpfc_vport *vport)
2621{ 3124{
2622 LIST_HEAD(completions); 3125 LIST_HEAD(completions);
2623 struct lpfc_sli *psli; 3126 struct lpfc_hba *phba = vport->phba;
3127 struct lpfc_sli *psli = &phba->sli;
3128 struct lpfc_sli_ring *pring;
3129 struct lpfc_iocbq *iocb, *next_iocb;
3130 int i;
3131 unsigned long flags = 0;
3132 uint16_t prev_pring_flag;
3133
3134 lpfc_cleanup_discovery_resources(vport);
3135
3136 spin_lock_irqsave(&phba->hbalock, flags);
3137 for (i = 0; i < psli->num_rings; i++) {
3138 pring = &psli->ring[i];
3139 prev_pring_flag = pring->flag;
3140 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3141 pring->flag |= LPFC_DEFERRED_RING_EVENT;
3142 /*
3143 * Error everything on the txq since these iocbs have not been
3144 * given to the FW yet.
3145 */
3146 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3147 if (iocb->vport != vport)
3148 continue;
3149 list_move_tail(&iocb->list, &completions);
3150 pring->txq_cnt--;
3151 }
3152
3153 /* Next issue ABTS for everything on the txcmplq */
3154 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3155 list) {
3156 if (iocb->vport != vport)
3157 continue;
3158 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3159 }
3160
3161 pring->flag = prev_pring_flag;
3162 }
3163
3164 spin_unlock_irqrestore(&phba->hbalock, flags);
3165
3166 while (!list_empty(&completions)) {
3167 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3168
3169 if (!iocb->iocb_cmpl)
3170 lpfc_sli_release_iocbq(phba, iocb);
3171 else {
3172 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3173 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3174 (iocb->iocb_cmpl) (phba, iocb, iocb);
3175 }
3176 }
3177 return 1;
3178}
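lpfc_sli_host_down() is new with NPIV: it tears down one vport's traffic on rings shared by all vports, so instead of splicing whole queues it walks them and moves only the entries whose iocb->vport matches. The selective variant of the splice pattern, with hypothetical types:

    #include <linux/list.h>

    struct vport;

    struct io_req {
        struct list_head list;
        struct vport *vport;    /* owning virtual port */
    };

    /* Move only 'v's entries to 'out'; others keep their order. */
    static void claim_for_vport(struct list_head *txq, struct vport *v,
                                struct list_head *out)
    {
        struct io_req *p, *n;

        list_for_each_entry_safe(p, n, txq, list)
            if (p->vport == v)
                list_move_tail(&p->list, out);
    }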
3179
3180int
3181lpfc_sli_hba_down(struct lpfc_hba *phba)
3182{
3183 LIST_HEAD(completions);
3184 struct lpfc_sli *psli = &phba->sli;
2624 struct lpfc_sli_ring *pring; 3185 struct lpfc_sli_ring *pring;
2625 LPFC_MBOXQ_t *pmb; 3186 LPFC_MBOXQ_t *pmb;
2626 struct lpfc_iocbq *iocb; 3187 struct lpfc_iocbq *iocb;
@@ -2628,13 +3189,15 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2628 int i; 3189 int i;
2629 unsigned long flags = 0; 3190 unsigned long flags = 0;
2630 3191
2631 psli = &phba->sli;
2632 lpfc_hba_down_prep(phba); 3192 lpfc_hba_down_prep(phba);
2633 3193
2634 spin_lock_irqsave(phba->host->host_lock, flags); 3194 lpfc_fabric_abort_hba(phba);
3195
3196 spin_lock_irqsave(&phba->hbalock, flags);
2635 for (i = 0; i < psli->num_rings; i++) { 3197 for (i = 0; i < psli->num_rings; i++) {
2636 pring = &psli->ring[i]; 3198 pring = &psli->ring[i];
2637 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3199 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3200 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2638 3201
2639 /* 3202 /*
2640 * Error everything on the txq since these iocbs have not been 3203 * Error everything on the txq since these iocbs have not been
@@ -2644,51 +3207,50 @@ lpfc_sli_hba_down(struct lpfc_hba * phba)
2644 pring->txq_cnt = 0; 3207 pring->txq_cnt = 0;
2645 3208
2646 } 3209 }
2647 spin_unlock_irqrestore(phba->host->host_lock, flags); 3210 spin_unlock_irqrestore(&phba->hbalock, flags);
2648 3211
2649 while (!list_empty(&completions)) { 3212 while (!list_empty(&completions)) {
2650 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 3213 list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
2651 cmd = &iocb->iocb; 3214 cmd = &iocb->iocb;
2652 list_del(&iocb->list);
2653 3215
2654 if (iocb->iocb_cmpl) { 3216 if (!iocb->iocb_cmpl)
3217 lpfc_sli_release_iocbq(phba, iocb);
3218 else {
2655 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3219 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2656 cmd->un.ulpWord[4] = IOERR_SLI_DOWN; 3220 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2657 (iocb->iocb_cmpl) (phba, iocb, iocb); 3221 (iocb->iocb_cmpl) (phba, iocb, iocb);
2658 } else 3222 }
2659 lpfc_sli_release_iocbq(phba, iocb);
2660 } 3223 }
2661 3224
2662 /* Return any active mbox cmds */ 3225 /* Return any active mbox cmds */
2663 del_timer_sync(&psli->mbox_tmo); 3226 del_timer_sync(&psli->mbox_tmo);
2664 spin_lock_irqsave(phba->host->host_lock, flags); 3227 spin_lock_irqsave(&phba->hbalock, flags);
2665 phba->work_hba_events &= ~WORKER_MBOX_TMO; 3228
3229 spin_lock(&phba->pport->work_port_lock);
3230 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3231 spin_unlock(&phba->pport->work_port_lock);
3232
2666 if (psli->mbox_active) { 3233 if (psli->mbox_active) {
2667 pmb = psli->mbox_active; 3234 list_add_tail(&psli->mbox_active->list, &completions);
2668 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3235 psli->mbox_active = NULL;
2669 if (pmb->mbox_cmpl) { 3236 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2670 spin_unlock_irqrestore(phba->host->host_lock, flags);
2671 pmb->mbox_cmpl(phba,pmb);
2672 spin_lock_irqsave(phba->host->host_lock, flags);
2673 }
2674 } 3237 }
2675 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2676 psli->mbox_active = NULL;
2677 3238
2678 /* Return any pending mbox cmds */ 3239 /* Return any pending or completed mbox cmds */
2679 while ((pmb = lpfc_mbox_get(phba)) != NULL) { 3240 list_splice_init(&phba->sli.mboxq, &completions);
3241 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3242 INIT_LIST_HEAD(&psli->mboxq);
3243 INIT_LIST_HEAD(&psli->mboxq_cmpl);
3244
3245 spin_unlock_irqrestore(&phba->hbalock, flags);
3246
3247 while (!list_empty(&completions)) {
3248 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
2680 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3249 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2681 if (pmb->mbox_cmpl) { 3250 if (pmb->mbox_cmpl) {
2682 spin_unlock_irqrestore(phba->host->host_lock, flags);
2683 pmb->mbox_cmpl(phba,pmb); 3251 pmb->mbox_cmpl(phba,pmb);
2684 spin_lock_irqsave(phba->host->host_lock, flags);
2685 } 3252 }
2686 } 3253 }
2687
2688 INIT_LIST_HEAD(&psli->mboxq);
2689
2690 spin_unlock_irqrestore(phba->host->host_lock, flags);
2691
2692 return 1; 3254 return 1;
2693} 3255}
2694 3256
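The rework above is an instance of the locking pattern this patch applies throughout lpfc_sli.c: detach pending work onto a private list while holding hbalock, drop the lock, then run the completions with no lock held. A minimal sketch of the pattern (list_remove_head is the driver's own helper macro):

        LIST_HEAD(completions);
        struct lpfc_iocbq *iocb;
        unsigned long flags;

        spin_lock_irqsave(&phba->hbalock, flags);
        list_splice_init(&pring->txq, &completions);    /* steal the queue */
        pring->txq_cnt = 0;
        spin_unlock_irqrestore(&phba->hbalock, flags);

        /* the list is now private to this thread; complete without the lock */
        while (!list_empty(&completions)) {
                list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
                if (!iocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, iocb);
                else {
                        iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
                        iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                }
        }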
@@ -2710,14 +3272,15 @@ lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2710} 3272}
2711 3273
2712int 3274int
2713lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring, 3275lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2714 struct lpfc_dmabuf * mp) 3276 struct lpfc_dmabuf *mp)
2715{ 3277{
2716 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 3278 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2717 later */ 3279 later */
3280 spin_lock_irq(&phba->hbalock);
2718 list_add_tail(&mp->list, &pring->postbufq); 3281 list_add_tail(&mp->list, &pring->postbufq);
2719
2720 pring->postbufq_cnt++; 3282 pring->postbufq_cnt++;
3283 spin_unlock_irq(&phba->hbalock);
2721 return 0; 3284 return 0;
2722} 3285}
2723 3286
@@ -2730,14 +3293,17 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2730 struct list_head *slp = &pring->postbufq; 3293 struct list_head *slp = &pring->postbufq;
2731 3294
2732 /* Search postbufq, from the beginning, looking for a match on phys */ 3295 /* Search postbufq, from the beginning, looking for a match on phys */
3296 spin_lock_irq(&phba->hbalock);
2733 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 3297 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2734 if (mp->phys == phys) { 3298 if (mp->phys == phys) {
2735 list_del_init(&mp->list); 3299 list_del_init(&mp->list);
2736 pring->postbufq_cnt--; 3300 pring->postbufq_cnt--;
3301 spin_unlock_irq(&phba->hbalock);
2737 return mp; 3302 return mp;
2738 } 3303 }
2739 } 3304 }
2740 3305
3306 spin_unlock_irq(&phba->hbalock);
2741 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2742 "%d:0410 Cannot find virtual addr for mapped buf on " 3308 "%d:0410 Cannot find virtual addr for mapped buf on "
2743 "ring %d Data x%llx x%p x%p x%x\n", 3309 "ring %d Data x%llx x%p x%p x%x\n",
@@ -2747,92 +3313,110 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2747} 3313}
2748 3314
2749static void 3315static void
2750lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3316lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2751 struct lpfc_iocbq * rspiocb) 3317 struct lpfc_iocbq *rspiocb)
2752{ 3318{
2753 IOCB_t *irsp; 3319 IOCB_t *irsp = &rspiocb->iocb;
2754 uint16_t abort_iotag, abort_context; 3320 uint16_t abort_iotag, abort_context;
2755 struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb; 3321 struct lpfc_iocbq *abort_iocb;
2756 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3322 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
2757 3323
2758 abort_iocb = NULL; 3324 abort_iocb = NULL;
2759 irsp = &rspiocb->iocb;
2760
2761 spin_lock_irq(phba->host->host_lock);
2762 3325
2763 if (irsp->ulpStatus) { 3326 if (irsp->ulpStatus) {
2764 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 3327 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
2765 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 3328 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
2766 3329
3330 spin_lock_irq(&phba->hbalock);
2767 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 3331 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
2768 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 3332 abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
2769 3333
2770 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3334 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
2771 "%d:0327 Cannot abort els iocb %p" 3335 "%d:0327 Cannot abort els iocb %p "
2772 " with tag %x context %x\n", 3336 "with tag %x context %x, abort status %x, "
2773 phba->brd_no, abort_iocb, 3337 "abort code %x\n",
2774 abort_iotag, abort_context); 3338 phba->brd_no, abort_iocb, abort_iotag,
3339 abort_context, irsp->ulpStatus,
3340 irsp->un.ulpWord[4]);
2775 3341
2776 /* 3342 /*
2777 * make sure we have the right iocbq before taking it 3343 * make sure we have the right iocbq before taking it
2778 * off the txcmplq and try to call completion routine. 3344 * off the txcmplq and try to call completion routine.
2779 */ 3345 */
2780 if (abort_iocb && 3346 if (!abort_iocb ||
2781 abort_iocb->iocb.ulpContext == abort_context && 3347 abort_iocb->iocb.ulpContext != abort_context ||
2782 abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) { 3348 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
2783 list_del(&abort_iocb->list); 3349 spin_unlock_irq(&phba->hbalock);
3350 else {
3351 list_del_init(&abort_iocb->list);
2784 pring->txcmplq_cnt--; 3352 pring->txcmplq_cnt--;
3353 spin_unlock_irq(&phba->hbalock);
2785 3354
2786 rsp_ab_iocb = lpfc_sli_get_iocbq(phba); 3355 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
2787 if (rsp_ab_iocb == NULL) 3356 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
2788 lpfc_sli_release_iocbq(phba, abort_iocb); 3357 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
2789 else { 3358 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
2790 abort_iocb->iocb_flag &=
2791 ~LPFC_DRIVER_ABORTED;
2792 rsp_ab_iocb->iocb.ulpStatus =
2793 IOSTAT_LOCAL_REJECT;
2794 rsp_ab_iocb->iocb.un.ulpWord[4] =
2795 IOERR_SLI_ABORTED;
2796 spin_unlock_irq(phba->host->host_lock);
2797 (abort_iocb->iocb_cmpl)
2798 (phba, abort_iocb, rsp_ab_iocb);
2799 spin_lock_irq(phba->host->host_lock);
2800 lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
2801 }
2802 } 3359 }
2803 } 3360 }
2804 3361
2805 lpfc_sli_release_iocbq(phba, cmdiocb); 3362 lpfc_sli_release_iocbq(phba, cmdiocb);
2806 spin_unlock_irq(phba->host->host_lock); 3363 return;
3364}
3365
3366static void
3367lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3368 struct lpfc_iocbq *rspiocb)
3369{
3370 IOCB_t *irsp = &rspiocb->iocb;
3371
3372 /* ELS cmd tag <ulpIoTag> completes */
3373 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3374 "%d (X):0133 Ignoring ELS cmd tag x%x completion Data: "
3375 "x%x x%x x%x\n",
3376 phba->brd_no, irsp->ulpIoTag, irsp->ulpStatus,
3377 irsp->un.ulpWord[4], irsp->ulpTimeout);
3378 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3379 lpfc_ct_free_iocb(phba, cmdiocb);
3380 else
3381 lpfc_els_free_iocb(phba, cmdiocb);
2807 return; 3382 return;
2808} 3383}
2809 3384
2810int 3385int
2811lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba, 3386lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2812 struct lpfc_sli_ring * pring, 3387 struct lpfc_iocbq *cmdiocb)
2813 struct lpfc_iocbq * cmdiocb)
2814{ 3388{
3389 struct lpfc_vport *vport = cmdiocb->vport;
2815 struct lpfc_iocbq *abtsiocbp; 3390 struct lpfc_iocbq *abtsiocbp;
2816 IOCB_t *icmd = NULL; 3391 IOCB_t *icmd = NULL;
2817 IOCB_t *iabt = NULL; 3392 IOCB_t *iabt = NULL;
2818 int retval = IOCB_ERROR; 3393 int retval = IOCB_ERROR;
2819 3394
2820 /* There are certain command types we don't want 3395 /*
2821 * to abort. 3396 * There are certain command types we don't want to abort. And we
3397 * don't want to abort commands that are already in the process of
3398 * being aborted.
2822 */ 3399 */
2823 icmd = &cmdiocb->iocb; 3400 icmd = &cmdiocb->iocb;
2824 if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) || 3401 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
2825 (icmd->ulpCommand == CMD_CLOSE_XRI_CN)) 3402 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3403 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
2826 return 0; 3404 return 0;
2827 3405
2828 /* If we're unloading, interrupts are disabled so we 3406 /* If we're unloading, don't abort iocb on the ELS ring, but change the
2829 * need to cleanup the iocb here. 3407 * callback so that nothing happens when it finishes.
2830 */ 3408 */
2831 if (phba->fc_flag & FC_UNLOADING) 3409 if ((vport->load_flag & FC_UNLOADING) &&
3410 (pring->ringno == LPFC_ELS_RING)) {
3411 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3412 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3413 else
3414 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
2832 goto abort_iotag_exit; 3415 goto abort_iotag_exit;
3416 }
2833 3417
2834 /* issue ABTS for this IOCB based on iotag */ 3418 /* issue ABTS for this IOCB based on iotag */
2835 abtsiocbp = lpfc_sli_get_iocbq(phba); 3419 abtsiocbp = __lpfc_sli_get_iocbq(phba);
2836 if (abtsiocbp == NULL) 3420 if (abtsiocbp == NULL)
2837 return 0; 3421 return 0;
2838 3422
@@ -2848,7 +3432,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
2848 iabt->ulpLe = 1; 3432 iabt->ulpLe = 1;
2849 iabt->ulpClass = icmd->ulpClass; 3433 iabt->ulpClass = icmd->ulpClass;
2850 3434
2851 if (phba->hba_state >= LPFC_LINK_UP) 3435 if (phba->link_state >= LPFC_LINK_UP)
2852 iabt->ulpCommand = CMD_ABORT_XRI_CN; 3436 iabt->ulpCommand = CMD_ABORT_XRI_CN;
2853 else 3437 else
2854 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 3438 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
@@ -2856,32 +3440,20 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
2856 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3440 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
2857 3441
2858 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2859 "%d:0339 Abort xri x%x, original iotag x%x, abort " 3443 "%d (%d):0339 Abort xri x%x, original iotag x%x, "
2860 "cmd iotag x%x\n", 3444 "abort cmd iotag x%x\n",
2861 phba->brd_no, iabt->un.acxri.abortContextTag, 3445 phba->brd_no, vport->vpi,
3446 iabt->un.acxri.abortContextTag,
2862 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3447 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
2863 retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3448 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
2864 3449
2865abort_iotag_exit: 3450abort_iotag_exit:
2866 3451 /*
2867 /* If we could not issue an abort dequeue the iocb and handle 3452 * Caller to this routine should check for IOCB_ERROR
2868 * the completion here. 3453 * and handle it properly. This routine no longer removes
3454 * iocb off txcmplq and call compl in case of IOCB_ERROR.
2869 */ 3455 */
2870 if (retval == IOCB_ERROR) { 3456 return retval;
2871 list_del(&cmdiocb->list);
2872 pring->txcmplq_cnt--;
2873
2874 if (cmdiocb->iocb_cmpl) {
2875 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2876 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2877 spin_unlock_irq(phba->host->host_lock);
2878 (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
2879 spin_lock_irq(phba->host->host_lock);
2880 } else
2881 lpfc_sli_release_iocbq(phba, cmdiocb);
2882 }
2883
2884 return 1;
2885} 3457}
2886 3458
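Since abort_iotag_exit no longer dequeues the iocb and runs its completion on failure, that responsibility moves to the caller, which already holds hbalock (the routine now uses the lock-free __lpfc_sli_get_iocbq/__lpfc_sli_issue_iocb variants). A hedged sketch of the expected calling pattern:

        /* caller holds phba->hbalock */
        retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
        if (retval == IOCB_ERROR) {
                /* the iocb is still on the txcmplq: the caller decides
                 * whether to dequeue it and invoke iocb_cmpl with
                 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, or to retry */
        }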
2887static int 3459static int
@@ -2930,7 +3502,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2930 3502
2931int 3503int
2932lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3504lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2933 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) 3505 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2934{ 3506{
2935 struct lpfc_iocbq *iocbq; 3507 struct lpfc_iocbq *iocbq;
2936 int sum, i; 3508 int sum, i;
@@ -2947,14 +3519,10 @@ lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2947} 3519}
2948 3520
2949void 3521void
2950lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, 3522lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2951 struct lpfc_iocbq * rspiocb) 3523 struct lpfc_iocbq *rspiocb)
2952{ 3524{
2953 unsigned long iflags;
2954
2955 spin_lock_irqsave(phba->host->host_lock, iflags);
2956 lpfc_sli_release_iocbq(phba, cmdiocb); 3525 lpfc_sli_release_iocbq(phba, cmdiocb);
2957 spin_unlock_irqrestore(phba->host->host_lock, iflags);
2958 return; 3526 return;
2959} 3527}
2960 3528
@@ -2972,8 +3540,8 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2972 for (i = 1; i <= phba->sli.last_iotag; i++) { 3540 for (i = 1; i <= phba->sli.last_iotag; i++) {
2973 iocbq = phba->sli.iocbq_lookup[i]; 3541 iocbq = phba->sli.iocbq_lookup[i];
2974 3542
2975 if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id, 3543 if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id, 0,
2976 0, abort_cmd) != 0) 3544 abort_cmd) != 0)
2977 continue; 3545 continue;
2978 3546
2979 /* issue ABTS for this IOCB based on iotag */ 3547 /* issue ABTS for this IOCB based on iotag */
@@ -2989,8 +3557,9 @@ lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2989 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 3557 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2990 abtsiocb->iocb.ulpLe = 1; 3558 abtsiocb->iocb.ulpLe = 1;
2991 abtsiocb->iocb.ulpClass = cmd->ulpClass; 3559 abtsiocb->iocb.ulpClass = cmd->ulpClass;
3560 abtsiocb->vport = phba->pport;
2992 3561
2993 if (phba->hba_state >= LPFC_LINK_UP) 3562 if (lpfc_is_link_up(phba))
2994 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 3563 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2995 else 3564 else
2996 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 3565 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -3016,16 +3585,16 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3016 wait_queue_head_t *pdone_q; 3585 wait_queue_head_t *pdone_q;
3017 unsigned long iflags; 3586 unsigned long iflags;
3018 3587
3019 spin_lock_irqsave(phba->host->host_lock, iflags); 3588 spin_lock_irqsave(&phba->hbalock, iflags);
3020 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3589 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3021 if (cmdiocbq->context2 && rspiocbq) 3590 if (cmdiocbq->context2 && rspiocbq)
3022 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3591 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3023 &rspiocbq->iocb, sizeof(IOCB_t)); 3592 &rspiocbq->iocb, sizeof(IOCB_t));
3024 3593
3025 pdone_q = cmdiocbq->context_un.wait_queue; 3594 pdone_q = cmdiocbq->context_un.wait_queue;
3026 spin_unlock_irqrestore(phba->host->host_lock, iflags);
3027 if (pdone_q) 3595 if (pdone_q)
3028 wake_up(pdone_q); 3596 wake_up(pdone_q);
3597 spin_unlock_irqrestore(&phba->hbalock, iflags);
3029 return; 3598 return;
3030} 3599}
3031 3600
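Note that the wake_up() now happens before the lock is dropped. Once LPFC_IO_WAKE is visible, the waiter in lpfc_sli_issue_iocb_wait may return and free the iocb, so the flag update, the response copy, and the wake must form one critical section. The ordering that matters (sketch):

        spin_lock_irqsave(&phba->hbalock, iflags);
        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;    /* publish completion */
        /* copy the response while the iocb is guaranteed to be alive */
        pdone_q = cmdiocbq->context_un.wait_queue;
        if (pdone_q)
                wake_up(pdone_q);               /* still under hbalock */
        spin_unlock_irqrestore(&phba->hbalock, iflags);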
@@ -3035,11 +3604,12 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3035 * lpfc_sli_issue_call since the wake routine sets a unique value and by 3604 * lpfc_sli_issue_call since the wake routine sets a unique value and by
3036 * definition this is a wait function. 3605 * definition this is a wait function.
3037 */ 3606 */
3607
3038int 3608int
3039lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba, 3609lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3040 struct lpfc_sli_ring * pring, 3610 struct lpfc_sli_ring *pring,
3041 struct lpfc_iocbq * piocb, 3611 struct lpfc_iocbq *piocb,
3042 struct lpfc_iocbq * prspiocbq, 3612 struct lpfc_iocbq *prspiocbq,
3043 uint32_t timeout) 3613 uint32_t timeout)
3044{ 3614{
3045 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3615 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
@@ -3071,11 +3641,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3071 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 3641 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3072 if (retval == IOCB_SUCCESS) { 3642 if (retval == IOCB_SUCCESS) {
3073 timeout_req = timeout * HZ; 3643 timeout_req = timeout * HZ;
3074 spin_unlock_irq(phba->host->host_lock);
3075 timeleft = wait_event_timeout(done_q, 3644 timeleft = wait_event_timeout(done_q,
3076 piocb->iocb_flag & LPFC_IO_WAKE, 3645 piocb->iocb_flag & LPFC_IO_WAKE,
3077 timeout_req); 3646 timeout_req);
3078 spin_lock_irq(phba->host->host_lock);
3079 3647
3080 if (piocb->iocb_flag & LPFC_IO_WAKE) { 3648 if (piocb->iocb_flag & LPFC_IO_WAKE) {
3081 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3649 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3117,16 +3685,16 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3117} 3685}
3118 3686
3119int 3687int
3120lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq, 3688lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3121 uint32_t timeout) 3689 uint32_t timeout)
3122{ 3690{
3123 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3691 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3124 int retval; 3692 int retval;
3693 unsigned long flag;
3125 3694
3126 /* The caller must leave context1 empty. */ 3695 /* The caller must leave context1 empty. */
3127 if (pmboxq->context1 != 0) { 3696 if (pmboxq->context1 != 0)
3128 return (MBX_NOT_FINISHED); 3697 return MBX_NOT_FINISHED;
3129 }
3130 3698
3131 /* setup wake call as IOCB callback */ 3699 /* setup wake call as IOCB callback */
3132 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3700 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
@@ -3141,6 +3709,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3141 pmboxq->mbox_flag & LPFC_MBX_WAKE, 3709 pmboxq->mbox_flag & LPFC_MBX_WAKE,
3142 timeout * HZ); 3710 timeout * HZ);
3143 3711
3712 spin_lock_irqsave(&phba->hbalock, flag);
3144 pmboxq->context1 = NULL; 3713 pmboxq->context1 = NULL;
3145 /* 3714 /*
3146 * if LPFC_MBX_WAKE flag is set the mailbox is completed 3715 * if LPFC_MBX_WAKE flag is set the mailbox is completed
@@ -3148,8 +3717,11 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3148 */ 3717 */
3149 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) 3718 if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3150 retval = MBX_SUCCESS; 3719 retval = MBX_SUCCESS;
3151 else 3720 else {
3152 retval = MBX_TIMEOUT; 3721 retval = MBX_TIMEOUT;
3722 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3723 }
3724 spin_unlock_irqrestore(&phba->hbalock, flag);
3153 } 3725 }
3154 3726
3155 return retval; 3727 return retval;
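The timeout branch retargets mbox_cmpl under hbalock so a mailbox that completes after the waiter has given up is quietly absorbed by lpfc_sli_def_mbox_cmpl instead of writing through a stale context1. The synchronization skeleton, reduced to its essentials (a sketch; the wait-macro flavor follows the function above):

        /* completion side (lpfc_sli_wake_mbox_wait) */
        pmboxq->mbox_flag |= LPFC_MBX_WAKE;
        wake_up(done_q);

        /* waiting side, after the wait expires or is woken */
        spin_lock_irqsave(&phba->hbalock, flag);
        pmboxq->context1 = NULL;
        if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
                retval = MBX_SUCCESS;
        else {
                retval = MBX_TIMEOUT;
                /* a late completion now lands in the default handler */
                pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        }
        spin_unlock_irqrestore(&phba->hbalock, flag);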
@@ -3158,14 +3730,27 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3158int 3730int
3159lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 3731lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3160{ 3732{
3733 struct lpfc_vport *vport = phba->pport;
3161 int i = 0; 3734 int i = 0;
3735 uint32_t ha_copy;
3162 3736
3163 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) { 3737 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3164 if (i++ > LPFC_MBOX_TMO * 1000) 3738 if (i++ > LPFC_MBOX_TMO * 1000)
3165 return 1; 3739 return 1;
3166 3740
3167 if (lpfc_sli_handle_mb_event(phba) == 0) 3741 /*
3168 i = 0; 3742 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3743 * did finish. This way we won't get the misleading
3744 * "Stray Mailbox Interrupt" message.
3745 */
3746 spin_lock_irq(&phba->hbalock);
3747 ha_copy = phba->work_ha;
3748 phba->work_ha &= ~HA_MBATT;
3749 spin_unlock_irq(&phba->hbalock);
3750
3751 if (ha_copy & HA_MBATT)
3752 if (lpfc_sli_handle_mb_event(phba) == 0)
3753 i = 0;
3169 3754
3170 msleep(1); 3755 msleep(1);
3171 } 3756 }
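The snapshot-and-clear of work_ha is what makes the comment above hold: the HA_MBATT bit is consumed exactly once under hbalock, so lpfc_sli_handle_mb_event only runs when a mailbox attention really fired. The idiom in isolation (sketch):

        uint32_t ha_copy;

        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;        /* snapshot pending attentions */
        phba->work_ha &= ~HA_MBATT;     /* consume the mailbox bit */
        spin_unlock_irq(&phba->hbalock);

        if (ha_copy & HA_MBATT)         /* poll only on a real attention */
                if (lpfc_sli_handle_mb_event(phba) == 0)
                        i = 0;          /* progress: restart the timeout */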
@@ -3176,13 +3761,20 @@ lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3176irqreturn_t 3761irqreturn_t
3177lpfc_intr_handler(int irq, void *dev_id) 3762lpfc_intr_handler(int irq, void *dev_id)
3178{ 3763{
3179 struct lpfc_hba *phba; 3764 struct lpfc_hba *phba;
3180 uint32_t ha_copy; 3765 uint32_t ha_copy;
3181 uint32_t work_ha_copy; 3766 uint32_t work_ha_copy;
3182 unsigned long status; 3767 unsigned long status;
3183 int i; 3768 int i;
3184 uint32_t control; 3769 uint32_t control;
3185 3770
3771 MAILBOX_t *mbox, *pmbox;
3772 struct lpfc_vport *vport;
3773 struct lpfc_nodelist *ndlp;
3774 struct lpfc_dmabuf *mp;
3775 LPFC_MBOXQ_t *pmb;
3776 int rc;
3777
3186 /* 3778 /*
3187 * Get the driver's phba structure from the dev_id and 3779 * Get the driver's phba structure from the dev_id and
3188 * assume the HBA is not interrupting. 3780 * assume the HBA is not interrupting.
@@ -3204,7 +3796,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3204 */ 3796 */
3205 3797
3206 /* Ignore all interrupts during initialization. */ 3798 /* Ignore all interrupts during initialization. */
3207 if (unlikely(phba->hba_state < LPFC_LINK_DOWN)) 3799 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3208 return IRQ_NONE; 3800 return IRQ_NONE;
3209 3801
3210 /* 3802 /*
@@ -3212,16 +3804,16 @@ lpfc_intr_handler(int irq, void *dev_id)
3212 * Clear Attention Sources, except Error Attention (to 3804 * Clear Attention Sources, except Error Attention (to
3213 * preserve status) and Link Attention 3805 * preserve status) and Link Attention
3214 */ 3806 */
3215 spin_lock(phba->host->host_lock); 3807 spin_lock(&phba->hbalock);
3216 ha_copy = readl(phba->HAregaddr); 3808 ha_copy = readl(phba->HAregaddr);
3217 /* If somebody is waiting to handle an eratt don't process it 3809 /* If somebody is waiting to handle an eratt don't process it
3218 * here. The brdkill function will do this. 3810 * here. The brdkill function will do this.
3219 */ 3811 */
3220 if (phba->fc_flag & FC_IGNORE_ERATT) 3812 if (phba->link_flag & LS_IGNORE_ERATT)
3221 ha_copy &= ~HA_ERATT; 3813 ha_copy &= ~HA_ERATT;
3222 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 3814 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3223 readl(phba->HAregaddr); /* flush */ 3815 readl(phba->HAregaddr); /* flush */
3224 spin_unlock(phba->host->host_lock); 3816 spin_unlock(&phba->hbalock);
3225 3817
3226 if (unlikely(!ha_copy)) 3818 if (unlikely(!ha_copy))
3227 return IRQ_NONE; 3819 return IRQ_NONE;
@@ -3235,36 +3827,41 @@ lpfc_intr_handler(int irq, void *dev_id)
3235 * Turn off Link Attention interrupts 3827 * Turn off Link Attention interrupts
3236 * until CLEAR_LA done 3828 * until CLEAR_LA done
3237 */ 3829 */
3238 spin_lock(phba->host->host_lock); 3830 spin_lock(&phba->hbalock);
3239 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 3831 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3240 control = readl(phba->HCregaddr); 3832 control = readl(phba->HCregaddr);
3241 control &= ~HC_LAINT_ENA; 3833 control &= ~HC_LAINT_ENA;
3242 writel(control, phba->HCregaddr); 3834 writel(control, phba->HCregaddr);
3243 readl(phba->HCregaddr); /* flush */ 3835 readl(phba->HCregaddr); /* flush */
3244 spin_unlock(phba->host->host_lock); 3836 spin_unlock(&phba->hbalock);
3245 } 3837 }
3246 else 3838 else
3247 work_ha_copy &= ~HA_LATT; 3839 work_ha_copy &= ~HA_LATT;
3248 } 3840 }
3249 3841
3250 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { 3842 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3251 for (i = 0; i < phba->sli.num_rings; i++) { 3843 /*
3252 if (work_ha_copy & (HA_RXATT << (4*i))) { 3844 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3253 /* 3845 * the only slow ring.
3254 * Turn off Slow Rings interrupts 3846 */
3255 */ 3847 status = (work_ha_copy &
3256 spin_lock(phba->host->host_lock); 3848 (HA_RXMASK << (4*LPFC_ELS_RING)));
3257 control = readl(phba->HCregaddr); 3849 status >>= (4*LPFC_ELS_RING);
3258 control &= ~(HC_R0INT_ENA << i); 3850 if (status & HA_RXMASK) {
3851 spin_lock(&phba->hbalock);
3852 control = readl(phba->HCregaddr);
3853 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
3854 control &=
3855 ~(HC_R0INT_ENA << LPFC_ELS_RING);
3259 writel(control, phba->HCregaddr); 3856 writel(control, phba->HCregaddr);
3260 readl(phba->HCregaddr); /* flush */ 3857 readl(phba->HCregaddr); /* flush */
3261 spin_unlock(phba->host->host_lock);
3262 } 3858 }
3859 spin_unlock(&phba->hbalock);
3263 } 3860 }
3264 } 3861 }
3265 3862
3266 if (work_ha_copy & HA_ERATT) { 3863 if (work_ha_copy & HA_ERATT) {
3267 phba->hba_state = LPFC_HBA_ERROR; 3864 phba->link_state = LPFC_HBA_ERROR;
3268 /* 3865 /*
3269 * There was a link/board error. Read the 3866 * There was a link/board error. Read the
3270 * status register to retrieve the error event 3867 * status register to retrieve the error event
@@ -3279,14 +3876,108 @@ lpfc_intr_handler(int irq, void *dev_id)
3279 /* Clear Chip error bit */ 3876 /* Clear Chip error bit */
3280 writel(HA_ERATT, phba->HAregaddr); 3877 writel(HA_ERATT, phba->HAregaddr);
3281 readl(phba->HAregaddr); /* flush */ 3878 readl(phba->HAregaddr); /* flush */
3282 phba->stopped = 1; 3879 phba->pport->stopped = 1;
3880 }
3881
3882 if ((work_ha_copy & HA_MBATT) &&
3883 (phba->sli.mbox_active)) {
3884 pmb = phba->sli.mbox_active;
3885 pmbox = &pmb->mb;
3886 mbox = &phba->slim2p->mbx;
3887 vport = pmb->vport;
3888
3889 /* First check out the status word */
3890 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3891 if (pmbox->mbxOwner != OWN_HOST) {
3892 /*
3893 * Stray Mailbox Interrupt, mbxCommand <cmd>
3894 * mbxStatus <status>
3895 */
3896 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3897 LOG_SLI,
3898 "%d (%d):0304 Stray Mailbox "
3899 "Interrupt mbxCommand x%x "
3900 "mbxStatus x%x\n",
3901 phba->brd_no,
3902 (vport
3903 ? vport->vpi : 0),
3904 pmbox->mbxCommand,
3905 pmbox->mbxStatus);
3906 }
3907 phba->last_completion_time = jiffies;
3908 del_timer_sync(&phba->sli.mbox_tmo);
3909
3910 phba->sli.mbox_active = NULL;
3911 if (pmb->mbox_cmpl) {
3912 lpfc_sli_pcimem_bcopy(mbox, pmbox,
3913 MAILBOX_CMD_SIZE);
3914 }
3915 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3916 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3917
3918 lpfc_debugfs_disc_trc(vport,
3919 LPFC_DISC_TRC_MBOX_VPORT,
3920 "MBOX dflt rpi: : status:x%x rpi:x%x",
3921 (uint32_t)pmbox->mbxStatus,
3922 pmbox->un.varWords[0], 0);
3923
3924 if ( !pmbox->mbxStatus) {
3925 mp = (struct lpfc_dmabuf *)
3926 (pmb->context1);
3927 ndlp = (struct lpfc_nodelist *)
3928 pmb->context2;
3929
3930 /* Reg_LOGIN of dflt RPI was successful.
 3931 * Now let's get rid of the RPI using the
3932 * same mbox buffer.
3933 */
3934 lpfc_unreg_login(phba, vport->vpi,
3935 pmbox->un.varWords[0], pmb);
3936 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3937 pmb->context1 = mp;
3938 pmb->context2 = ndlp;
3939 pmb->vport = vport;
3940 spin_lock(&phba->hbalock);
3941 phba->sli.sli_flag &=
3942 ~LPFC_SLI_MBOX_ACTIVE;
3943 spin_unlock(&phba->hbalock);
3944 goto send_current_mbox;
3945 }
3946 }
3947 spin_lock(&phba->pport->work_port_lock);
3948 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3949 spin_unlock(&phba->pport->work_port_lock);
3950 lpfc_mbox_cmpl_put(phba, pmb);
3951 }
3952 if ((work_ha_copy & HA_MBATT) &&
3953 (phba->sli.mbox_active == NULL)) {
3954send_next_mbox:
3955 spin_lock(&phba->hbalock);
3956 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3957 pmb = lpfc_mbox_get(phba);
3958 spin_unlock(&phba->hbalock);
3959send_current_mbox:
3960 /* Process next mailbox command if there is one */
3961 if (pmb != NULL) {
3962 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3963 if (rc == MBX_NOT_FINISHED) {
3964 pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3965 lpfc_mbox_cmpl_put(phba, pmb);
3966 goto send_next_mbox;
3967 }
3968 } else {
3969 /* Turn on IOCB processing */
3970 for (i = 0; i < phba->sli.num_rings; i++)
3971 lpfc_sli_turn_on_ring(phba, i);
3972 }
3973
3283 } 3974 }
3284 3975
3285 spin_lock(phba->host->host_lock); 3976 spin_lock(&phba->hbalock);
3286 phba->work_ha |= work_ha_copy; 3977 phba->work_ha |= work_ha_copy;
3287 if (phba->work_wait) 3978 if (phba->work_wait)
3288 wake_up(phba->work_wait); 3979 lpfc_worker_wake_up(phba);
3289 spin_unlock(phba->host->host_lock); 3980 spin_unlock(&phba->hbalock);
3290 } 3981 }
3291 3982
3292 ha_copy &= ~(phba->work_ha_mask); 3983 ha_copy &= ~(phba->work_ha_mask);
@@ -3298,7 +3989,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3298 */ 3989 */
3299 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 3990 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
3300 status >>= (4*LPFC_FCP_RING); 3991 status >>= (4*LPFC_FCP_RING);
3301 if (status & HA_RXATT) 3992 if (status & HA_RXMASK)
3302 lpfc_sli_handle_fast_ring_event(phba, 3993 lpfc_sli_handle_fast_ring_event(phba,
3303 &phba->sli.ring[LPFC_FCP_RING], 3994 &phba->sli.ring[LPFC_FCP_RING],
3304 status); 3995 status);
@@ -3311,7 +4002,7 @@ lpfc_intr_handler(int irq, void *dev_id)
3311 */ 4002 */
3312 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 4003 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
3313 status >>= (4*LPFC_EXTRA_RING); 4004 status >>= (4*LPFC_EXTRA_RING);
3314 if (status & HA_RXATT) { 4005 if (status & HA_RXMASK) {
3315 lpfc_sli_handle_fast_ring_event(phba, 4006 lpfc_sli_handle_fast_ring_event(phba,
3316 &phba->sli.ring[LPFC_EXTRA_RING], 4007 &phba->sli.ring[LPFC_EXTRA_RING],
3317 status); 4008 status);
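The recurring shift-and-mask in the interrupt handler reflects the HA register layout: four attention bits per ring, with HA_RXMASK covering the receive bits. Generalized over a fast-ring number (sketch):

        /* isolate ring `ringno`'s 4 attention bits from the HA snapshot */
        status = ha_copy & (HA_RXMASK << (4 * ringno));
        status >>= (4 * ringno);        /* ring's bits now in 3:0 */
        if (status & HA_RXMASK)
                lpfc_sli_handle_fast_ring_event(phba,
                                                &phba->sli.ring[ringno],
                                                status);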
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 41c38d324ab0..76058505795e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -20,6 +20,7 @@
20 20
21/* forward declaration for LPFC_IOCB_t's use */ 21/* forward declaration for LPFC_IOCB_t's use */
22struct lpfc_hba; 22struct lpfc_hba;
23struct lpfc_vport;
23 24
24/* Define the context types that SLI handles for abort and sums. */ 25/* Define the context types that SLI handles for abort and sums. */
25typedef enum _lpfc_ctx_cmd { 26typedef enum _lpfc_ctx_cmd {
@@ -43,10 +44,12 @@ struct lpfc_iocbq {
43#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */ 44#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
44#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */ 45#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
45#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */ 46#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
47#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
46 48
47 uint8_t abort_count; 49 uint8_t abort_count;
48 uint8_t rsvd2; 50 uint8_t rsvd2;
49 uint32_t drvrTimeout; /* driver timeout in seconds */ 51 uint32_t drvrTimeout; /* driver timeout in seconds */
52 struct lpfc_vport *vport;/* virtual port pointer */
50 void *context1; /* caller context information */ 53 void *context1; /* caller context information */
51 void *context2; /* caller context information */ 54 void *context2; /* caller context information */
52 void *context3; /* caller context information */ 55 void *context3; /* caller context information */
@@ -56,6 +59,8 @@ struct lpfc_iocbq {
56 struct lpfcMboxq *mbox; 59 struct lpfcMboxq *mbox;
57 } context_un; 60 } context_un;
58 61
62 void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
63 struct lpfc_iocbq *);
59 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, 64 void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
60 struct lpfc_iocbq *); 65 struct lpfc_iocbq *);
61 66
@@ -68,12 +73,14 @@ struct lpfc_iocbq {
68#define IOCB_ERROR 2 73#define IOCB_ERROR 2
69#define IOCB_TIMEDOUT 3 74#define IOCB_TIMEDOUT 3
70 75
71#define LPFC_MBX_WAKE 1 76#define LPFC_MBX_WAKE 1
77#define LPFC_MBX_IMED_UNREG 2
72 78
73typedef struct lpfcMboxq { 79typedef struct lpfcMboxq {
74 /* MBOXQs are used in single linked lists */ 80 /* MBOXQs are used in single linked lists */
75 struct list_head list; /* ptr to next mailbox command */ 81 struct list_head list; /* ptr to next mailbox command */
76 MAILBOX_t mb; /* Mailbox cmd */ 82 MAILBOX_t mb; /* Mailbox cmd */
 83 struct lpfc_vport *vport;/* virtual port pointer */
77 void *context1; /* caller context information */ 84 void *context1; /* caller context information */
78 void *context2; /* caller context information */ 85 void *context2; /* caller context information */
79 86
@@ -135,6 +142,8 @@ struct lpfc_sli_ring {
135 uint8_t ringno; /* ring number */ 142 uint8_t ringno; /* ring number */
136 uint16_t numCiocb; /* number of command iocb's per ring */ 143 uint16_t numCiocb; /* number of command iocb's per ring */
137 uint16_t numRiocb; /* number of rsp iocb's per ring */ 144 uint16_t numRiocb; /* number of rsp iocb's per ring */
145 uint16_t sizeCiocb; /* Size of command iocb's in this ring */
146 uint16_t sizeRiocb; /* Size of response iocb's in this ring */
138 147
139 uint32_t fast_iotag; /* max fastlookup based iotag */ 148 uint32_t fast_iotag; /* max fastlookup based iotag */
140 uint32_t iotag_ctr; /* keeps track of the next iotag to use */ 149 uint32_t iotag_ctr; /* keeps track of the next iotag to use */
@@ -165,6 +174,34 @@ struct lpfc_sli_ring {
165 struct lpfc_sli_ring *); 174 struct lpfc_sli_ring *);
166}; 175};
167 176
177/* Structure used for configuring rings to a specific profile or rctl / type */
178struct lpfc_hbq_init {
179 uint32_t rn; /* Receive buffer notification */
180 uint32_t entry_count; /* max # of entries in HBQ */
181 uint32_t headerLen; /* 0 if not profile 4 or 5 */
182 uint32_t logEntry; /* Set to 1 if this HBQ used for LogEntry */
183 uint32_t profile; /* Selection profile 0=all, 7=logentry */
184 uint32_t ring_mask; /* Binds HBQ to a ring e.g. Ring0=b0001,
185 * ring2=b0100 */
186 uint32_t hbq_index; /* index of this hbq in ring .HBQs[] */
187
188 uint32_t seqlenoff;
189 uint32_t maxlen;
190 uint32_t seqlenbcnt;
191 uint32_t cmdcodeoff;
192 uint32_t cmdmatch[8];
193 uint32_t mask_count; /* number of mask entries in prt array */
194 struct hbq_mask hbqMasks[6];
195
 196 /* Non-config ring fields to keep track of buffer allocations */
197 uint32_t buffer_count; /* number of buffers allocated */
198 uint32_t init_count; /* number to allocate when initialized */
199 uint32_t add_count; /* number to allocate when starved */
200} ;
201
202#define LPFC_MAX_HBQ 16
203
204
168/* Structure used to hold SLI statistical counters and info */ 205/* Structure used to hold SLI statistical counters and info */
169struct lpfc_sli_stat { 206struct lpfc_sli_stat {
170 uint64_t mbox_stat_err; /* Mbox cmds completed status error */ 207 uint64_t mbox_stat_err; /* Mbox cmds completed status error */
@@ -197,6 +234,7 @@ struct lpfc_sli {
197#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ 234#define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */
198#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ 235#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */
199#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ 236#define LPFC_PROCESS_LA 0x400 /* Able to process link attention */
237#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
200 238
201 struct lpfc_sli_ring ring[LPFC_MAX_RING]; 239 struct lpfc_sli_ring ring[LPFC_MAX_RING];
202 int fcp_ring; /* ring used for FCP initiator commands */ 240 int fcp_ring; /* ring used for FCP initiator commands */
@@ -209,6 +247,7 @@ struct lpfc_sli {
209 uint16_t mboxq_cnt; /* current length of queue */ 247 uint16_t mboxq_cnt; /* current length of queue */
210 uint16_t mboxq_max; /* max length */ 248 uint16_t mboxq_max; /* max length */
211 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */ 249 LPFC_MBOXQ_t *mbox_active; /* active mboxq information */
250 struct list_head mboxq_cmpl;
212 251
213 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox 252 struct timer_list mbox_tmo; /* Hold clk to timeout active mbox
214 cmd */ 253 cmd */
@@ -221,12 +260,6 @@ struct lpfc_sli {
221 struct lpfc_lnk_stat lnk_stat_offsets; 260 struct lpfc_lnk_stat lnk_stat_offsets;
222}; 261};
223 262
224/* Given a pointer to the start of the ring, and the slot number of
225 * the desired iocb entry, calc a pointer to that entry.
226 * (assume iocb entry size is 32 bytes, or 8 words)
227 */
228#define IOCB_ENTRY(ring,slot) ((IOCB_t *)(((char *)(ring)) + ((slot) * 32)))
229
230#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 263#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
231 command */ 264 command */
232#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 265#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
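The removed IOCB_ENTRY() macro hard-coded 32-byte iocb entries; with sizeCiocb/sizeRiocb now carried per ring, entry addressing becomes size-aware. A hypothetical helper showing the shape of the replacement (cmdringaddr and sizeCiocb are fields of struct lpfc_sli_ring; the function name is illustrative):

        static inline IOCB_t *
        lpfc_cmd_iocb_entry(struct lpfc_sli_ring *pring, uint32_t slot)
        {
                /* per-ring command iocb size instead of a fixed 32 bytes */
                return (IOCB_t *) (((char *) pring->cmdringaddr) +
                                   slot * pring->sizeCiocb);
        }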
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 92a9107019d2..a5bc79eef052 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.1.12" 21#define LPFC_DRIVER_VERSION "8.2.1"
22 22
23#define LPFC_DRIVER_NAME "lpfc" 23#define LPFC_DRIVER_NAME "lpfc"
24 24
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
new file mode 100644
index 000000000000..85797dbf5478
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -0,0 +1,523 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#include <linux/blkdev.h>
23#include <linux/delay.h>
24#include <linux/dma-mapping.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kthread.h>
28#include <linux/pci.h>
29#include <linux/spinlock.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_transport_fc.h>
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_version.h"
43#include "lpfc_vport.h"
44
45inline void lpfc_vport_set_state(struct lpfc_vport *vport,
46 enum fc_vport_state new_state)
47{
48 struct fc_vport *fc_vport = vport->fc_vport;
49
50 if (fc_vport) {
51 /*
 52 * When the transport defines fc_vport_set_state we will replace
53 * this code with the following line
54 */
55 /* fc_vport_set_state(fc_vport, new_state); */
56 if (new_state != FC_VPORT_INITIALIZING)
57 fc_vport->vport_last_state = fc_vport->vport_state;
58 fc_vport->vport_state = new_state;
59 }
60
 61 /* for all the error states we will set the internal state to FAILED */
62 switch (new_state) {
63 case FC_VPORT_NO_FABRIC_SUPP:
64 case FC_VPORT_NO_FABRIC_RSCS:
65 case FC_VPORT_FABRIC_LOGOUT:
66 case FC_VPORT_FABRIC_REJ_WWN:
67 case FC_VPORT_FAILED:
68 vport->port_state = LPFC_VPORT_FAILED;
69 break;
70 case FC_VPORT_LINKDOWN:
71 vport->port_state = LPFC_VPORT_UNKNOWN;
72 break;
73 default:
74 /* do nothing */
75 break;
76 }
77}
78
79static int
80lpfc_alloc_vpi(struct lpfc_hba *phba)
81{
82 int vpi;
83
84 spin_lock_irq(&phba->hbalock);
85 /* Start at bit 1 because vpi zero is reserved for the physical port */
86 vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
87 if (vpi > phba->max_vpi)
88 vpi = 0;
89 else
90 set_bit(vpi, phba->vpi_bmask);
91 spin_unlock_irq(&phba->hbalock);
92 return vpi;
93}
94
95static void
96lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
97{
98 spin_lock_irq(&phba->hbalock);
99 clear_bit(vpi, phba->vpi_bmask);
100 spin_unlock_irq(&phba->hbalock);
101}
102
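Because find_next_zero_bit starts its scan at bit 1, vpi 0 can double as the failure value while staying reserved for the physical port. A usage sketch, matching lpfc_vport_create below:

        int vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0)
                return VPORT_NORESOURCES;  /* bits 1..max_vpi all taken */
        /* ... bring the vport up using this vpi ... */
        lpfc_free_vpi(phba, vpi);          /* give the bit back on teardown */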
103static int
104lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
105{
106 LPFC_MBOXQ_t *pmb;
107 MAILBOX_t *mb;
108 struct lpfc_dmabuf *mp;
109 int rc;
110
111 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
112 if (!pmb) {
113 return -ENOMEM;
114 }
115 mb = &pmb->mb;
116
117 lpfc_read_sparam(phba, pmb, vport->vpi);
118 /*
119 * Grab buffer pointer and clear context1 so we can use
 120 * lpfc_sli_issue_mbox_wait
121 */
122 mp = (struct lpfc_dmabuf *) pmb->context1;
123 pmb->context1 = NULL;
124
125 pmb->vport = vport;
126 rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
127 if (rc != MBX_SUCCESS) {
128 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
129 "%d (%d):1818 VPort failed init, mbxCmd x%x "
130 "READ_SPARM mbxStatus x%x, rc = x%x\n",
131 phba->brd_no, vport->vpi,
132 mb->mbxCommand, mb->mbxStatus, rc);
133 lpfc_mbuf_free(phba, mp->virt, mp->phys);
134 kfree(mp);
135 if (rc != MBX_TIMEOUT)
136 mempool_free(pmb, phba->mbox_mem_pool);
137 return -EIO;
138 }
139
140 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
141 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
142 sizeof (struct lpfc_name));
143 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
144 sizeof (struct lpfc_name));
145
146 lpfc_mbuf_free(phba, mp->virt, mp->phys);
147 kfree(mp);
148 mempool_free(pmb, phba->mbox_mem_pool);
149
150 return 0;
151}
152
153static int
154lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
155 const char *name_type)
156{
157 /* ensure that IEEE format 1 addresses
158 * contain zeros in bits 59-48
159 */
160 if (!((wwn->u.wwn[0] >> 4) == 1 &&
161 ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
162 return 1;
163
164 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
165 "%d:1822 Invalid %s: %02x:%02x:%02x:%02x:"
166 "%02x:%02x:%02x:%02x\n",
167 phba->brd_no, name_type,
168 wwn->u.wwn[0], wwn->u.wwn[1],
169 wwn->u.wwn[2], wwn->u.wwn[3],
170 wwn->u.wwn[4], wwn->u.wwn[5],
171 wwn->u.wwn[6], wwn->u.wwn[7]);
172 return 0;
173}
174
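The test above only constrains format-1 (IEEE) names: the NAA field is wwn[0]'s high nibble, and the check then requires the pad nibbles to be zero. Note that it inspects bits 59-56 (low nibble of wwn[0]) and bits 51-48 (low nibble of wwn[1]), not the full 59-48 range the comment mentions. Unfolded (sketch):

        /* wwn[0] holds bits 63..56, wwn[1] holds bits 55..48 */
        if ((wwn->u.wwn[0] >> 4) == 1 &&        /* NAA == 1: IEEE format */
            ((wwn->u.wwn[0] & 0xf) != 0 ||      /* bits 59..56 nonzero, or */
             (wwn->u.wwn[1] & 0xf) != 0))       /* bits 51..48 nonzero */
                return 0;                       /* reject: pad must be zero */
        return 1;                               /* valid (or not format 1) */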
175static int
176lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
177{
178 struct lpfc_vport *vport;
179
180 list_for_each_entry(vport, &phba->port_list, listentry) {
181 if (vport == new_vport)
182 continue;
183 /* If they match, return not unique */
184 if (memcmp(&vport->fc_sparam.portName,
185 &new_vport->fc_sparam.portName,
186 sizeof(struct lpfc_name)) == 0)
187 return 0;
188 }
189 return 1;
190}
191
192int
193lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
194{
195 struct lpfc_nodelist *ndlp;
196 struct lpfc_vport *pport =
197 (struct lpfc_vport *) fc_vport->shost->hostdata;
198 struct lpfc_hba *phba = pport->phba;
199 struct lpfc_vport *vport = NULL;
200 int instance;
201 int vpi;
202 int rc = VPORT_ERROR;
203
204 if ((phba->sli_rev < 3) ||
205 !(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
206 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
207 "%d:1808 Create VPORT failed: "
208 "NPIV is not enabled: SLImode:%d\n",
209 phba->brd_no, phba->sli_rev);
210 rc = VPORT_INVAL;
211 goto error_out;
212 }
213
214 vpi = lpfc_alloc_vpi(phba);
215 if (vpi == 0) {
216 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
217 "%d:1809 Create VPORT failed: "
218 "Max VPORTs (%d) exceeded\n",
219 phba->brd_no, phba->max_vpi);
220 rc = VPORT_NORESOURCES;
221 goto error_out;
222 }
223
224
225 /* Assign an unused board number */
226 if ((instance = lpfc_get_instance()) < 0) {
227 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
228 "%d:1810 Create VPORT failed: Cannot get "
229 "instance number\n", phba->brd_no);
230 lpfc_free_vpi(phba, vpi);
231 rc = VPORT_NORESOURCES;
232 goto error_out;
233 }
234
235 vport = lpfc_create_port(phba, instance, fc_vport);
236 if (!vport) {
237 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
238 "%d:1811 Create VPORT failed: vpi x%x\n",
239 phba->brd_no, vpi);
240 lpfc_free_vpi(phba, vpi);
241 rc = VPORT_NORESOURCES;
242 goto error_out;
243 }
244
245 vport->vpi = vpi;
246 lpfc_debugfs_initialize(vport);
247
248 if (lpfc_vport_sparm(phba, vport)) {
249 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
250 "%d:1813 Create VPORT failed: vpi:%d "
251 "Cannot get sparam\n",
252 phba->brd_no, vpi);
253 lpfc_free_vpi(phba, vpi);
254 destroy_port(vport);
255 rc = VPORT_NORESOURCES;
256 goto error_out;
257 }
258
259 memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
260 memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
261
262 if (fc_vport->node_name != 0)
263 u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
264 if (fc_vport->port_name != 0)
265 u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
266
267 memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
268 memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
269
270 if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
271 !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
272 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
273 "%d:1821 Create VPORT failed: vpi:%d "
274 "Invalid WWN format\n",
275 phba->brd_no, vpi);
276 lpfc_free_vpi(phba, vpi);
277 destroy_port(vport);
278 rc = VPORT_INVAL;
279 goto error_out;
280 }
281
282 if (!lpfc_unique_wwpn(phba, vport)) {
283 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
284 "%d:1823 Create VPORT failed: vpi:%d "
285 "Duplicate WWN on HBA\n",
286 phba->brd_no, vpi);
287 lpfc_free_vpi(phba, vpi);
288 destroy_port(vport);
289 rc = VPORT_INVAL;
290 goto error_out;
291 }
292
293 *(struct lpfc_vport **)fc_vport->dd_data = vport;
294 vport->fc_vport = fc_vport;
295
296 if ((phba->link_state < LPFC_LINK_UP) ||
297 (phba->fc_topology == TOPOLOGY_LOOP)) {
298 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
299 rc = VPORT_OK;
300 goto out;
301 }
302
303 if (disable) {
304 rc = VPORT_OK;
305 goto out;
306 }
307
 308 /* Use the Physical node's Fabric NDLP to determine if the link is
309 * up and ready to FDISC.
310 */
311 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
312 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
313 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
314 lpfc_set_disctmo(vport);
315 lpfc_initial_fdisc(vport);
316 } else {
317 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
318 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
319 "%d (%d):0262 No NPIV Fabric "
320 "support\n",
321 phba->brd_no, vport->vpi);
322 }
323 } else {
324 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
325 }
326 rc = VPORT_OK;
327
328out:
329 lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
330error_out:
331 return rc;
332}
333
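The error paths in lpfc_vport_create release exactly what has been acquired so far before bailing out; condensed, the cleanup ladder is (sketch):

        vpi = lpfc_alloc_vpi(phba);
        if (vpi == 0)
                goto error_out;                 /* nothing to undo yet */

        vport = lpfc_create_port(phba, instance, fc_vport);
        if (!vport) {
                lpfc_free_vpi(phba, vpi);       /* undo the vpi only */
                goto error_out;
        }

        if (lpfc_vport_sparm(phba, vport)) {    /* or any later failure */
                lpfc_free_vpi(phba, vpi);
                destroy_port(vport);            /* undo both */
                goto error_out;
        }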
334int
335disable_vport(struct fc_vport *fc_vport)
336{
337 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
338 struct lpfc_hba *phba = vport->phba;
339 struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
340 long timeout;
341
342 ndlp = lpfc_findnode_did(vport, Fabric_DID);
343 if (ndlp && phba->link_state >= LPFC_LINK_UP) {
344 vport->unreg_vpi_cmpl = VPORT_INVAL;
345 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
346 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
347 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
348 timeout = schedule_timeout(timeout);
349 }
350
351 lpfc_sli_host_down(vport);
352
353 /* Mark all nodes for discovery so we can remove them by
354 * calling lpfc_cleanup_rpis(vport, 1)
355 */
356 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
357 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
358 continue;
359 lpfc_disc_state_machine(vport, ndlp, NULL,
360 NLP_EVT_DEVICE_RECOVERY);
361 }
362 lpfc_cleanup_rpis(vport, 1);
363
364 lpfc_stop_vport_timers(vport);
365 lpfc_unreg_all_rpis(vport);
366 lpfc_unreg_default_rpis(vport);
367 /*
368 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
369 * scsi_host_put() to release the vport.
370 */
371 lpfc_mbx_unreg_vpi(vport);
372
373 lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
374 return VPORT_OK;
375}
376
377int
378enable_vport(struct fc_vport *fc_vport)
379{
380 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
381 struct lpfc_hba *phba = vport->phba;
382 struct lpfc_nodelist *ndlp = NULL;
383
384 if ((phba->link_state < LPFC_LINK_UP) ||
385 (phba->fc_topology == TOPOLOGY_LOOP)) {
386 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
387 return VPORT_OK;
388 }
389
390 vport->load_flag |= FC_LOADING;
391 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
392
 393 /* Use the Physical node's Fabric NDLP to determine if the link is
394 * up and ready to FDISC.
395 */
396 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
397 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
398 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
399 lpfc_set_disctmo(vport);
400 lpfc_initial_fdisc(vport);
401 } else {
402 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
403 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
404 "%d (%d):0264 No NPIV Fabric "
405 "support\n",
406 phba->brd_no, vport->vpi);
407 }
408 } else {
409 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
410 }
411
412 return VPORT_OK;
413}
414
415int
416lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
417{
418 if (disable)
419 return disable_vport(fc_vport);
420 else
421 return enable_vport(fc_vport);
422}
423
424
425int
426lpfc_vport_delete(struct fc_vport *fc_vport)
427{
428 struct lpfc_nodelist *ndlp = NULL;
429 struct lpfc_nodelist *next_ndlp;
430 struct Scsi_Host *shost = (struct Scsi_Host *) fc_vport->shost;
431 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
432 struct lpfc_hba *phba = vport->phba;
433 long timeout;
434 int rc = VPORT_ERROR;
435
436 /*
437 * This is a bit of a mess. We want to ensure the shost doesn't get
438 * torn down until we're done with the embedded lpfc_vport structure.
439 *
440 * Beyond holding a reference for this function, we also need a
441 * reference for outstanding I/O requests we schedule during delete
442 * processing. But once we scsi_remove_host() we can no longer obtain
443 * a reference through scsi_host_get().
444 *
445 * So we take two references here. We release one reference at the
446 * bottom of the function -- after delinking the vport. And we
 447 * release the other at the completion of the unreg_vpi that gets
448 * initiated after we've disposed of all other resources associated
449 * with the port.
450 */
451 if (!scsi_host_get(shost) || !scsi_host_get(shost))
452 return VPORT_INVAL;
453
454 if (vport->port_type == LPFC_PHYSICAL_PORT) {
455 lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
456 "%d:1812 vport_delete failed: Cannot delete "
457 "physical host\n", phba->brd_no);
458 goto out;
459 }
460
461 vport->load_flag |= FC_UNLOADING;
462
463 kfree(vport->vname);
464 lpfc_debugfs_terminate(vport);
465 fc_remove_host(lpfc_shost_from_vport(vport));
466 scsi_remove_host(lpfc_shost_from_vport(vport));
467
468 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
469 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
470 phba->link_state >= LPFC_LINK_UP) {
471
472 /* First look for the Fabric ndlp */
473 ndlp = lpfc_findnode_did(vport, Fabric_DID);
474 if (!ndlp) {
475 /* Cannot find existing Fabric ndlp, allocate one */
476 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
477 if (!ndlp)
478 goto skip_logo;
479 lpfc_nlp_init(vport, ndlp, Fabric_DID);
480 } else {
481 lpfc_dequeue_node(vport, ndlp);
482 }
483 vport->unreg_vpi_cmpl = VPORT_INVAL;
484 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
485 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
486 while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
487 timeout = schedule_timeout(timeout);
488 }
489
490skip_logo:
491 lpfc_sli_host_down(vport);
492
493 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
494 lpfc_disc_state_machine(vport, ndlp, NULL,
495 NLP_EVT_DEVICE_RECOVERY);
496 lpfc_disc_state_machine(vport, ndlp, NULL,
497 NLP_EVT_DEVICE_RM);
498 }
499
500 lpfc_stop_vport_timers(vport);
501 lpfc_unreg_all_rpis(vport);
502 lpfc_unreg_default_rpis(vport);
503 /*
504 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
505 * scsi_host_put() to release the vport.
506 */
507 lpfc_mbx_unreg_vpi(vport);
508
509 lpfc_free_vpi(phba, vport->vpi);
510 vport->work_port_events = 0;
511 spin_lock_irq(&phba->hbalock);
512 list_del_init(&vport->listentry);
513 spin_unlock_irq(&phba->hbalock);
514
515 rc = VPORT_OK;
516out:
517 scsi_host_put(shost);
518 return rc;
519}
520
521
522EXPORT_SYMBOL(lpfc_vport_create);
523EXPORT_SYMBOL(lpfc_vport_delete);
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
new file mode 100644
index 000000000000..f223550f8cba
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -0,0 +1,113 @@
1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2006 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22#ifndef _H_LPFC_VPORT
23#define _H_LPFC_VPORT
24
25/* API version values (each will be an individual bit) */
26#define VPORT_API_VERSION_1 0x01
27
28/* Values returned via lpfc_vport_getinfo() */
29struct vport_info {
30
31 uint32_t api_versions;
32 uint8_t linktype;
33#define VPORT_TYPE_PHYSICAL 0
34#define VPORT_TYPE_VIRTUAL 1
35
36 uint8_t state;
37#define VPORT_STATE_OFFLINE 0
38#define VPORT_STATE_ACTIVE 1
39#define VPORT_STATE_FAILED 2
40
41 uint8_t fail_reason;
42 uint8_t prev_fail_reason;
43#define VPORT_FAIL_UNKNOWN 0
44#define VPORT_FAIL_LINKDOWN 1
45#define VPORT_FAIL_FAB_UNSUPPORTED 2
46#define VPORT_FAIL_FAB_NORESOURCES 3
47#define VPORT_FAIL_FAB_LOGOUT 4
48#define VPORT_FAIL_ADAP_NORESOURCES 5
49
50 uint8_t node_name[8]; /* WWNN */
51 uint8_t port_name[8]; /* WWPN */
52
53 struct Scsi_Host *shost;
54
55/* Following values are valid only on physical links */
56 uint32_t vports_max;
57 uint32_t vports_inuse;
58 uint32_t rpi_max;
59 uint32_t rpi_inuse;
60#define VPORT_CNT_INVALID 0xFFFFFFFF
61};
62
63/* data used in link creation */
64struct vport_data {
65 uint32_t api_version;
66
67 uint32_t options;
68#define VPORT_OPT_AUTORETRY 0x01
69
70 uint8_t node_name[8]; /* WWNN */
71 uint8_t port_name[8]; /* WWPN */
72
73/*
74 * Upon successful creation, vport_shost will point to the new Scsi_Host
75 * structure for the new virtual link.
76 */
77 struct Scsi_Host *vport_shost;
78};
79
80/* API function return codes */
81#define VPORT_OK 0
82#define VPORT_ERROR -1
83#define VPORT_INVAL -2
84#define VPORT_NOMEM -3
85#define VPORT_NORESOURCES -4
86
87int lpfc_vport_create(struct fc_vport *, bool);
88int lpfc_vport_delete(struct fc_vport *);
89int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *);
90int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint);
91
92/*
93 * queuecommand VPORT-specific return codes. Specified in the host byte code.
94 * Returned when the virtual link has failed or is not active.
95 */
96#define DID_VPORT_ERROR 0x0f
97
98#define VPORT_INFO 0x1
99#define VPORT_CREATE 0x2
100#define VPORT_DELETE 0x4
101
102struct vport_cmd_tag {
103 uint32_t cmd;
104 struct vport_data cdata;
105 struct vport_info cinfo;
106 void *vport;
107 int vport_num;
108};
109
110void lpfc_vport_set_state(struct lpfc_vport *vport,
111 enum fc_vport_state new_state);
112
113#endif /* H_LPFC_VPORT */
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 5806ede120a4..b12ad7c7c673 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -77,7 +77,7 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
77 for (i = 0; i < cmd->cmd_len; ++i) 77 for (i = 0; i < cmd->cmd_len; ++i)
78 printk(" %.2x", cmd->cmnd[i]); 78 printk(" %.2x", cmd->cmnd[i]);
79 printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", 79 printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
80 cmd->use_sg, cmd->request_bufflen, cmd->request_buffer); 80 scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
81 } 81 }
82#endif 82#endif
83 83
@@ -173,8 +173,7 @@ static void mac53c94_start(struct fsc_state *state)
173 writeb(CMD_SELECT, &regs->command); 173 writeb(CMD_SELECT, &regs->command);
174 state->phase = selecting; 174 state->phase = selecting;
175 175
176 if (cmd->use_sg > 0 || cmd->request_bufflen != 0) 176 set_dma_cmds(state, cmd);
177 set_dma_cmds(state, cmd);
178} 177}
179 178
180static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id) 179static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
@@ -262,7 +261,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
262 writeb(CMD_NOP, &regs->command); 261 writeb(CMD_NOP, &regs->command);
263 /* set DMA controller going if any data to transfer */ 262 /* set DMA controller going if any data to transfer */
264 if ((stat & (STAT_MSG|STAT_CD)) == 0 263 if ((stat & (STAT_MSG|STAT_CD)) == 0
265 && (cmd->use_sg > 0 || cmd->request_bufflen != 0)) { 264 && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
266 nb = cmd->SCp.this_residual; 265 nb = cmd->SCp.this_residual;
267 if (nb > 0xfff0) 266 if (nb > 0xfff0)
268 nb = 0xfff0; 267 nb = 0xfff0;
@@ -310,14 +309,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
310 printk(KERN_DEBUG "intr %x before data xfer complete\n", intr); 309 printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
311 } 310 }
312 writel(RUN << 16, &dma->control); /* stop dma */ 311 writel(RUN << 16, &dma->control); /* stop dma */
313 if (cmd->use_sg != 0) { 312 scsi_dma_unmap(cmd);
314 pci_unmap_sg(state->pdev,
315 (struct scatterlist *)cmd->request_buffer,
316 cmd->use_sg, cmd->sc_data_direction);
317 } else {
318 pci_unmap_single(state->pdev, state->dma_addr,
319 cmd->request_bufflen, cmd->sc_data_direction);
320 }
321 /* should check dma status */ 313 /* should check dma status */
322 writeb(CMD_I_COMPLETE, &regs->command); 314 writeb(CMD_I_COMPLETE, &regs->command);
323 state->phase = completing; 315 state->phase = completing;
@@ -365,47 +357,35 @@ static void cmd_done(struct fsc_state *state, int result)
365 */ 357 */
366static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) 358static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
367{ 359{
368 int i, dma_cmd, total; 360 int i, dma_cmd, total, nseg;
369 struct scatterlist *scl; 361 struct scatterlist *scl;
370 struct dbdma_cmd *dcmds; 362 struct dbdma_cmd *dcmds;
371 dma_addr_t dma_addr; 363 dma_addr_t dma_addr;
372 u32 dma_len; 364 u32 dma_len;
373 365
366 nseg = scsi_dma_map(cmd);
367 BUG_ON(nseg < 0);
368 if (!nseg)
369 return;
370
374 dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ? 371 dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
375 OUTPUT_MORE : INPUT_MORE; 372 OUTPUT_MORE : INPUT_MORE;
376 dcmds = state->dma_cmds; 373 dcmds = state->dma_cmds;
377 if (cmd->use_sg > 0) { 374 total = 0;
378 int nseg; 375
379 376 scsi_for_each_sg(cmd, scl, nseg, i) {
380 total = 0; 377 dma_addr = sg_dma_address(scl);
381 scl = (struct scatterlist *) cmd->request_buffer; 378 dma_len = sg_dma_len(scl);
382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, 379 if (dma_len > 0xffff)
383 cmd->sc_data_direction); 380 panic("mac53c94: scatterlist element >= 64k");
384 for (i = 0; i < nseg; ++i) { 381 total += dma_len;
385 dma_addr = sg_dma_address(scl); 382 st_le16(&dcmds->req_count, dma_len);
386 dma_len = sg_dma_len(scl); 383 st_le16(&dcmds->command, dma_cmd);
387 if (dma_len > 0xffff)
388 panic("mac53c94: scatterlist element >= 64k");
389 total += dma_len;
390 st_le16(&dcmds->req_count, dma_len);
391 st_le16(&dcmds->command, dma_cmd);
392 st_le32(&dcmds->phy_addr, dma_addr);
393 dcmds->xfer_status = 0;
394 ++scl;
395 ++dcmds;
396 }
397 } else {
398 total = cmd->request_bufflen;
399 if (total > 0xffff)
400 panic("mac53c94: transfer size >= 64k");
401 dma_addr = pci_map_single(state->pdev, cmd->request_buffer,
402 total, cmd->sc_data_direction);
403 state->dma_addr = dma_addr;
404 st_le16(&dcmds->req_count, total);
405 st_le32(&dcmds->phy_addr, dma_addr); 384 st_le32(&dcmds->phy_addr, dma_addr);
406 dcmds->xfer_status = 0; 385 dcmds->xfer_status = 0;
407 ++dcmds; 386 ++dcmds;
408 } 387 }
388
409 dma_cmd += OUTPUT_LAST - OUTPUT_MORE; 389 dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
410 st_le16(&dcmds[-1].command, dma_cmd); 390 st_le16(&dcmds[-1].command, dma_cmd);
411 st_le16(&dcmds->command, DBDMA_STOP); 391 st_le16(&dcmds->command, DBDMA_STOP);
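
The mac53c94 hunks above show the shape every conversion in this merge follows: scsi_dma_map() subsumes both the pci_map_sg() and pci_map_single() paths, scsi_for_each_sg() walks whatever the midlayer mapped, and one scsi_dma_unmap() replaces the two unmap branches. Reduced to a sketch (fill_sge() is a hypothetical stand-in for a driver's segment setup):

        static int example_map_data(struct scsi_cmnd *cmd)
        {
                struct scatterlist *sg;
                int nseg, i;

                nseg = scsi_dma_map(cmd);   /* 0 = no data, < 0 = mapping error */
                if (nseg <= 0)
                        return nseg;

                scsi_for_each_sg(cmd, sg, nseg, i)
                        /* fill_sge(): stand-in for programming one HW segment */
                        fill_sge(i, sg_dma_address(sg), sg_dma_len(sg));

                return nseg;
        }

        static void example_complete(struct scsi_cmnd *cmd)
        {
                scsi_dma_unmap(cmd);        /* no-op if nothing was mapped */
                cmd->scsi_done(cmd);
        }

Because the mapping now lives in the command itself, per-driver single-buffer bookkeeping (state->dma_addr in this driver) can simply be deleted.
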
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3cce75d70263..3907f6718ede 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -523,10 +523,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
523 /* 523 /*
524 * filter the internal and ioctl commands 524 * filter the internal and ioctl commands
525 */ 525 */
526 if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) { 526 if((cmd->cmnd[0] == MEGA_INTERNAL_CMD))
527 return cmd->request_buffer; 527 return (scb_t *)cmd->host_scribble;
528 }
529
530 528
531 /* 529 /*
532 * We know what channels our logical drives are on - mega_find_card() 530 * We know what channels our logical drives are on - mega_find_card()
@@ -657,22 +655,14 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
657 655
658 case MODE_SENSE: { 656 case MODE_SENSE: {
659 char *buf; 657 char *buf;
658 struct scatterlist *sg;
660 659
661 if (cmd->use_sg) { 660 sg = scsi_sglist(cmd);
662 struct scatterlist *sg; 661 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
663 662
664 sg = (struct scatterlist *)cmd->request_buffer;
665 buf = kmap_atomic(sg->page, KM_IRQ0) +
666 sg->offset;
667 } else
668 buf = cmd->request_buffer;
669 memset(buf, 0, cmd->cmnd[4]); 663 memset(buf, 0, cmd->cmnd[4]);
670 if (cmd->use_sg) { 664 kunmap_atomic(buf - sg->offset, KM_IRQ0);
671 struct scatterlist *sg;
672 665
673 sg = (struct scatterlist *)cmd->request_buffer;
674 kunmap_atomic(buf - sg->offset, KM_IRQ0);
675 }
676 cmd->result = (DID_OK << 16); 666 cmd->result = (DID_OK << 16);
677 cmd->scsi_done(cmd); 667 cmd->scsi_done(cmd);
678 return NULL; 668 return NULL;
@@ -1551,23 +1541,15 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1551 islogical = adapter->logdrv_chan[cmd->device->channel]; 1541 islogical = adapter->logdrv_chan[cmd->device->channel];
1552 if( cmd->cmnd[0] == INQUIRY && !islogical ) { 1542 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1553 1543
1554 if( cmd->use_sg ) { 1544 sgl = scsi_sglist(cmd);
1555 sgl = (struct scatterlist *) 1545 if( sgl->page ) {
1556 cmd->request_buffer; 1546 c = *(unsigned char *)
1557
1558 if( sgl->page ) {
1559 c = *(unsigned char *)
1560 page_address((&sgl[0])->page) + 1547 page_address((&sgl[0])->page) +
1561 (&sgl[0])->offset; 1548 (&sgl[0])->offset;
1562 } 1549 } else {
1563 else { 1550 printk(KERN_WARNING
1564 printk(KERN_WARNING 1551 "megaraid: invalid sg.\n");
1565 "megaraid: invalid sg.\n"); 1552 c = 0;
1566 c = 0;
1567 }
1568 }
1569 else {
1570 c = *(u8 *)cmd->request_buffer;
1571 } 1553 }
1572 1554
1573 if(IS_RAID_CH(adapter, cmd->device->channel) && 1555 if(IS_RAID_CH(adapter, cmd->device->channel) &&
@@ -1704,30 +1686,14 @@ mega_rundoneq (adapter_t *adapter)
1704static void 1686static void
1705mega_free_scb(adapter_t *adapter, scb_t *scb) 1687mega_free_scb(adapter_t *adapter, scb_t *scb)
1706{ 1688{
1707 unsigned long length;
1708
1709 switch( scb->dma_type ) { 1689 switch( scb->dma_type ) {
1710 1690
1711 case MEGA_DMA_TYPE_NONE: 1691 case MEGA_DMA_TYPE_NONE:
1712 break; 1692 break;
1713 1693
1714 case MEGA_BULK_DATA:
1715 if (scb->cmd->use_sg == 0)
1716 length = scb->cmd->request_bufflen;
1717 else {
1718 struct scatterlist *sgl =
1719 (struct scatterlist *)scb->cmd->request_buffer;
1720 length = sgl->length;
1721 }
1722 pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
1723 length, scb->dma_direction);
1724 break;
1725
1726 case MEGA_SGLIST: 1694 case MEGA_SGLIST:
1727 pci_unmap_sg(adapter->dev, scb->cmd->request_buffer, 1695 scsi_dma_unmap(scb->cmd);
1728 scb->cmd->use_sg, scb->dma_direction);
1729 break; 1696 break;
1730
1731 default: 1697 default:
1732 break; 1698 break;
1733 } 1699 }
@@ -1767,80 +1733,33 @@ __mega_busywait_mbox (adapter_t *adapter)
1767static int 1733static int
1768mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) 1734mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1769{ 1735{
1770 struct scatterlist *sgl; 1736 struct scatterlist *sg;
1771 struct page *page;
1772 unsigned long offset;
1773 unsigned int length;
1774 Scsi_Cmnd *cmd; 1737 Scsi_Cmnd *cmd;
1775 int sgcnt; 1738 int sgcnt;
1776 int idx; 1739 int idx;
1777 1740
1778 cmd = scb->cmd; 1741 cmd = scb->cmd;
1779 1742
1780 /* Scatter-gather not used */
1781 if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
1782 !adapter->has_64bit_addr)) {
1783
1784 if (cmd->use_sg == 0) {
1785 page = virt_to_page(cmd->request_buffer);
1786 offset = offset_in_page(cmd->request_buffer);
1787 length = cmd->request_bufflen;
1788 } else {
1789 sgl = (struct scatterlist *)cmd->request_buffer;
1790 page = sgl->page;
1791 offset = sgl->offset;
1792 length = sgl->length;
1793 }
1794
1795 scb->dma_h_bulkdata = pci_map_page(adapter->dev,
1796 page, offset,
1797 length,
1798 scb->dma_direction);
1799 scb->dma_type = MEGA_BULK_DATA;
1800
1801 /*
1802 * We need to handle special 64-bit commands that need a
1803 * minimum of 1 SG
1804 */
1805 if( adapter->has_64bit_addr ) {
1806 scb->sgl64[0].address = scb->dma_h_bulkdata;
1807 scb->sgl64[0].length = length;
1808 *buf = (u32)scb->sgl_dma_addr;
1809 *len = (u32)length;
1810 return 1;
1811 }
1812 else {
1813 *buf = (u32)scb->dma_h_bulkdata;
1814 *len = (u32)length;
1815 }
1816 return 0;
1817 }
1818
1819 sgl = (struct scatterlist *)cmd->request_buffer;
1820
1821 /* 1743 /*
1822 * Copy Scatter-Gather list info into controller structure. 1744 * Copy Scatter-Gather list info into controller structure.
1823 * 1745 *
1824 * The number of sg elements returned must not exceed our limit 1746 * The number of sg elements returned must not exceed our limit
1825 */ 1747 */
1826 sgcnt = pci_map_sg(adapter->dev, sgl, cmd->use_sg, 1748 sgcnt = scsi_dma_map(cmd);
1827 scb->dma_direction);
1828 1749
1829 scb->dma_type = MEGA_SGLIST; 1750 scb->dma_type = MEGA_SGLIST;
1830 1751
1831 BUG_ON(sgcnt > adapter->sglen); 1752 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1832 1753
1833 *len = 0; 1754 *len = 0;
1834 1755
1835 for( idx = 0; idx < sgcnt; idx++, sgl++ ) { 1756 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1836 1757 if (adapter->has_64bit_addr) {
1837 if( adapter->has_64bit_addr ) { 1758 scb->sgl64[idx].address = sg_dma_address(sg);
1838 scb->sgl64[idx].address = sg_dma_address(sgl); 1759 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1839 *len += scb->sgl64[idx].length = sg_dma_len(sgl); 1760 } else {
1840 } 1761 scb->sgl[idx].address = sg_dma_address(sg);
1841 else { 1762 *len += scb->sgl[idx].length = sg_dma_len(sg);
1842 scb->sgl[idx].address = sg_dma_address(sgl);
1843 *len += scb->sgl[idx].length = sg_dma_len(sgl);
1844 } 1763 }
1845 } 1764 }
1846 1765
@@ -3571,7 +3490,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3571 /* 3490 /*
3572 * The user passthru structure 3491 * The user passthru structure
3573 */ 3492 */
3574 upthru = (mega_passthru __user *)MBOX(uioc)->xferaddr; 3493 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3575 3494
3576 /* 3495 /*
3577 * Copy in the user passthru here. 3496 * Copy in the user passthru here.
@@ -3623,7 +3542,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3623 /* 3542 /*
3624 * Get the user data 3543 * Get the user data
3625 */ 3544 */
3626 if( copy_from_user(data, (char __user *)uxferaddr, 3545 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3627 pthru->dataxferlen) ) { 3546 pthru->dataxferlen) ) {
3628 rval = (-EFAULT); 3547 rval = (-EFAULT);
3629 goto freemem_and_return; 3548 goto freemem_and_return;
@@ -3649,7 +3568,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3649 * Is data going up-stream 3568 * Is data going up-stream
3650 */ 3569 */
3651 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3570 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3652 if( copy_to_user((char __user *)uxferaddr, data, 3571 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3653 pthru->dataxferlen) ) { 3572 pthru->dataxferlen) ) {
3654 rval = (-EFAULT); 3573 rval = (-EFAULT);
3655 } 3574 }
@@ -3702,7 +3621,7 @@ freemem_and_return:
3702 /* 3621 /*
3703 * Get the user data 3622 * Get the user data
3704 */ 3623 */
3705 if( copy_from_user(data, (char __user *)uxferaddr, 3624 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3706 uioc.xferlen) ) { 3625 uioc.xferlen) ) {
3707 3626
3708 pci_free_consistent(pdev, 3627 pci_free_consistent(pdev,
@@ -3742,7 +3661,7 @@ freemem_and_return:
3742 * Is data going up-stream 3661 * Is data going up-stream
3743 */ 3662 */
3744 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3663 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3745 if( copy_to_user((char __user *)uxferaddr, data, 3664 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3746 uioc.xferlen) ) { 3665 uioc.xferlen) ) {
3747 3666
3748 rval = (-EFAULT); 3667 rval = (-EFAULT);
@@ -4494,7 +4413,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4494 scmd->device = sdev; 4413 scmd->device = sdev;
4495 4414
4496 scmd->device->host = adapter->host; 4415 scmd->device->host = adapter->host;
4497 scmd->request_buffer = (void *)scb; 4416 scmd->host_scribble = (void *)scb;
4498 scmd->cmnd[0] = MEGA_INTERNAL_CMD; 4417 scmd->cmnd[0] = MEGA_INTERNAL_CMD;
4499 4418
4500 scb->state |= SCB_ACTIVE; 4419 scb->state |= SCB_ACTIVE;
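
The MEGA_INTERNAL_CMD change is the one non-mechanical part of this file: internal commands used to smuggle their scb through request_buffer, which is no longer a driver-visible field, so the pointer moves to host_scribble, the per-command cookie the midlayer reserves for drivers. The round trip, as the two hunks above implement it:

        /* issue side, in mega_internal_command() */
        scmd->host_scribble = (void *)scb;

        /* lookup side, in mega_build_cmd() */
        if (cmd->cmnd[0] == MEGA_INTERNAL_CMD)
                return (scb_t *)cmd->host_scribble;
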
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 26e1e6c55654..fef9ac958754 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -21,6 +21,7 @@
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/mutex.h>
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
26#include <linux/blkdev.h> 27#include <linux/blkdev.h>
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 04d0b6918c61..c46685a03a9f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1378,8 +1378,6 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1378{ 1378{
1379 struct scatterlist *sgl; 1379 struct scatterlist *sgl;
1380 mbox_ccb_t *ccb; 1380 mbox_ccb_t *ccb;
1381 struct page *page;
1382 unsigned long offset;
1383 struct scsi_cmnd *scp; 1381 struct scsi_cmnd *scp;
1384 int sgcnt; 1382 int sgcnt;
1385 int i; 1383 int i;
@@ -1388,48 +1386,16 @@ megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1388 scp = scb->scp; 1386 scp = scb->scp;
1389 ccb = (mbox_ccb_t *)scb->ccb; 1387 ccb = (mbox_ccb_t *)scb->ccb;
1390 1388
1389 sgcnt = scsi_dma_map(scp);
1390 BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
1391
1391 // no mapping required if no data to be transferred 1392 // no mapping required if no data to be transferred
1392 if (!scp->request_buffer || !scp->request_bufflen) 1393 if (!sgcnt)
1393 return 0; 1394 return 0;
1394 1395
1395 if (!scp->use_sg) { /* scatter-gather list not used */
1396
1397 page = virt_to_page(scp->request_buffer);
1398
1399 offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
1400
1401 ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
1402 scp->request_bufflen,
1403 scb->dma_direction);
1404 scb->dma_type = MRAID_DMA_WBUF;
1405
1406 /*
1407 * We need to handle special 64-bit commands that need a
1408 * minimum of 1 SG
1409 */
1410 sgcnt = 1;
1411 ccb->sgl64[0].address = ccb->buf_dma_h;
1412 ccb->sgl64[0].length = scp->request_bufflen;
1413
1414 return sgcnt;
1415 }
1416
1417 sgl = (struct scatterlist *)scp->request_buffer;
1418
1419 // The number of sg elements returned must not exceed our limit
1420 sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
1421 scb->dma_direction);
1422
1423 if (sgcnt > adapter->sglen) {
1424 con_log(CL_ANN, (KERN_CRIT
1425 "megaraid critical: too many sg elements:%d\n",
1426 sgcnt));
1427 BUG();
1428 }
1429
1430 scb->dma_type = MRAID_DMA_WSG; 1396 scb->dma_type = MRAID_DMA_WSG;
1431 1397
1432 for (i = 0; i < sgcnt; i++, sgl++) { 1398 scsi_for_each_sg(scp, sgl, sgcnt, i) {
1433 ccb->sgl64[i].address = sg_dma_address(sgl); 1399 ccb->sgl64[i].address = sg_dma_address(sgl);
1434 ccb->sgl64[i].length = sg_dma_len(sgl); 1400 ccb->sgl64[i].length = sg_dma_len(sgl);
1435 } 1401 }
@@ -1489,19 +1455,11 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1489 1455
1490 adapter->outstanding_cmds++; 1456 adapter->outstanding_cmds++;
1491 1457
1492 if (scb->dma_direction == PCI_DMA_TODEVICE) { 1458 if (scb->dma_direction == PCI_DMA_TODEVICE)
1493 if (!scb->scp->use_sg) { // sg list not used 1459 pci_dma_sync_sg_for_device(adapter->pdev,
1494 pci_dma_sync_single_for_device(adapter->pdev, 1460 scsi_sglist(scb->scp),
1495 ccb->buf_dma_h, 1461 scsi_sg_count(scb->scp),
1496 scb->scp->request_bufflen, 1462 PCI_DMA_TODEVICE);
1497 PCI_DMA_TODEVICE);
1498 }
1499 else {
1500 pci_dma_sync_sg_for_device(adapter->pdev,
1501 scb->scp->request_buffer,
1502 scb->scp->use_sg, PCI_DMA_TODEVICE);
1503 }
1504 }
1505 1463
1506 mbox->busy = 1; // Set busy 1464 mbox->busy = 1; // Set busy
1507 mbox->poll = 0; 1465 mbox->poll = 0;
@@ -1624,29 +1582,26 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1624 return scb; 1582 return scb;
1625 1583
1626 case MODE_SENSE: 1584 case MODE_SENSE:
1627 if (scp->use_sg) { 1585 {
1628 struct scatterlist *sgl; 1586 struct scatterlist *sgl;
1629 caddr_t vaddr; 1587 caddr_t vaddr;
1630 1588
1631 sgl = (struct scatterlist *)scp->request_buffer; 1589 sgl = scsi_sglist(scp);
1632 if (sgl->page) { 1590 if (sgl->page) {
1633 vaddr = (caddr_t) 1591 vaddr = (caddr_t)
1634 (page_address((&sgl[0])->page) 1592 (page_address((&sgl[0])->page)
1635 + (&sgl[0])->offset); 1593 + (&sgl[0])->offset);
1636 1594
1637 memset(vaddr, 0, scp->cmnd[4]); 1595 memset(vaddr, 0, scp->cmnd[4]);
1638 }
1639 else {
1640 con_log(CL_ANN, (KERN_WARNING
1641 "megaraid mailbox: invalid sg:%d\n",
1642 __LINE__));
1643 }
1644 } 1596 }
1645 else { 1597 else {
1646 memset(scp->request_buffer, 0, scp->cmnd[4]); 1598 con_log(CL_ANN, (KERN_WARNING
1599 "megaraid mailbox: invalid sg:%d\n",
1600 __LINE__));
1647 } 1601 }
1648 scp->result = (DID_OK << 16); 1602 }
1649 return NULL; 1603 scp->result = (DID_OK << 16);
1604 return NULL;
1650 1605
1651 case INQUIRY: 1606 case INQUIRY:
1652 /* 1607 /*
@@ -1716,7 +1671,7 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1716 mbox->cmd = MBOXCMD_PASSTHRU64; 1671 mbox->cmd = MBOXCMD_PASSTHRU64;
1717 scb->dma_direction = scp->sc_data_direction; 1672 scb->dma_direction = scp->sc_data_direction;
1718 1673
1719 pthru->dataxferlen = scp->request_bufflen; 1674 pthru->dataxferlen = scsi_bufflen(scp);
1720 pthru->dataxferaddr = ccb->sgl_dma_h; 1675 pthru->dataxferaddr = ccb->sgl_dma_h;
1721 pthru->numsge = megaraid_mbox_mksgl(adapter, 1676 pthru->numsge = megaraid_mbox_mksgl(adapter,
1722 scb); 1677 scb);
@@ -2050,8 +2005,8 @@ megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
2050 2005
2051 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 2006 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
2052 2007
2053 if (scp->request_bufflen) { 2008 if (scsi_bufflen(scp)) {
2054 pthru->dataxferlen = scp->request_bufflen; 2009 pthru->dataxferlen = scsi_bufflen(scp);
2055 pthru->dataxferaddr = ccb->sgl_dma_h; 2010 pthru->dataxferaddr = ccb->sgl_dma_h;
2056 pthru->numsge = megaraid_mbox_mksgl(adapter, scb); 2011 pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2057 } 2012 }
@@ -2099,8 +2054,8 @@ megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2099 2054
2100 memcpy(epthru->cdb, scp->cmnd, scp->cmd_len); 2055 memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
2101 2056
2102 if (scp->request_bufflen) { 2057 if (scsi_bufflen(scp)) {
2103 epthru->dataxferlen = scp->request_bufflen; 2058 epthru->dataxferlen = scsi_bufflen(scp);
2104 epthru->dataxferaddr = ccb->sgl_dma_h; 2059 epthru->dataxferaddr = ccb->sgl_dma_h;
2105 epthru->numsge = megaraid_mbox_mksgl(adapter, scb); 2060 epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2106 } 2061 }
@@ -2266,37 +2221,13 @@ megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2266 2221
2267 ccb = (mbox_ccb_t *)scb->ccb; 2222 ccb = (mbox_ccb_t *)scb->ccb;
2268 2223
2269 switch (scb->dma_type) { 2224 if (scb->dma_direction == PCI_DMA_FROMDEVICE)
2270 2225 pci_dma_sync_sg_for_cpu(adapter->pdev,
2271 case MRAID_DMA_WBUF: 2226 scsi_sglist(scb->scp),
2272 if (scb->dma_direction == PCI_DMA_FROMDEVICE) { 2227 scsi_sg_count(scb->scp),
2273 pci_dma_sync_single_for_cpu(adapter->pdev,
2274 ccb->buf_dma_h,
2275 scb->scp->request_bufflen,
2276 PCI_DMA_FROMDEVICE); 2228 PCI_DMA_FROMDEVICE);
2277 }
2278
2279 pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
2280 scb->scp->request_bufflen, scb->dma_direction);
2281
2282 break;
2283
2284 case MRAID_DMA_WSG:
2285 if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
2286 pci_dma_sync_sg_for_cpu(adapter->pdev,
2287 scb->scp->request_buffer,
2288 scb->scp->use_sg, PCI_DMA_FROMDEVICE);
2289 }
2290
2291 pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
2292 scb->scp->use_sg, scb->dma_direction);
2293
2294 break;
2295
2296 default:
2297 break;
2298 }
2299 2229
2230 scsi_dma_unmap(scb->scp);
2300 return; 2231 return;
2301} 2232}
2302 2233
@@ -2399,24 +2330,16 @@ megaraid_mbox_dpc(unsigned long devp)
2399 if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 2330 if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
2400 && IS_RAID_CH(raid_dev, scb->dev_channel)) { 2331 && IS_RAID_CH(raid_dev, scb->dev_channel)) {
2401 2332
2402 if (scp->use_sg) { 2333 sgl = scsi_sglist(scp);
2403 sgl = (struct scatterlist *) 2334 if (sgl->page) {
2404 scp->request_buffer; 2335 c = *(unsigned char *)
2405
2406 if (sgl->page) {
2407 c = *(unsigned char *)
2408 (page_address((&sgl[0])->page) + 2336 (page_address((&sgl[0])->page) +
2409 (&sgl[0])->offset); 2337 (&sgl[0])->offset);
2410 } 2338 } else {
2411 else { 2339 con_log(CL_ANN, (KERN_WARNING
2412 con_log(CL_ANN, (KERN_WARNING 2340 "megaraid mailbox: invalid sg:%d\n",
2413 "megaraid mailbox: invalid sg:%d\n", 2341 __LINE__));
2414 __LINE__)); 2342 c = 0;
2415 c = 0;
2416 }
2417 }
2418 else {
2419 c = *(uint8_t *)scp->request_buffer;
2420 } 2343 }
2421 2344
2422 if ((c & 0x1F ) == TYPE_DISK) { 2345 if ((c & 0x1F ) == TYPE_DISK) {
@@ -3957,7 +3880,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
3957 megaraid_sysfs_free_resources(adapter); 3880 megaraid_sysfs_free_resources(adapter);
3958 } 3881 }
3959 3882
3960 sema_init(&raid_dev->sysfs_sem, 1); 3883 mutex_init(&raid_dev->sysfs_mtx);
3961 3884
3962 init_waitqueue_head(&raid_dev->sysfs_wait_q); 3885 init_waitqueue_head(&raid_dev->sysfs_wait_q);
3963 3886
@@ -4058,7 +3981,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
4058 /* 3981 /*
4059 * Allow only one read at a time to go through the sysfs attributes 3982 * Allow only one read at a time to go through the sysfs attributes
4060 */ 3983 */
4061 down(&raid_dev->sysfs_sem); 3984 mutex_lock(&raid_dev->sysfs_mtx);
4062 3985
4063 uioc = raid_dev->sysfs_uioc; 3986 uioc = raid_dev->sysfs_uioc;
4064 mbox64 = raid_dev->sysfs_mbox64; 3987 mbox64 = raid_dev->sysfs_mbox64;
@@ -4134,7 +4057,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter)
4134 4057
4135 del_timer_sync(timerp); 4058 del_timer_sync(timerp);
4136 4059
4137 up(&raid_dev->sysfs_sem); 4060 mutex_unlock(&raid_dev->sysfs_mtx);
4138 4061
4139 return rval; 4062 return rval;
4140} 4063}
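
The sync hunks collapse for the same reason as the map/unmap ones: with every data command now mapped as an SG list, only the _sg sync variants are needed. The ordering rule they implement, condensed from the two hunks above: sync for the device before the adapter reads a buffer, sync for the CPU after the adapter writes one, then unmap:

        /* posting: make CPU writes visible to the adapter */
        if (scb->dma_direction == PCI_DMA_TODEVICE)
                pci_dma_sync_sg_for_device(adapter->pdev,
                                           scsi_sglist(scb->scp),
                                           scsi_sg_count(scb->scp),
                                           PCI_DMA_TODEVICE);

        /* completion: make adapter writes visible to the CPU */
        if (scb->dma_direction == PCI_DMA_FROMDEVICE)
                pci_dma_sync_sg_for_cpu(adapter->pdev,
                                        scsi_sglist(scb->scp),
                                        scsi_sg_count(scb->scp),
                                        PCI_DMA_FROMDEVICE);
        scsi_dma_unmap(scb->scp);
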
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 9de803cebd4b..626459d1e902 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -168,7 +168,7 @@ typedef struct {
168 * @hw_error : set if FW not responding 168 * @hw_error : set if FW not responding
169 * @fast_load : If set, skip physical device scanning 169 * @fast_load : If set, skip physical device scanning
170 * @channel_class : channel class, RAID or SCSI 170 * @channel_class : channel class, RAID or SCSI
171 * @sysfs_sem : semaphore to serialize access to sysfs res. 171 * @sysfs_mtx : mutex to serialize access to sysfs res.
172 * @sysfs_uioc : management packet to issue FW calls from sysfs 172 * @sysfs_uioc : management packet to issue FW calls from sysfs
173 * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs 173 * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs
174 * @sysfs_buffer : data buffer for FW commands issued from sysfs 174 * @sysfs_buffer : data buffer for FW commands issued from sysfs
@@ -208,7 +208,7 @@ typedef struct {
208 int hw_error; 208 int hw_error;
209 int fast_load; 209 int fast_load;
210 uint8_t channel_class; 210 uint8_t channel_class;
211 struct semaphore sysfs_sem; 211 struct mutex sysfs_mtx;
212 uioc_t *sysfs_uioc; 212 uioc_t *sysfs_uioc;
213 mbox64_t *sysfs_mbox64; 213 mbox64_t *sysfs_mbox64;
214 caddr_t sysfs_buffer; 214 caddr_t sysfs_buffer;
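
sysfs_sem was only ever used as a binary lock (sema_init to 1, down/up in a single function), which is exactly what a mutex expresses, with lockdep and owner checking thrown in. The conversion is mechanical:

        struct mutex sysfs_mtx;                 /* was: struct semaphore sysfs_sem */

        mutex_init(&raid_dev->sysfs_mtx);       /* was: sema_init(&..., 1) */

        mutex_lock(&raid_dev->sysfs_mtx);       /* was: down(&...) */
        /* ... single reader issues the FW call ... */
        mutex_unlock(&raid_dev->sysfs_mtx);     /* was: up(&...) */
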
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index e2cf12ef3688..b7f2e613c903 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -433,34 +433,15 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
433 int sge_count; 433 int sge_count;
434 struct scatterlist *os_sgl; 434 struct scatterlist *os_sgl;
435 435
436 /* 436 sge_count = scsi_dma_map(scp);
437 * Return 0 if there is no data transfer 437 BUG_ON(sge_count < 0);
438 */
439 if (!scp->request_buffer || !scp->request_bufflen)
440 return 0;
441 438
442 if (!scp->use_sg) { 439 if (sge_count) {
443 mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev, 440 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
444 scp-> 441 mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
445 request_buffer, 442 mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
446 scp-> 443 }
447 request_bufflen,
448 scp->
449 sc_data_direction);
450 mfi_sgl->sge32[0].length = scp->request_bufflen;
451
452 return 1;
453 }
454
455 os_sgl = (struct scatterlist *)scp->request_buffer;
456 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
457 scp->sc_data_direction);
458
459 for (i = 0; i < sge_count; i++, os_sgl++) {
460 mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
461 mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
462 } 444 }
463
464 return sge_count; 445 return sge_count;
465} 446}
466 447
@@ -481,35 +462,15 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
481 int sge_count; 462 int sge_count;
482 struct scatterlist *os_sgl; 463 struct scatterlist *os_sgl;
483 464
484 /* 465 sge_count = scsi_dma_map(scp);
485 * Return 0 if there is no data transfer 466 BUG_ON(sge_count < 0);
486 */
487 if (!scp->request_buffer || !scp->request_bufflen)
488 return 0;
489
490 if (!scp->use_sg) {
491 mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
492 scp->
493 request_buffer,
494 scp->
495 request_bufflen,
496 scp->
497 sc_data_direction);
498
499 mfi_sgl->sge64[0].length = scp->request_bufflen;
500
501 return 1;
502 }
503
504 os_sgl = (struct scatterlist *)scp->request_buffer;
505 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
506 scp->sc_data_direction);
507 467
508 for (i = 0; i < sge_count; i++, os_sgl++) { 468 if (sge_count) {
509 mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); 469 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
510 mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); 470 mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
471 mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
472 }
511 } 473 }
512
513 return sge_count; 474 return sge_count;
514} 475}
515 476
@@ -593,7 +554,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
593 pthru->cdb_len = scp->cmd_len; 554 pthru->cdb_len = scp->cmd_len;
594 pthru->timeout = 0; 555 pthru->timeout = 0;
595 pthru->flags = flags; 556 pthru->flags = flags;
596 pthru->data_xfer_len = scp->request_bufflen; 557 pthru->data_xfer_len = scsi_bufflen(scp);
597 558
598 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 559 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
599 560
@@ -1195,45 +1156,6 @@ megasas_complete_abort(struct megasas_instance *instance,
1195} 1156}
1196 1157
1197/** 1158/**
1198 * megasas_unmap_sgbuf - Unmap SG buffers
1199 * @instance: Adapter soft state
1200 * @cmd: Completed command
1201 */
1202static void
1203megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
1204{
1205 dma_addr_t buf_h;
1206 u8 opcode;
1207
1208 if (cmd->scmd->use_sg) {
1209 pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
1210 cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
1211 return;
1212 }
1213
1214 if (!cmd->scmd->request_bufflen)
1215 return;
1216
1217 opcode = cmd->frame->hdr.cmd;
1218
1219 if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
1220 if (IS_DMA64)
1221 buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
1222 else
1223 buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
1224 } else {
1225 if (IS_DMA64)
1226 buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
1227 else
1228 buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
1229 }
1230
1231 pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
1232 cmd->scmd->sc_data_direction);
1233 return;
1234}
1235
1236/**
1237 * megasas_complete_cmd - Completes a command 1159 * megasas_complete_cmd - Completes a command
1238 * @instance: Adapter soft state 1160 * @instance: Adapter soft state
1239 * @cmd: Command to be completed 1161 * @cmd: Command to be completed
@@ -1281,7 +1203,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1281 1203
1282 atomic_dec(&instance->fw_outstanding); 1204 atomic_dec(&instance->fw_outstanding);
1283 1205
1284 megasas_unmap_sgbuf(instance, cmd); 1206 scsi_dma_unmap(cmd->scmd);
1285 cmd->scmd->scsi_done(cmd->scmd); 1207 cmd->scmd->scsi_done(cmd->scmd);
1286 megasas_return_cmd(instance, cmd); 1208 megasas_return_cmd(instance, cmd);
1287 1209
@@ -1329,7 +1251,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
1329 1251
1330 atomic_dec(&instance->fw_outstanding); 1252 atomic_dec(&instance->fw_outstanding);
1331 1253
1332 megasas_unmap_sgbuf(instance, cmd); 1254 scsi_dma_unmap(cmd->scmd);
1333 cmd->scmd->scsi_done(cmd->scmd); 1255 cmd->scmd->scsi_done(cmd->scmd);
1334 megasas_return_cmd(instance, cmd); 1256 megasas_return_cmd(instance, cmd);
1335 1257
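
megasas_unmap_sgbuf existed only to reconstruct what had been mapped: SG list versus single buffer, and for the latter, whether the address sat in an IO or a passthru frame, 32- or 64-bit SGE. scsi_dma_map() records the mapping on the command itself, so both completion paths shrink to one call each, as the last two hunks show:

        atomic_dec(&instance->fw_outstanding);
        scsi_dma_unmap(cmd->scmd);      /* midlayer remembers what it mapped */
        cmd->scmd->scsi_done(cmd->scmd);
        megasas_return_cmd(instance, cmd);
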
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index e64d1a19d8d7..651d09b08f2a 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -421,7 +421,7 @@ static void mesh_start_cmd(struct mesh_state *ms, struct scsi_cmnd *cmd)
421 for (i = 0; i < cmd->cmd_len; ++i) 421 for (i = 0; i < cmd->cmd_len; ++i)
422 printk(" %x", cmd->cmnd[i]); 422 printk(" %x", cmd->cmnd[i]);
423 printk(" use_sg=%d buffer=%p bufflen=%u\n", 423 printk(" use_sg=%d buffer=%p bufflen=%u\n",
424 cmd->use_sg, cmd->request_buffer, cmd->request_bufflen); 424 scsi_sg_count(cmd), scsi_sglist(cmd), scsi_bufflen(cmd));
425 } 425 }
426#endif 426#endif
427 if (ms->dma_started) 427 if (ms->dma_started)
@@ -602,13 +602,16 @@ static void mesh_done(struct mesh_state *ms, int start_next)
602 cmd->result += (cmd->SCp.Message << 8); 602 cmd->result += (cmd->SCp.Message << 8);
603 if (DEBUG_TARGET(cmd)) { 603 if (DEBUG_TARGET(cmd)) {
604 printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n", 604 printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n",
605 cmd->result, ms->data_ptr, cmd->request_bufflen); 605 cmd->result, ms->data_ptr, scsi_bufflen(cmd));
606#if 0
607 /* needs to use sg? */
606 if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3) 608 if ((cmd->cmnd[0] == 0 || cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 3)
607 && cmd->request_buffer != 0) { 609 && cmd->request_buffer != 0) {
608 unsigned char *b = cmd->request_buffer; 610 unsigned char *b = cmd->request_buffer;
609 printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n", 611 printk(KERN_DEBUG "buffer = %x %x %x %x %x %x %x %x\n",
610 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); 612 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
611 } 613 }
614#endif
612 } 615 }
613 cmd->SCp.this_residual -= ms->data_ptr; 616 cmd->SCp.this_residual -= ms->data_ptr;
614 mesh_completed(ms, cmd); 617 mesh_completed(ms, cmd);
@@ -1265,15 +1268,18 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1265 dcmds = ms->dma_cmds; 1268 dcmds = ms->dma_cmds;
1266 dtot = 0; 1269 dtot = 0;
1267 if (cmd) { 1270 if (cmd) {
1268 cmd->SCp.this_residual = cmd->request_bufflen; 1271 int nseg;
1269 if (cmd->use_sg > 0) { 1272
1270 int nseg; 1273 cmd->SCp.this_residual = scsi_bufflen(cmd);
1274
1275 nseg = scsi_dma_map(cmd);
1276 BUG_ON(nseg < 0);
1277
1278 if (nseg) {
1271 total = 0; 1279 total = 0;
1272 scl = (struct scatterlist *) cmd->request_buffer;
1273 off = ms->data_ptr; 1280 off = ms->data_ptr;
1274 nseg = pci_map_sg(ms->pdev, scl, cmd->use_sg, 1281
1275 cmd->sc_data_direction); 1282 scsi_for_each_sg(cmd, scl, nseg, i) {
1276 for (i = 0; i <nseg; ++i, ++scl) {
1277 u32 dma_addr = sg_dma_address(scl); 1283 u32 dma_addr = sg_dma_address(scl);
1278 u32 dma_len = sg_dma_len(scl); 1284 u32 dma_len = sg_dma_len(scl);
1279 1285
@@ -1292,16 +1298,6 @@ static void set_dma_cmds(struct mesh_state *ms, struct scsi_cmnd *cmd)
1292 dtot += dma_len - off; 1298 dtot += dma_len - off;
1293 off = 0; 1299 off = 0;
1294 } 1300 }
1295 } else if (ms->data_ptr < cmd->request_bufflen) {
1296 dtot = cmd->request_bufflen - ms->data_ptr;
1297 if (dtot > 0xffff)
1298 panic("mesh: transfer size >= 64k");
1299 st_le16(&dcmds->req_count, dtot);
1300 /* XXX Use pci DMA API here ... */
1301 st_le32(&dcmds->phy_addr,
1302 virt_to_phys(cmd->request_buffer) + ms->data_ptr);
1303 dcmds->xfer_status = 0;
1304 ++dcmds;
1305 } 1301 }
1306 } 1302 }
1307 if (dtot == 0) { 1303 if (dtot == 0) {
@@ -1356,18 +1352,14 @@ static void halt_dma(struct mesh_state *ms)
1356 dumplog(ms, ms->conn_tgt); 1352 dumplog(ms, ms->conn_tgt);
1357 dumpslog(ms); 1353 dumpslog(ms);
1358#endif /* MESH_DBG */ 1354#endif /* MESH_DBG */
1359 } else if (cmd && cmd->request_bufflen != 0 && 1355 } else if (cmd && scsi_bufflen(cmd) &&
1360 ms->data_ptr > cmd->request_bufflen) { 1356 ms->data_ptr > scsi_bufflen(cmd)) {
1361 printk(KERN_DEBUG "mesh: target %d overrun, " 1357 printk(KERN_DEBUG "mesh: target %d overrun, "
1362 "data_ptr=%x total=%x goes_out=%d\n", 1358 "data_ptr=%x total=%x goes_out=%d\n",
1363 ms->conn_tgt, ms->data_ptr, cmd->request_bufflen, 1359 ms->conn_tgt, ms->data_ptr, scsi_bufflen(cmd),
1364 ms->tgts[ms->conn_tgt].data_goes_out); 1360 ms->tgts[ms->conn_tgt].data_goes_out);
1365 } 1361 }
1366 if (cmd->use_sg != 0) { 1362 scsi_dma_unmap(cmd);
1367 struct scatterlist *sg;
1368 sg = (struct scatterlist *)cmd->request_buffer;
1369 pci_unmap_sg(ms->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
1370 }
1371 ms->dma_started = 0; 1363 ms->dma_started = 0;
1372} 1364}
1373 1365
diff --git a/drivers/scsi/mvme16x.c b/drivers/scsi/mvme16x.c
deleted file mode 100644
index 575fe6f7e0ec..000000000000
--- a/drivers/scsi/mvme16x.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
3 *
4 * Based on work by Alan Hourihane
5 */
6#include <linux/types.h>
7#include <linux/mm.h>
8#include <linux/blkdev.h>
9
10#include <asm/page.h>
11#include <asm/pgtable.h>
12#include <asm/mvme16xhw.h>
13#include <asm/irq.h>
14
15#include "scsi.h"
16#include <scsi/scsi_host.h>
17#include "53c7xx.h"
18#include "mvme16x.h"
19
20#include<linux/stat.h>
21
22
23int mvme16x_scsi_detect(struct scsi_host_template *tpnt)
24{
25 static unsigned char called = 0;
26 int clock;
27 long long options;
28
29 if (!MACH_IS_MVME16x)
30 return 0;
31 if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
32 printk ("SCSI detection disabled, SCSI chip not present\n");
33 return 0;
34 }
35 if (called)
36 return 0;
37
38 tpnt->proc_name = "MVME16x";
39
40 options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
41
42 clock = 66000000; /* 66MHz SCSI Clock */
43
44 ncr53c7xx_init(tpnt, 0, 710, (unsigned long)0xfff47000,
45 0, MVME16x_IRQ_SCSI, DMA_NONE,
46 options, clock);
47 called = 1;
48 return 1;
49}
50
51static int mvme16x_scsi_release(struct Scsi_Host *shost)
52{
53 if (shost->irq)
54 free_irq(shost->irq, NULL);
55 if (shost->dma_channel != 0xff)
56 free_dma(shost->dma_channel);
57 if (shost->io_port && shost->n_io_port)
58 release_region(shost->io_port, shost->n_io_port);
59 scsi_unregister(shost);
60 return 0;
61}
62
63static struct scsi_host_template driver_template = {
64 .name = "MVME16x NCR53c710 SCSI",
65 .detect = mvme16x_scsi_detect,
66 .release = mvme16x_scsi_release,
67 .queuecommand = NCR53c7xx_queue_command,
68 .abort = NCR53c7xx_abort,
69 .reset = NCR53c7xx_reset,
70 .can_queue = 24,
71 .this_id = 7,
72 .sg_tablesize = 63,
73 .cmd_per_lun = 3,
74 .use_clustering = DISABLE_CLUSTERING
75};
76
77
78#include "scsi_module.c"
diff --git a/drivers/scsi/mvme16x.h b/drivers/scsi/mvme16x.h
deleted file mode 100644
index 73e33b37a3f8..000000000000
--- a/drivers/scsi/mvme16x.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef MVME16x_SCSI_H
2#define MVME16x_SCSI_H
3
4#include <linux/types.h>
5
6int mvme16x_scsi_detect(struct scsi_host_template *);
7const char *NCR53c7x0_info(void);
8int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
9int NCR53c7xx_abort(Scsi_Cmnd *);
10int NCR53c7x0_release (struct Scsi_Host *);
11int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
12void NCR53c7x0_intr(int irq, void *dev_id);
13
14#ifndef CMD_PER_LUN
15#define CMD_PER_LUN 3
16#endif
17
18#ifndef CAN_QUEUE
19#define CAN_QUEUE 24
20#endif
21
22#include <scsi/scsicam.h>
23
24#endif /* MVME16x_SCSI_H */
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
new file mode 100644
index 000000000000..d6ef22a941c4
--- /dev/null
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -0,0 +1,158 @@
1/*
2 * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
3 *
4 * Based on work by Alan Hourihane
5 *
6 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
7 */
8
9#include <linux/module.h>
10#include <linux/blkdev.h>
11#include <linux/device.h>
12#include <linux/platform_device.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <asm/mvme16xhw.h>
16#include <scsi/scsi_host.h>
17#include <scsi/scsi_device.h>
18#include <scsi/scsi_transport.h>
19#include <scsi/scsi_transport_spi.h>
20
21#include "53c700.h"
22
23MODULE_AUTHOR("Kars de Jong <jongk@linux-m68k.org>");
24MODULE_DESCRIPTION("MVME16x NCR53C710 driver");
25MODULE_LICENSE("GPL");
26
27static struct scsi_host_template mvme16x_scsi_driver_template = {
28 .name = "MVME16x NCR53c710 SCSI",
29 .proc_name = "MVME16x",
30 .this_id = 7,
31 .module = THIS_MODULE,
32};
33
34static struct platform_device *mvme16x_scsi_device;
35
36static __devinit int
37mvme16x_probe(struct device *dev)
38{
39 struct Scsi_Host * host = NULL;
40 struct NCR_700_Host_Parameters *hostdata;
41
42 if (!MACH_IS_MVME16x)
43 goto out;
44
45 if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
46 printk(KERN_INFO "mvme16x-scsi: detection disabled, "
47 "SCSI chip not present\n");
48 goto out;
49 }
50
51 hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
52 if (hostdata == NULL) {
53 printk(KERN_ERR "mvme16x-scsi: "
54 "Failed to allocate host data\n");
55 goto out;
56 }
57 memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
58
59 /* Fill in the required pieces of hostdata */
60 hostdata->base = (void __iomem *)0xfff47000UL;
61 hostdata->clock = 50; /* XXX - depends on the CPU clock! */
62 hostdata->chip710 = 1;
63 hostdata->dmode_extra = DMODE_FC2;
64 hostdata->dcntl_extra = EA_710;
65 hostdata->ctest7_extra = CTEST7_TT1;
66
67 /* and register the chip */
68 host = NCR_700_detect(&mvme16x_scsi_driver_template, hostdata, dev);
69 if (!host) {
70 printk(KERN_ERR "mvme16x-scsi: No host detected; "
71 "board configuration problem?\n");
72 goto out_free;
73 }
74 host->this_id = 7;
75 host->base = 0xfff47000UL;
76 host->irq = MVME16x_IRQ_SCSI;
77 if (request_irq(host->irq, NCR_700_intr, 0, "mvme16x-scsi", host)) {
78 printk(KERN_ERR "mvme16x-scsi: request_irq failed\n");
79 goto out_put_host;
80 }
81
82 /* Enable scsi chip ints */
83 {
84 volatile unsigned long v;
85
86 /* Enable scsi interrupts at level 4 in PCCchip2 */
87 v = in_be32(0xfff4202c);
88 v = (v & ~0xff) | 0x10 | 4;
89 out_be32(0xfff4202c, v);
90 }
91
92 scsi_scan_host(host);
93
94 return 0;
95
96 out_put_host:
97 scsi_host_put(host);
98 out_free:
99 kfree(hostdata);
100 out:
101 return -ENODEV;
102}
103
104static __devexit int
105mvme16x_device_remove(struct device *dev)
106{
107 struct Scsi_Host *host = dev_to_shost(dev);
108 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
109
110 /* Disable scsi chip ints */
111 {
112 volatile unsigned long v;
113
114 v = in_be32(0xfff4202c);
115 v &= ~0x10;
116 out_be32(0xfff4202c, v);
117 }
118 scsi_remove_host(host);
119 NCR_700_release(host);
120 kfree(hostdata);
121 free_irq(host->irq, host);
122
123 return 0;
124}
125
126static struct device_driver mvme16x_scsi_driver = {
127 .name = "mvme16x-scsi",
128 .bus = &platform_bus_type,
129 .probe = mvme16x_probe,
130 .remove = __devexit_p(mvme16x_device_remove),
131};
132
133static int __init mvme16x_scsi_init(void)
134{
135 int err;
136
137 err = driver_register(&mvme16x_scsi_driver);
138 if (err)
139 return err;
140
141 mvme16x_scsi_device = platform_device_register_simple("mvme16x-scsi",
142 -1, NULL, 0);
143 if (IS_ERR(mvme16x_scsi_device)) {
144 driver_unregister(&mvme16x_scsi_driver);
145 return PTR_ERR(mvme16x_scsi_device);
146 }
147
148 return 0;
149}
150
151static void __exit mvme16x_scsi_exit(void)
152{
153 platform_device_unregister(mvme16x_scsi_device);
154 driver_unregister(&mvme16x_scsi_driver);
155}
156
157module_init(mvme16x_scsi_init);
158module_exit(mvme16x_scsi_exit);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index bbf521cbc55d..030ba49f33ff 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -529,43 +529,20 @@ static void __unmap_scsi_data(struct device *dev, struct scsi_cmnd *cmd)
529{ 529{
530 switch(cmd->__data_mapped) { 530 switch(cmd->__data_mapped) {
531 case 2: 531 case 2:
532 dma_unmap_sg(dev, cmd->request_buffer, cmd->use_sg, 532 scsi_dma_unmap(cmd);
533 cmd->sc_data_direction);
534 break;
535 case 1:
536 dma_unmap_single(dev, cmd->__data_mapping,
537 cmd->request_bufflen,
538 cmd->sc_data_direction);
539 break; 533 break;
540 } 534 }
541 cmd->__data_mapped = 0; 535 cmd->__data_mapped = 0;
542} 536}
543 537
544static u_long __map_scsi_single_data(struct device *dev, struct scsi_cmnd *cmd)
545{
546 dma_addr_t mapping;
547
548 if (cmd->request_bufflen == 0)
549 return 0;
550
551 mapping = dma_map_single(dev, cmd->request_buffer,
552 cmd->request_bufflen,
553 cmd->sc_data_direction);
554 cmd->__data_mapped = 1;
555 cmd->__data_mapping = mapping;
556
557 return mapping;
558}
559
560static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd) 538static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
561{ 539{
562 int use_sg; 540 int use_sg;
563 541
564 if (cmd->use_sg == 0) 542 use_sg = scsi_dma_map(cmd);
543 if (!use_sg)
565 return 0; 544 return 0;
566 545
567 use_sg = dma_map_sg(dev, cmd->request_buffer, cmd->use_sg,
568 cmd->sc_data_direction);
569 cmd->__data_mapped = 2; 546 cmd->__data_mapped = 2;
570 cmd->__data_mapping = use_sg; 547 cmd->__data_mapping = use_sg;
571 548
@@ -573,7 +550,6 @@ static int __map_scsi_sg_data(struct device *dev, struct scsi_cmnd *cmd)
573} 550}
574 551
575#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd) 552#define unmap_scsi_data(np, cmd) __unmap_scsi_data(np->dev, cmd)
576#define map_scsi_single_data(np, cmd) __map_scsi_single_data(np->dev, cmd)
577#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd) 553#define map_scsi_sg_data(np, cmd) __map_scsi_sg_data(np->dev, cmd)
578 554
579/*========================================================== 555/*==========================================================
@@ -7667,39 +7643,16 @@ fail:
7667** sizes to the data segment array. 7643** sizes to the data segment array.
7668*/ 7644*/
7669 7645
7670static int ncr_scatter_no_sglist(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
7671{
7672 struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER - 1];
7673 int segment;
7674
7675 cp->data_len = cmd->request_bufflen;
7676
7677 if (cmd->request_bufflen) {
7678 dma_addr_t baddr = map_scsi_single_data(np, cmd);
7679 if (baddr) {
7680 ncr_build_sge(np, data, baddr, cmd->request_bufflen);
7681 segment = 1;
7682 } else {
7683 segment = -2;
7684 }
7685 } else {
7686 segment = 0;
7687 }
7688
7689 return segment;
7690}
7691
7692static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd) 7646static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
7693{ 7647{
7694 int segment = 0; 7648 int segment = 0;
7695 int use_sg = (int) cmd->use_sg; 7649 int use_sg = scsi_sg_count(cmd);
7696 7650
7697 cp->data_len = 0; 7651 cp->data_len = 0;
7698 7652
7699 if (!use_sg) 7653 use_sg = map_scsi_sg_data(np, cmd);
7700 segment = ncr_scatter_no_sglist(np, cp, cmd); 7654 if (use_sg > 0) {
7701 else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) { 7655 struct scatterlist *sg;
7702 struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
7703 struct scr_tblmove *data; 7656 struct scr_tblmove *data;
7704 7657
7705 if (use_sg > MAX_SCATTER) { 7658 if (use_sg > MAX_SCATTER) {
@@ -7709,16 +7662,15 @@ static int ncr_scatter(struct ncb *np, struct ccb *cp, struct scsi_cmnd *cmd)
7709 7662
7710 data = &cp->phys.data[MAX_SCATTER - use_sg]; 7663 data = &cp->phys.data[MAX_SCATTER - use_sg];
7711 7664
7712 for (segment = 0; segment < use_sg; segment++) { 7665 scsi_for_each_sg(cmd, sg, use_sg, segment) {
7713 dma_addr_t baddr = sg_dma_address(&scatter[segment]); 7666 dma_addr_t baddr = sg_dma_address(sg);
7714 unsigned int len = sg_dma_len(&scatter[segment]); 7667 unsigned int len = sg_dma_len(sg);
7715 7668
7716 ncr_build_sge(np, &data[segment], baddr, len); 7669 ncr_build_sge(np, &data[segment], baddr, len);
7717 cp->data_len += len; 7670 cp->data_len += len;
7718 } 7671 }
7719 } else { 7672 } else
7720 segment = -2; 7673 segment = -2;
7721 }
7722 7674
7723 return segment; 7675 return segment;
7724} 7676}
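
ncr53c8xx keeps a per-command __data_mapped flag so teardown paths can call unmap_scsi_data() unconditionally; with the single-buffer path (state 1) deleted, only the SG state survives. The remaining life cycle, in sketch form:

        use_sg = scsi_dma_map(cmd);     /* > 0: segments, 0: no data, < 0: error */
        if (use_sg > 0) {
                cmd->__data_mapped = 2;         /* SG mapping live */
                cmd->__data_mapping = use_sg;
        }

        /* later, on completion or error */
        if (cmd->__data_mapped == 2)
                scsi_dma_unmap(cmd);
        cmd->__data_mapped = 0;
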
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 3e9765f0281d..7fed35372150 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -49,10 +49,6 @@
49#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
50#include <scsi/scsi_ioctl.h> 50#include <scsi/scsi_ioctl.h>
51 51
52#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
53# include <linux/blk.h>
54#endif
55
56#include "nsp32.h" 52#include "nsp32.h"
57 53
58 54
@@ -199,17 +195,9 @@ static int __init init_nsp32 (void);
199static void __exit exit_nsp32 (void); 195static void __exit exit_nsp32 (void);
200 196
201/* struct struct scsi_host_template */ 197/* struct struct scsi_host_template */
202#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
203static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); 198static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
204#else
205static int nsp32_proc_info (char *, char **, off_t, int, int, int);
206#endif
207 199
208#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
209static int nsp32_detect (struct pci_dev *pdev); 200static int nsp32_detect (struct pci_dev *pdev);
210#else
211static int nsp32_detect (struct scsi_host_template *);
212#endif
213static int nsp32_queuecommand(struct scsi_cmnd *, 201static int nsp32_queuecommand(struct scsi_cmnd *,
214 void (*done)(struct scsi_cmnd *)); 202 void (*done)(struct scsi_cmnd *));
215static const char *nsp32_info (struct Scsi_Host *); 203static const char *nsp32_info (struct Scsi_Host *);
@@ -296,15 +284,7 @@ static struct scsi_host_template nsp32_template = {
296 .eh_abort_handler = nsp32_eh_abort, 284 .eh_abort_handler = nsp32_eh_abort,
297 .eh_bus_reset_handler = nsp32_eh_bus_reset, 285 .eh_bus_reset_handler = nsp32_eh_bus_reset,
298 .eh_host_reset_handler = nsp32_eh_host_reset, 286 .eh_host_reset_handler = nsp32_eh_host_reset,
299#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,74))
300 .detect = nsp32_detect,
301 .release = nsp32_release,
302#endif
303#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,2))
304 .use_new_eh_code = 1,
305#else
306/* .highmem_io = 1, */ 287/* .highmem_io = 1, */
307#endif
308}; 288};
309 289
310#include "nsp32_io.h" 290#include "nsp32_io.h"
@@ -739,7 +719,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
739 command = 0; 719 command = 0;
740 command |= (TRANSFER_GO | ALL_COUNTER_CLR); 720 command |= (TRANSFER_GO | ALL_COUNTER_CLR);
741 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { 721 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
742 if (SCpnt->request_bufflen > 0) { 722 if (scsi_bufflen(SCpnt) > 0) {
743 command |= BM_START; 723 command |= BM_START;
744 } 724 }
745 } else if (data->trans_method & NSP32_TRANSFER_MMIO) { 725 } else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -888,31 +868,28 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
888static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt) 868static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
889{ 869{
890 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; 870 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
891 struct scatterlist *sgl; 871 struct scatterlist *sg;
892 nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt; 872 nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
893 int num, i; 873 int num, i;
894 u32_le l; 874 u32_le l;
895 875
896 if (SCpnt->request_bufflen == 0) {
897 return TRUE;
898 }
899
900 if (sgt == NULL) { 876 if (sgt == NULL) {
901 nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null"); 877 nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
902 return FALSE; 878 return FALSE;
903 } 879 }
904 880
905 if (SCpnt->use_sg) { 881 num = scsi_dma_map(SCpnt);
906 sgl = (struct scatterlist *)SCpnt->request_buffer; 882 if (!num)
907 num = pci_map_sg(data->Pci, sgl, SCpnt->use_sg, 883 return TRUE;
908 SCpnt->sc_data_direction); 884 else if (num < 0)
909 for (i = 0; i < num; i++) { 885 return FALSE;
886 else {
887 scsi_for_each_sg(SCpnt, sg, num, i) {
910 /* 888 /*
911 * Build nsp32_sglist, substitute sg dma addresses. 889 * Build nsp32_sglist, substitute sg dma addresses.
912 */ 890 */
913 sgt[i].addr = cpu_to_le32(sg_dma_address(sgl)); 891 sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
914 sgt[i].len = cpu_to_le32(sg_dma_len(sgl)); 892 sgt[i].len = cpu_to_le32(sg_dma_len(sg));
915 sgl++;
916 893
917 if (le32_to_cpu(sgt[i].len) > 0x10000) { 894 if (le32_to_cpu(sgt[i].len) > 0x10000) {
918 nsp32_msg(KERN_ERR, 895 nsp32_msg(KERN_ERR,
@@ -929,23 +906,6 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
929 /* set end mark */ 906 /* set end mark */
930 l = le32_to_cpu(sgt[num-1].len); 907 l = le32_to_cpu(sgt[num-1].len);
931 sgt[num-1].len = cpu_to_le32(l | SGTEND); 908 sgt[num-1].len = cpu_to_le32(l | SGTEND);
932
933 } else {
934 SCpnt->SCp.have_data_in = pci_map_single(data->Pci,
935 SCpnt->request_buffer, SCpnt->request_bufflen,
936 SCpnt->sc_data_direction);
937
938 sgt[0].addr = cpu_to_le32(SCpnt->SCp.have_data_in);
939 sgt[0].len = cpu_to_le32(SCpnt->request_bufflen | SGTEND); /* set end mark */
940
941 if (SCpnt->request_bufflen > 0x10000) {
942 nsp32_msg(KERN_ERR,
943 "can't transfer over 64KB at a time, size=0x%lx", SCpnt->request_bufflen);
944 return FALSE;
945 }
946 nsp32_dbg(NSP32_DEBUG_SGLIST, "single : addr 0x%lx len=0x%lx",
947 le32_to_cpu(sgt[0].addr),
948 le32_to_cpu(sgt[0].len ));
949 } 909 }
950 910
951 return TRUE; 911 return TRUE;
@@ -962,7 +922,7 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
962 "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x " 922 "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
963 "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", 923 "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
964 SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, 924 SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
965 SCpnt->use_sg, SCpnt->request_buffer, SCpnt->request_bufflen); 925 scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
966 926
967 if (data->CurrentSC != NULL) { 927 if (data->CurrentSC != NULL) {
968 nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request"); 928 nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -994,10 +954,10 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
994 data->CurrentSC = SCpnt; 954 data->CurrentSC = SCpnt;
995 SCpnt->SCp.Status = CHECK_CONDITION; 955 SCpnt->SCp.Status = CHECK_CONDITION;
996 SCpnt->SCp.Message = 0; 956 SCpnt->SCp.Message = 0;
997 SCpnt->resid = SCpnt->request_bufflen; 957 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
998 958
999 SCpnt->SCp.ptr = (char *) SCpnt->request_buffer; 959 SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
1000 SCpnt->SCp.this_residual = SCpnt->request_bufflen; 960 SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
1001 SCpnt->SCp.buffer = NULL; 961 SCpnt->SCp.buffer = NULL;
1002 SCpnt->SCp.buffers_residual = 0; 962 SCpnt->SCp.buffers_residual = 0;
1003 963
@@ -1210,13 +1170,9 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1210 unsigned long flags; 1170 unsigned long flags;
1211 int ret; 1171 int ret;
1212 int handled = 0; 1172 int handled = 0;
1213
1214#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1215 struct Scsi_Host *host = data->Host; 1173 struct Scsi_Host *host = data->Host;
1174
1216 spin_lock_irqsave(host->host_lock, flags); 1175 spin_lock_irqsave(host->host_lock, flags);
1217#else
1218 spin_lock_irqsave(&io_request_lock, flags);
1219#endif
1220 1176
1221 /* 1177 /*
1222 * IRQ check, then enable IRQ mask 1178 * IRQ check, then enable IRQ mask
@@ -1312,7 +1268,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1312 } 1268 }
1313 1269
1314 if ((auto_stat & DATA_IN_PHASE) && 1270 if ((auto_stat & DATA_IN_PHASE) &&
1315 (SCpnt->resid > 0) && 1271 (scsi_get_resid(SCpnt) > 0) &&
1316 ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) { 1272 ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
1317 printk( "auto+fifo\n"); 1273 printk( "auto+fifo\n");
1318 //nsp32_pio_read(SCpnt); 1274 //nsp32_pio_read(SCpnt);
@@ -1333,7 +1289,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1333 nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx", 1289 nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
1334 nsp32_read4(base, SAVED_SACK_CNT)); 1290 nsp32_read4(base, SAVED_SACK_CNT));
1335 1291
1336 SCpnt->resid = 0; /* all data transfered! */ 1292 scsi_set_resid(SCpnt, 0); /* all data transfered! */
1337 } 1293 }
1338 1294
1339 /* 1295 /*
@@ -1480,11 +1436,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1480 nsp32_write2(base, IRQ_CONTROL, 0); 1436 nsp32_write2(base, IRQ_CONTROL, 0);
1481 1437
1482 out2: 1438 out2:
1483#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1484 spin_unlock_irqrestore(host->host_lock, flags); 1439 spin_unlock_irqrestore(host->host_lock, flags);
1485#else
1486 spin_unlock_irqrestore(&io_request_lock, flags);
1487#endif
1488 1440
1489 nsp32_dbg(NSP32_DEBUG_INTR, "exit"); 1441 nsp32_dbg(NSP32_DEBUG_INTR, "exit");
1490 1442
@@ -1499,28 +1451,15 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1499 nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\ 1451 nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
1500 } \ 1452 } \
1501 } while(0) 1453 } while(0)
1502static int nsp32_proc_info( 1454
1503#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73)) 1455static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
1504 struct Scsi_Host *host, 1456 off_t offset, int length, int inout)
1505#endif
1506 char *buffer,
1507 char **start,
1508 off_t offset,
1509 int length,
1510#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
1511 int hostno,
1512#endif
1513 int inout)
1514{ 1457{
1515 char *pos = buffer; 1458 char *pos = buffer;
1516 int thislength; 1459 int thislength;
1517 unsigned long flags; 1460 unsigned long flags;
1518 nsp32_hw_data *data; 1461 nsp32_hw_data *data;
1519#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
1520 int hostno; 1462 int hostno;
1521#else
1522 struct Scsi_Host *host;
1523#endif
1524 unsigned int base; 1463 unsigned int base;
1525 unsigned char mode_reg; 1464 unsigned char mode_reg;
1526 int id, speed; 1465 int id, speed;
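With the version guards gone, only the post-2.5.73 proc_info prototype remains. For orientation, here is a hedged sketch of how a read-side handler of that shape typically satisfies procfs windowing; my_proc_info() and its output line are illustrative, not nsp32's actual /proc content.

static int my_proc_info(struct Scsi_Host *host, char *buffer, char **start,
			off_t offset, int length, int inout)
{
	char *pos = buffer;

	if (inout)			/* write requests not supported here */
		return -EINVAL;

	pos += sprintf(pos, "host_no: %u\n", host->host_no);

	/* Standard proc_info windowing: hand back only [offset, offset+length). */
	*start = buffer + offset;
	if (pos - buffer < offset)
		return 0;
	else if (pos - buffer - offset < length)
		return pos - buffer - offset;
	else
		return length;
}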
@@ -1531,15 +1470,7 @@ static int nsp32_proc_info(
1531 return -EINVAL; 1470 return -EINVAL;
1532 } 1471 }
1533 1472
1534#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
1535 hostno = host->host_no; 1473 hostno = host->host_no;
1536#else
1537 /* search this HBA host */
1538 host = scsi_host_hn_get(hostno);
1539 if (host == NULL) {
1540 return -ESRCH;
1541 }
1542#endif
1543 data = (nsp32_hw_data *)host->hostdata; 1474 data = (nsp32_hw_data *)host->hostdata;
1544 base = host->io_port; 1475 base = host->io_port;
1545 1476
@@ -1626,25 +1557,8 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
1626 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; 1557 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
1627 unsigned int base = SCpnt->device->host->io_port; 1558 unsigned int base = SCpnt->device->host->io_port;
1628 1559
1629 /* 1560 scsi_dma_unmap(SCpnt);
1630 * unmap pci
1631 */
1632 if (SCpnt->request_bufflen == 0) {
1633 goto skip;
1634 }
1635 1561
1636 if (SCpnt->use_sg) {
1637 pci_unmap_sg(data->Pci,
1638 (struct scatterlist *)SCpnt->request_buffer,
1639 SCpnt->use_sg, SCpnt->sc_data_direction);
1640 } else {
1641 pci_unmap_single(data->Pci,
1642 (u32)SCpnt->SCp.have_data_in,
1643 SCpnt->request_bufflen,
1644 SCpnt->sc_data_direction);
1645 }
1646
1647 skip:
1648 /* 1562 /*
1649 * clear TRANSFERCONTROL_BM_START 1563 * clear TRANSFERCONTROL_BM_START
1650 */ 1564 */
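The deleted use_sg branching is exactly what scsi_dma_map()/scsi_dma_unmap() encapsulate. A sketch of the pairing, assuming a done-callback completion style; my_queuecommand_map() and my_complete() are hypothetical.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static int my_queuecommand_map(struct scsi_cmnd *cmd)
{
	int nseg = scsi_dma_map(cmd);	/* segment count, 0 for no data, or -ENOMEM */

	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* program nseg segments from scsi_sglist(cmd) into the HBA here */
	return 0;
}

static void my_complete(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	scsi_dma_unmap(cmd);	/* no-op when nothing was mapped */
	done(cmd);
}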
@@ -1800,7 +1714,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
1800 SCpnt->SCp.Message = 0; 1714 SCpnt->SCp.Message = 0;
1801 nsp32_dbg(NSP32_DEBUG_BUSFREE, 1715 nsp32_dbg(NSP32_DEBUG_BUSFREE,
1802 "normal end stat=0x%x resid=0x%x\n", 1716 "normal end stat=0x%x resid=0x%x\n",
1803 SCpnt->SCp.Status, SCpnt->resid); 1717 SCpnt->SCp.Status, scsi_get_resid(SCpnt));
1804 SCpnt->result = (DID_OK << 16) | 1718 SCpnt->result = (DID_OK << 16) |
1805 (SCpnt->SCp.Message << 8) | 1719 (SCpnt->SCp.Message << 8) |
1806 (SCpnt->SCp.Status << 0); 1720 (SCpnt->SCp.Status << 0);
@@ -1844,7 +1758,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
1844 unsigned int restlen, sentlen; 1758 unsigned int restlen, sentlen;
1845 u32_le len, addr; 1759 u32_le len, addr;
1846 1760
1847 nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", SCpnt->resid); 1761 nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
1848 1762
1849 /* adjust saved SACK count with 4 byte start address boundary */ 1763 /* adjust saved SACK count with 4 byte start address boundary */
1850 s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3; 1764 s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
@@ -1888,12 +1802,12 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
1888 return; 1802 return;
1889 1803
1890 last: 1804 last:
1891 if (SCpnt->resid < sentlen) { 1805 if (scsi_get_resid(SCpnt) < sentlen) {
1892 nsp32_msg(KERN_ERR, "resid underflow"); 1806 nsp32_msg(KERN_ERR, "resid underflow");
1893 } 1807 }
1894 1808
1895 SCpnt->resid -= sentlen; 1809 scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
1896 nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", SCpnt->resid); 1810 nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
1897 1811
1898 /* update hostdata and lun */ 1812 /* update hostdata and lun */
1899 1813
@@ -2022,7 +1936,7 @@ static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short comma
2022 transfer = 0; 1936 transfer = 0;
2023 transfer |= (TRANSFER_GO | ALL_COUNTER_CLR); 1937 transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
2024 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { 1938 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
2025 if (SCpnt->request_bufflen > 0) { 1939 if (scsi_bufflen(SCpnt) > 0) {
2026 transfer |= BM_START; 1940 transfer |= BM_START;
2027 } 1941 }
2028 } else if (data->trans_method & NSP32_TRANSFER_MMIO) { 1942 } else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -2674,17 +2588,7 @@ static void nsp32_sack_negate(nsp32_hw_data *data)
2674 * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly) 2588 * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
2675 * 0xc00-0xfff: CardBus status registers 2589 * 0xc00-0xfff: CardBus status registers
2676 */ 2590 */
2677#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
2678#define DETECT_OK 0
2679#define DETECT_NG 1
2680#define PCIDEV pdev
2681static int nsp32_detect(struct pci_dev *pdev) 2591static int nsp32_detect(struct pci_dev *pdev)
2682#else
2683#define DETECT_OK 1
2684#define DETECT_NG 0
2685#define PCIDEV (data->Pci)
2686static int nsp32_detect(struct scsi_host_template *sht)
2687#endif
2688{ 2592{
2689 struct Scsi_Host *host; /* registered host structure */ 2593 struct Scsi_Host *host; /* registered host structure */
2690 struct resource *res; 2594 struct resource *res;
@@ -2697,11 +2601,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2697 /* 2601 /*
2698 * register this HBA as SCSI device 2602 * register this HBA as SCSI device
2699 */ 2603 */
2700#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
2701 host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data)); 2604 host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
2702#else
2703 host = scsi_register(sht, sizeof(nsp32_hw_data));
2704#endif
2705 if (host == NULL) { 2605 if (host == NULL) {
2706 nsp32_msg (KERN_ERR, "failed to scsi register"); 2606 nsp32_msg (KERN_ERR, "failed to scsi register");
2707 goto err; 2607 goto err;
@@ -2719,9 +2619,6 @@ static int nsp32_detect(struct scsi_host_template *sht)
2719 host->unique_id = data->BaseAddress; 2619 host->unique_id = data->BaseAddress;
2720 host->n_io_port = data->NumAddress; 2620 host->n_io_port = data->NumAddress;
2721 host->base = (unsigned long)data->MmioAddress; 2621 host->base = (unsigned long)data->MmioAddress;
2722#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,63))
2723 scsi_set_pci_device(host, PCIDEV);
2724#endif
2725 2622
2726 data->Host = host; 2623 data->Host = host;
2727 spin_lock_init(&(data->Lock)); 2624 spin_lock_init(&(data->Lock));
@@ -2776,7 +2673,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2776 /* 2673 /*
2777 * setup DMA 2674 * setup DMA
2778 */ 2675 */
2779 if (pci_set_dma_mask(PCIDEV, DMA_32BIT_MASK) != 0) { 2676 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
2780 nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); 2677 nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
2781 goto scsi_unregister; 2678 goto scsi_unregister;
2782 } 2679 }
@@ -2784,7 +2681,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2784 /* 2681 /*
2785 * allocate autoparam DMA resource. 2682 * allocate autoparam DMA resource.
2786 */ 2683 */
2787 data->autoparam = pci_alloc_consistent(PCIDEV, sizeof(nsp32_autoparam), &(data->auto_paddr)); 2684 data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
2788 if (data->autoparam == NULL) { 2685 if (data->autoparam == NULL) {
2789 nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); 2686 nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
2790 goto scsi_unregister; 2687 goto scsi_unregister;
@@ -2793,7 +2690,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2793 /* 2690 /*
2794 * allocate scatter-gather DMA resource. 2691 * allocate scatter-gather DMA resource.
2795 */ 2692 */
2796 data->sg_list = pci_alloc_consistent(PCIDEV, NSP32_SG_TABLE_SIZE, 2693 data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
2797 &(data->sg_paddr)); 2694 &(data->sg_paddr));
2798 if (data->sg_list == NULL) { 2695 if (data->sg_list == NULL) {
2799 nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); 2696 nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
@@ -2883,16 +2780,14 @@ static int nsp32_detect(struct scsi_host_template *sht)
2883 goto free_irq; 2780 goto free_irq;
2884 } 2781 }
2885 2782
2886#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73)) 2783 ret = scsi_add_host(host, &pdev->dev);
2887 ret = scsi_add_host(host, &PCIDEV->dev);
2888 if (ret) { 2784 if (ret) {
2889 nsp32_msg(KERN_ERR, "failed to add scsi host"); 2785 nsp32_msg(KERN_ERR, "failed to add scsi host");
2890 goto free_region; 2786 goto free_region;
2891 } 2787 }
2892 scsi_scan_host(host); 2788 scsi_scan_host(host);
2893#endif 2789 pci_set_drvdata(pdev, host);
2894 pci_set_drvdata(PCIDEV, host); 2790 return 0;
2895 return DETECT_OK;
2896 2791
2897 free_region: 2792 free_region:
2898 release_region(host->io_port, host->n_io_port); 2793 release_region(host->io_port, host->n_io_port);
@@ -2901,22 +2796,19 @@ static int nsp32_detect(struct scsi_host_template *sht)
2901 free_irq(host->irq, data); 2796 free_irq(host->irq, data);
2902 2797
2903 free_sg_list: 2798 free_sg_list:
2904 pci_free_consistent(PCIDEV, NSP32_SG_TABLE_SIZE, 2799 pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
2905 data->sg_list, data->sg_paddr); 2800 data->sg_list, data->sg_paddr);
2906 2801
2907 free_autoparam: 2802 free_autoparam:
2908 pci_free_consistent(PCIDEV, sizeof(nsp32_autoparam), 2803 pci_free_consistent(pdev, sizeof(nsp32_autoparam),
2909 data->autoparam, data->auto_paddr); 2804 data->autoparam, data->auto_paddr);
2910 2805
2911 scsi_unregister: 2806 scsi_unregister:
2912 scsi_host_put(host); 2807 scsi_host_put(host);
2913 2808
2914 err: 2809 err:
2915 return DETECT_NG; 2810 return 1;
2916} 2811}
2917#undef DETECT_OK
2918#undef DETECT_NG
2919#undef PCIDEV
2920 2812
2921static int nsp32_release(struct Scsi_Host *host) 2813static int nsp32_release(struct Scsi_Host *host)
2922{ 2814{
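With DETECT_OK/DETECT_NG gone, nsp32_detect() is a plain function returning 0 on success, and its error path unwinds setup in reverse order. A compressed sketch of that idiom, with hypothetical names (my_template, struct my_data):

struct my_data { int dummy; };	/* per-host private state */
static struct scsi_host_template my_template;

static int my_detect(struct pci_dev *pdev)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(&my_template, sizeof(struct my_data));
	if (!host)
		goto err;

	if (scsi_add_host(host, &pdev->dev))
		goto put_host;

	scsi_scan_host(host);
	pci_set_drvdata(pdev, host);
	return 0;

put_host:
	scsi_host_put(host);	/* undoes scsi_host_alloc() */
err:
	return 1;		/* nonzero means failure, as in the patched code */
}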
@@ -3516,11 +3408,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
3516 3408
3517 pci_set_master(pdev); 3409 pci_set_master(pdev);
3518 3410
3519#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
3520 ret = nsp32_detect(pdev); 3411 ret = nsp32_detect(pdev);
3521#else
3522 ret = scsi_register_host(&nsp32_template);
3523#endif
3524 3412
3525 nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s", 3413 nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
3526 pdev->irq, 3414 pdev->irq,
@@ -3535,25 +3423,17 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
3535 3423
3536static void __devexit nsp32_remove(struct pci_dev *pdev) 3424static void __devexit nsp32_remove(struct pci_dev *pdev)
3537{ 3425{
3538#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
3539 struct Scsi_Host *host = pci_get_drvdata(pdev); 3426 struct Scsi_Host *host = pci_get_drvdata(pdev);
3540#endif
3541 3427
3542 nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); 3428 nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
3543 3429
3544#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
3545 scsi_remove_host(host); 3430 scsi_remove_host(host);
3546 3431
3547 nsp32_release(host); 3432 nsp32_release(host);
3548 3433
3549 scsi_host_put(host); 3434 scsi_host_put(host);
3550#else
3551 scsi_unregister_host(&nsp32_template);
3552#endif
3553} 3435}
3554 3436
3555
3556
3557static struct pci_driver nsp32_driver = { 3437static struct pci_driver nsp32_driver = {
3558 .name = "nsp32", 3438 .name = "nsp32",
3559 .id_table = nsp32_pci_table, 3439 .id_table = nsp32_pci_table,
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index ffe75c431b25..2695b7187b2f 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -370,8 +370,6 @@ SYM53C500_intr(int irq, void *dev_id)
370 DEB(unsigned char seq_reg;) 370 DEB(unsigned char seq_reg;)
371 unsigned char status, int_reg; 371 unsigned char status, int_reg;
372 unsigned char pio_status; 372 unsigned char pio_status;
373 struct scatterlist *sglist;
374 unsigned int sgcount;
375 int port_base = dev->io_port; 373 int port_base = dev->io_port;
376 struct sym53c500_data *data = 374 struct sym53c500_data *data =
377 (struct sym53c500_data *)dev->hostdata; 375 (struct sym53c500_data *)dev->hostdata;
@@ -434,20 +432,19 @@ SYM53C500_intr(int irq, void *dev_id)
434 switch (status & 0x07) { /* scsi phase */ 432 switch (status & 0x07) { /* scsi phase */
435 case 0x00: /* DATA-OUT */ 433 case 0x00: /* DATA-OUT */
436 if (int_reg & 0x10) { /* Target requesting info transfer */ 434 if (int_reg & 0x10) { /* Target requesting info transfer */
435 struct scatterlist *sg;
436 int i;
437
437 curSC->SCp.phase = data_out; 438 curSC->SCp.phase = data_out;
438 VDEB(printk("SYM53C500: Data-Out phase\n")); 439 VDEB(printk("SYM53C500: Data-Out phase\n"));
439 outb(FLUSH_FIFO, port_base + CMD_REG); 440 outb(FLUSH_FIFO, port_base + CMD_REG);
440 LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ 441 LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
441 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); 442 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
442 if (!curSC->use_sg) /* Don't use scatter-gather */ 443
443 SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); 444 scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
444 else { /* use scatter-gather */ 445 SYM53C500_pio_write(fast_pio, port_base,
445 sgcount = curSC->use_sg; 446 page_address(sg->page) + sg->offset,
446 sglist = curSC->request_buffer; 447 sg->length);
447 while (sgcount--) {
448 SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
449 sglist++;
450 }
451 } 448 }
452 REG0(port_base); 449 REG0(port_base);
453 } 450 }
@@ -455,20 +452,19 @@ SYM53C500_intr(int irq, void *dev_id)
455 452
456 case 0x01: /* DATA-IN */ 453 case 0x01: /* DATA-IN */
457 if (int_reg & 0x10) { /* Target requesting info transfer */ 454 if (int_reg & 0x10) { /* Target requesting info transfer */
455 struct scatterlist *sg;
456 int i;
457
458 curSC->SCp.phase = data_in; 458 curSC->SCp.phase = data_in;
459 VDEB(printk("SYM53C500: Data-In phase\n")); 459 VDEB(printk("SYM53C500: Data-In phase\n"));
460 outb(FLUSH_FIFO, port_base + CMD_REG); 460 outb(FLUSH_FIFO, port_base + CMD_REG);
461 LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ 461 LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
462 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); 462 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
463 if (!curSC->use_sg) /* Don't use scatter-gather */ 463
464 SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); 464 scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
465 else { /* Use scatter-gather */ 465 SYM53C500_pio_read(fast_pio, port_base,
466 sgcount = curSC->use_sg; 466 page_address(sg->page) + sg->offset,
467 sglist = curSC->request_buffer; 467 sg->length);
468 while (sgcount--) {
469 SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
470 sglist++;
471 }
472 } 468 }
473 REG0(port_base); 469 REG0(port_base);
474 } 470 }
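Both data phases above collapse the use_sg special case into one scsi_for_each_sg() loop. The reusable shape, with a hypothetical pio_xfer() standing in for SYM53C500_pio_read/write; page_address(sg->page) + sg->offset is the 2.6.22-era way to reach a segment's kernel virtual address (later kernels wrap it as sg_virt()).

static void pio_xfer(int port_base, void *buf, unsigned int len);

static void my_pio_all_segments(struct scsi_cmnd *cmd, int port_base)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
		pio_xfer(port_base, page_address(sg->page) + sg->offset,
			 sg->length);
}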
@@ -578,7 +574,7 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
578 574
579 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", 575 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
580 SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, 576 SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id,
581 SCpnt->device->lun, SCpnt->request_bufflen)); 577 SCpnt->device->lun, scsi_bufflen(SCpnt)));
582 578
583 VDEB(for (i = 0; i < SCpnt->cmd_len; i++) 579 VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
584 printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); 580 printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 584ba4d6e038..2f1fa1eb7e90 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -129,11 +129,11 @@ static inline int ppa_proc_write(ppa_struct *dev, char *buffer, int length)
129 if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) { 129 if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
130 x = simple_strtoul(buffer + 10, NULL, 0); 130 x = simple_strtoul(buffer + 10, NULL, 0);
131 dev->recon_tmo = x; 131 dev->recon_tmo = x;
132 printk("ppa: recon_tmo set to %ld\n", x); 132 printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x);
133 return length; 133 return length;
134 } 134 }
135 printk("ppa /proc: invalid variable\n"); 135 printk(KERN_WARNING "ppa /proc: invalid variable\n");
136 return (-EINVAL); 136 return -EINVAL;
137} 137}
138 138
139static int ppa_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) 139static int ppa_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout)
@@ -216,7 +216,7 @@ static unsigned char ppa_wait(ppa_struct *dev)
216 216
217 /* Counter expired - Time out occurred */ 217 /* Counter expired - Time out occurred */
218 ppa_fail(dev, DID_TIME_OUT); 218 ppa_fail(dev, DID_TIME_OUT);
219 printk("ppa timeout in ppa_wait\n"); 219 printk(KERN_WARNING "ppa timeout in ppa_wait\n");
220 return 0; /* command timed out */ 220 return 0; /* command timed out */
221} 221}
222 222
@@ -248,7 +248,7 @@ static inline void ecp_sync(ppa_struct *dev)
248 return; 248 return;
249 udelay(5); 249 udelay(5);
250 } 250 }
251 printk("ppa: ECP sync failed as data still present in FIFO.\n"); 251 printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n");
252 } 252 }
253} 253}
254 254
@@ -328,7 +328,7 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
328 break; 328 break;
329 329
330 default: 330 default:
331 printk("PPA: bug in ppa_out()\n"); 331 printk(KERN_ERR "PPA: bug in ppa_out()\n");
332 r = 0; 332 r = 0;
333 } 333 }
334 return r; 334 return r;
@@ -381,7 +381,7 @@ static int ppa_in(ppa_struct *dev, char *buffer, int len)
381 break; 381 break;
382 382
383 default: 383 default:
384 printk("PPA: bug in ppa_ins()\n"); 384 printk(KERN_ERR "PPA: bug in ppa_ins()\n");
385 r = 0; 385 r = 0;
386 break; 386 break;
387 } 387 }
@@ -633,7 +633,7 @@ static void ppa_interrupt(struct work_struct *work)
633 struct scsi_cmnd *cmd = dev->cur_cmd; 633 struct scsi_cmnd *cmd = dev->cur_cmd;
634 634
635 if (!cmd) { 635 if (!cmd) {
636 printk("PPA: bug in ppa_interrupt\n"); 636 printk(KERN_ERR "PPA: bug in ppa_interrupt\n");
637 return; 637 return;
638 } 638 }
639 if (ppa_engine(dev, cmd)) { 639 if (ppa_engine(dev, cmd)) {
@@ -646,31 +646,31 @@ static void ppa_interrupt(struct work_struct *work)
646 case DID_OK: 646 case DID_OK:
647 break; 647 break;
648 case DID_NO_CONNECT: 648 case DID_NO_CONNECT:
649 printk("ppa: no device at SCSI ID %i\n", cmd->device->target); 649 printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
650 break; 650 break;
651 case DID_BUS_BUSY: 651 case DID_BUS_BUSY:
652 printk("ppa: BUS BUSY - EPP timeout detected\n"); 652 printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
653 break; 653 break;
654 case DID_TIME_OUT: 654 case DID_TIME_OUT:
655 printk("ppa: unknown timeout\n"); 655 printk(KERN_DEBUG "ppa: unknown timeout\n");
656 break; 656 break;
657 case DID_ABORT: 657 case DID_ABORT:
658 printk("ppa: told to abort\n"); 658 printk(KERN_DEBUG "ppa: told to abort\n");
659 break; 659 break;
660 case DID_PARITY: 660 case DID_PARITY:
661 printk("ppa: parity error (???)\n"); 661 printk(KERN_DEBUG "ppa: parity error (???)\n");
662 break; 662 break;
663 case DID_ERROR: 663 case DID_ERROR:
664 printk("ppa: internal driver error\n"); 664 printk(KERN_DEBUG "ppa: internal driver error\n");
665 break; 665 break;
666 case DID_RESET: 666 case DID_RESET:
667 printk("ppa: told to reset device\n"); 667 printk(KERN_DEBUG "ppa: told to reset device\n");
668 break; 668 break;
669 case DID_BAD_INTR: 669 case DID_BAD_INTR:
670 printk("ppa: bad interrupt (???)\n"); 670 printk(KERN_WARNING "ppa: bad interrupt (???)\n");
671 break; 671 break;
672 default: 672 default:
673 printk("ppa: bad return code (%02x)\n", 673 printk(KERN_WARNING "ppa: bad return code (%02x)\n",
674 (cmd->result >> 16) & 0xff); 674 (cmd->result >> 16) & 0xff);
675 } 675 }
676#endif 676#endif
@@ -724,8 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
724 724
725 if (retv) { 725 if (retv) {
726 if (time_after(jiffies, dev->jstart + (1 * HZ))) { 726 if (time_after(jiffies, dev->jstart + (1 * HZ))) {
727 printk 727 printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n");
728 ("ppa: Parallel port cable is unplugged!!\n");
729 ppa_fail(dev, DID_BUS_BUSY); 728 ppa_fail(dev, DID_BUS_BUSY);
730 return 0; 729 return 0;
731 } else { 730 } else {
@@ -755,11 +754,9 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
755 case 4: /* Phase 4 - Setup scatter/gather buffers */ 754 case 4: /* Phase 4 - Setup scatter/gather buffers */
756 if (cmd->use_sg) { 755 if (cmd->use_sg) {
757 /* if many buffers are available, start filling the first */ 756 /* if many buffers are available, start filling the first */
758 cmd->SCp.buffer = 757 cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
759 (struct scatterlist *) cmd->request_buffer;
760 cmd->SCp.this_residual = cmd->SCp.buffer->length; 758 cmd->SCp.this_residual = cmd->SCp.buffer->length;
761 cmd->SCp.ptr = 759 cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) +
762 page_address(cmd->SCp.buffer->page) +
763 cmd->SCp.buffer->offset; 760 cmd->SCp.buffer->offset;
764 } else { 761 } else {
765 /* else fill the only available buffer */ 762 /* else fill the only available buffer */
@@ -800,7 +797,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
800 break; 797 break;
801 798
802 default: 799 default:
803 printk("ppa: Invalid scsi phase\n"); 800 printk(KERN_ERR "ppa: Invalid scsi phase\n");
804 } 801 }
805 return 0; 802 return 0;
806} 803}
@@ -811,7 +808,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd,
811 ppa_struct *dev = ppa_dev(cmd->device->host); 808 ppa_struct *dev = ppa_dev(cmd->device->host);
812 809
813 if (dev->cur_cmd) { 810 if (dev->cur_cmd) {
814 printk("PPA: bug in ppa_queuecommand\n"); 811 printk(KERN_ERR "PPA: bug in ppa_queuecommand\n");
815 return 0; 812 return 0;
816 } 813 }
817 dev->failed = 0; 814 dev->failed = 0;
@@ -899,7 +896,7 @@ static int device_check(ppa_struct *dev)
899 /* This routine looks for a device and then attempts to use EPP 896 /* This routine looks for a device and then attempts to use EPP
900 to send a command. If all goes as planned then EPP is available. */ 897 to send a command. If all goes as planned then EPP is available. */
901 898
902 static char cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 899 static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
903 int loop, old_mode, status, k, ppb = dev->base; 900 int loop, old_mode, status, k, ppb = dev->base;
904 unsigned char l; 901 unsigned char l;
905 902
@@ -909,14 +906,14 @@ static int device_check(ppa_struct *dev)
909 if ((ppb & 0x0007) == 0x0000) 906 if ((ppb & 0x0007) == 0x0000)
910 dev->mode = PPA_EPP_32; 907 dev->mode = PPA_EPP_32;
911 908
912 second_pass: 909second_pass:
913 ppa_connect(dev, CONNECT_EPP_MAYBE); 910 ppa_connect(dev, CONNECT_EPP_MAYBE);
914 /* Select SCSI device */ 911 /* Select SCSI device */
915 if (!ppa_select(dev, loop)) { 912 if (!ppa_select(dev, loop)) {
916 ppa_disconnect(dev); 913 ppa_disconnect(dev);
917 continue; 914 continue;
918 } 915 }
919 printk("ppa: Found device at ID %i, Attempting to use %s\n", 916 printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n",
920 loop, PPA_MODE_STRING[dev->mode]); 917 loop, PPA_MODE_STRING[dev->mode]);
921 918
922 /* Send SCSI command */ 919 /* Send SCSI command */
@@ -965,7 +962,7 @@ static int device_check(ppa_struct *dev)
965 return -EIO; 962 return -EIO;
966 } 963 }
967 ppa_disconnect(dev); 964 ppa_disconnect(dev);
968 printk("ppa: Communication established with ID %i using %s\n", 965 printk(KERN_INFO "ppa: Communication established with ID %i using %s\n",
969 loop, PPA_MODE_STRING[dev->mode]); 966 loop, PPA_MODE_STRING[dev->mode]);
970 ppa_connect(dev, CONNECT_EPP_MAYBE); 967 ppa_connect(dev, CONNECT_EPP_MAYBE);
971 ppa_reset_pulse(ppb); 968 ppa_reset_pulse(ppb);
@@ -1140,7 +1137,7 @@ static struct parport_driver ppa_driver = {
1140 1137
1141static int __init ppa_driver_init(void) 1138static int __init ppa_driver_init(void)
1142{ 1139{
1143 printk("ppa: Version %s\n", PPA_VERSION); 1140 printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
1144 return parport_register_driver(&ppa_driver); 1141 return parport_register_driver(&ppa_driver);
1145} 1142}
1146 1143
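The ppa changes above are one sustained fix: every printk() gains an explicit severity prefix so klogd can route it, instead of falling back to the kernel's default log level. Roughly how the levels were apportioned here (illustrative strings, not from the patch):

static void my_severity_examples(void)
{
	printk(KERN_ERR "ppa: driver bug detected\n");      /* broken invariant */
	printk(KERN_WARNING "ppa: recoverable problem\n");  /* needs operator attention */
	printk(KERN_INFO "ppa: device found\n");            /* normal milestone */
	printk(KERN_DEBUG "ppa: expected error path\n");    /* diagnostic detail */
}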
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 411663af7bb7..71ddb5db4944 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,4 +1,4 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o
3 3
4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 4obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 942db9de785e..3eb2208675ae 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -6,8 +6,11 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8 8
9#include <linux/kthread.h>
9#include <linux/vmalloc.h> 10#include <linux/vmalloc.h>
10 11
12int qla24xx_vport_disable(struct fc_vport *, bool);
13
11/* SYSFS attributes --------------------------------------------------------- */ 14/* SYSFS attributes --------------------------------------------------------- */
12 15
13static ssize_t 16static ssize_t
@@ -963,6 +966,122 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
963 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 966 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
964} 967}
965 968
969static int
970qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
971{
972 int ret = 0;
973 scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
974 scsi_qla_host_t *vha;
975
976 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
977 if (ret) {
978 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
979 "status %x\n", ret));
980 return (ret);
981 }
982
983 vha = qla24xx_create_vhost(fc_vport);
984 if (vha == NULL) {
985 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
986 vha));
987 return FC_VPORT_FAILED;
988 }
989 if (disable) {
990 atomic_set(&vha->vp_state, VP_OFFLINE);
991 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
992 } else
993 atomic_set(&vha->vp_state, VP_FAILED);
994
995 /* ready to create vport */
996 qla_printk(KERN_INFO, vha, "VP entry id %d assigned.\n", vha->vp_idx);
997
 998 /* initialize vport states */
 999 atomic_set(&vha->loop_state, LOOP_DOWN);
 1000 vha->vp_err_state = VP_ERR_PORTDWN;
 1001 vha->vp_prev_err_state = VP_ERR_UNKWN;
1002 /* Check if physical ha port is Up */
1003 if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
1004 atomic_read(&ha->loop_state) == LOOP_DEAD) {
1005 /* Don't retry or attempt login of this virtual port */
1006 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
1007 vha->host_no));
1008 atomic_set(&vha->loop_state, LOOP_DEAD);
1009 if (!disable)
1010 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1011 }
1012
1013 if (scsi_add_host(vha->host, &fc_vport->dev)) {
1014 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1015 vha->host_no, vha->vp_idx));
1016 goto vport_create_failed_2;
1017 }
1018
1019 /* initialize attributes */
1020 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1021 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1022 fc_host_supported_classes(vha->host) =
1023 fc_host_supported_classes(ha->host);
1024 fc_host_supported_speeds(vha->host) =
1025 fc_host_supported_speeds(ha->host);
1026
1027 qla24xx_vport_disable(fc_vport, disable);
1028
1029 return 0;
1030vport_create_failed_2:
1031 qla24xx_disable_vp(vha);
1032 qla24xx_deallocate_vp_id(vha);
1033 kfree(vha->port_name);
1034 kfree(vha->node_name);
1035 scsi_host_put(vha->host);
1036 return FC_VPORT_FAILED;
1037}
1038
1039int
1040qla24xx_vport_delete(struct fc_vport *fc_vport)
1041{
1042 scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
1043 scsi_qla_host_t *vha = fc_vport->dd_data;
1044
1045 qla24xx_disable_vp(vha);
1046 qla24xx_deallocate_vp_id(vha);
1047
1048 down(&ha->vport_sem);
1049 ha->cur_vport_count--;
1050 clear_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
1051 up(&ha->vport_sem);
1052
1053 kfree(vha->node_name);
1054 kfree(vha->port_name);
1055
1056 if (vha->timer_active) {
1057 qla2x00_vp_stop_timer(vha);
1058 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1059 "has stopped\n",
1060 vha->host_no, vha->vp_idx, vha));
1061 }
1062
1063 fc_remove_host(vha->host);
1064
1065 scsi_remove_host(vha->host);
1066
1067 scsi_host_put(vha->host);
1068
1069 return 0;
1070}
1071
1072int
1073qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1074{
1075 scsi_qla_host_t *vha = fc_vport->dd_data;
1076
1077 if (disable)
1078 qla24xx_disable_vp(vha);
1079 else
1080 qla24xx_enable_vp(vha);
1081
1082 return 0;
1083}
1084
966struct fc_function_template qla2xxx_transport_functions = { 1085struct fc_function_template qla2xxx_transport_functions = {
967 1086
968 .show_host_node_name = 1, 1087 .show_host_node_name = 1,
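The three qla24xx_vport_* functions above become the driver's NPIV entry points once wired into the FC transport template a few lines below. The generic wiring looks like this, with hypothetical my_* callbacks; the .vport_* member names and signatures are the real 2.6.23 scsi_transport_fc API.

#include <scsi/scsi_transport_fc.h>

static int my_vport_create(struct fc_vport *vport, bool disable);
static int my_vport_disable(struct fc_vport *vport, bool disable);
static int my_vport_delete(struct fc_vport *vport);

static struct fc_function_template my_transport_functions = {
	/* host/rport attribute flags elided */
	.vport_create	= my_vport_create,
	.vport_disable	= my_vport_disable,
	.vport_delete	= my_vport_delete,
};

The transport class only exposes its NPIV controls on an fc_host when these members are set, which is why the vport-side template defined in the next hunk omits them: a virtual port cannot create further virtual ports.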
@@ -1000,6 +1119,49 @@ struct fc_function_template qla2xxx_transport_functions = {
1000 1119
1001 .issue_fc_host_lip = qla2x00_issue_lip, 1120 .issue_fc_host_lip = qla2x00_issue_lip,
1002 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1121 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1122
1123 .vport_create = qla24xx_vport_create,
1124 .vport_disable = qla24xx_vport_disable,
1125 .vport_delete = qla24xx_vport_delete,
1126};
1127
1128struct fc_function_template qla2xxx_transport_vport_functions = {
1129
1130 .show_host_node_name = 1,
1131 .show_host_port_name = 1,
1132 .show_host_supported_classes = 1,
1133
1134 .get_host_port_id = qla2x00_get_host_port_id,
1135 .show_host_port_id = 1,
1136 .get_host_speed = qla2x00_get_host_speed,
1137 .show_host_speed = 1,
1138 .get_host_port_type = qla2x00_get_host_port_type,
1139 .show_host_port_type = 1,
1140 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1141 .show_host_symbolic_name = 1,
1142 .set_host_system_hostname = qla2x00_set_host_system_hostname,
1143 .show_host_system_hostname = 1,
1144 .get_host_fabric_name = qla2x00_get_host_fabric_name,
1145 .show_host_fabric_name = 1,
1146 .get_host_port_state = qla2x00_get_host_port_state,
1147 .show_host_port_state = 1,
1148
1149 .dd_fcrport_size = sizeof(struct fc_port *),
1150 .show_rport_supported_classes = 1,
1151
1152 .get_starget_node_name = qla2x00_get_starget_node_name,
1153 .show_starget_node_name = 1,
1154 .get_starget_port_name = qla2x00_get_starget_port_name,
1155 .show_starget_port_name = 1,
1156 .get_starget_port_id = qla2x00_get_starget_port_id,
1157 .show_starget_port_id = 1,
1158
1159 .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1160 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1161 .show_rport_dev_loss_tmo = 1,
1162
1163 .issue_fc_host_lip = qla2x00_issue_lip,
1164 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1003}; 1165};
1004 1166
1005void 1167void
@@ -1008,4 +1170,6 @@ qla2x00_init_host_attr(scsi_qla_host_t *ha)
1008 fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name); 1170 fc_host_node_name(ha->host) = wwn_to_u64(ha->node_name);
1009 fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name); 1171 fc_host_port_name(ha->host) = wwn_to_u64(ha->port_name);
1010 fc_host_supported_classes(ha->host) = FC_COS_CLASS3; 1172 fc_host_supported_classes(ha->host) = FC_COS_CLASS3;
1173 fc_host_max_npiv_vports(ha->host) = MAX_NUM_VPORT_FABRIC;
1174 fc_host_npiv_vports_inuse(ha->host) = ha->cur_vport_count;
1011} 1175}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f6ed6962bc2b..996c47a63074 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1411,9 +1411,9 @@ qla2x00_print_scsi_cmd(struct scsi_cmnd * cmd)
1411 printk("0x%02x ", cmd->cmnd[i]); 1411 printk("0x%02x ", cmd->cmnd[i]);
1412 } 1412 }
1413 printk("\n seg_cnt=%d, allowed=%d, retries=%d\n", 1413 printk("\n seg_cnt=%d, allowed=%d, retries=%d\n",
1414 cmd->use_sg, cmd->allowed, cmd->retries); 1414 scsi_sg_count(cmd), cmd->allowed, cmd->retries);
1415 printk(" request buffer=0x%p, request buffer len=0x%x\n", 1415 printk(" request buffer=0x%p, request buffer len=0x%x\n",
1416 cmd->request_buffer, cmd->request_bufflen); 1416 scsi_sglist(cmd), scsi_bufflen(cmd));
1417 printk(" tag=%d, transfersize=0x%x\n", 1417 printk(" tag=%d, transfersize=0x%x\n",
1418 cmd->tag, cmd->transfersize); 1418 cmd->tag, cmd->transfersize);
1419 printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp); 1419 printk(" serial_number=%lx, SP=%p\n", cmd->serial_number, sp);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5b12278968e0..49dffeb78512 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -21,6 +21,7 @@
21/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */ 21/* #define QL_DEBUG_LEVEL_12 */ /* Output IP trace msgs */
22/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */ 22/* #define QL_DEBUG_LEVEL_13 */ /* Output fdmi function trace msgs */
23/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */ 23/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
24/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
24/* 25/*
25 * Local Macro Definitions. 26 * Local Macro Definitions.
26 */ 27 */
@@ -30,7 +31,8 @@
30 defined(QL_DEBUG_LEVEL_7) || defined(QL_DEBUG_LEVEL_8) || \ 31 defined(QL_DEBUG_LEVEL_7) || defined(QL_DEBUG_LEVEL_8) || \
31 defined(QL_DEBUG_LEVEL_9) || defined(QL_DEBUG_LEVEL_10) || \ 32 defined(QL_DEBUG_LEVEL_9) || defined(QL_DEBUG_LEVEL_10) || \
32 defined(QL_DEBUG_LEVEL_11) || defined(QL_DEBUG_LEVEL_12) || \ 33 defined(QL_DEBUG_LEVEL_11) || defined(QL_DEBUG_LEVEL_12) || \
33 defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14) 34 defined(QL_DEBUG_LEVEL_13) || defined(QL_DEBUG_LEVEL_14) || \
35 defined(QL_DEBUG_LEVEL_15)
34 #define QL_DEBUG_ROUTINES 36 #define QL_DEBUG_ROUTINES
35#endif 37#endif
36 38
@@ -125,6 +127,12 @@
125#define DEBUG14(x) do {} while (0) 127#define DEBUG14(x) do {} while (0)
126#endif 128#endif
127 129
130#if defined(QL_DEBUG_LEVEL_15)
131#define DEBUG15(x) do {x;} while (0)
132#else
133#define DEBUG15(x) do {} while (0)
134#endif
135
128/* 136/*
129 * Firmware Dump structure definition 137 * Firmware Dump structure definition
130 */ 138 */
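DEBUG15() follows the kernel's standard conditional-debug-macro shape. The do { x; } while (0) wrapper matters because it keeps the expansion a single statement, so a compiled-out call still parses correctly under if/else; a minimal illustration with a hypothetical MY_DEBUG():

#ifdef MY_DEBUG_ENABLED
#define MY_DEBUG(x)	do { x; } while (0)
#else
#define MY_DEBUG(x)	do {} while (0)
#endif

/*
 * Safe either way:
 *	if (verbose)
 *		MY_DEBUG(printk(KERN_DEBUG "state %d\n", state));
 *	else
 *		return;
 * A brace-block expansion plus the call site's trailing semicolon
 * would instead break the else branch.
 */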
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e8948b679f5b..a1ca590ba447 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1551,6 +1551,9 @@ typedef struct fc_port {
1551 1551
1552 unsigned long last_queue_full; 1552 unsigned long last_queue_full;
1553 unsigned long last_ramp_up; 1553 unsigned long last_ramp_up;
1554
1555 struct list_head vp_fcport;
1556 uint16_t vp_idx;
1554} fc_port_t; 1557} fc_port_t;
1555 1558
1556/* 1559/*
@@ -1999,6 +2002,36 @@ struct gid_list_info {
1999}; 2002};
2000#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES) 2003#define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
2001 2004
2005/* NPIV */
2006typedef struct vport_info {
2007 uint8_t port_name[WWN_SIZE];
2008 uint8_t node_name[WWN_SIZE];
2009 int vp_id;
2010 uint16_t loop_id;
2011 unsigned long host_no;
2012 uint8_t port_id[3];
2013 int loop_state;
2014} vport_info_t;
2015
2016typedef struct vport_params {
2017 uint8_t port_name[WWN_SIZE];
2018 uint8_t node_name[WWN_SIZE];
2019 uint32_t options;
2020#define VP_OPTS_RETRY_ENABLE BIT_0
2021#define VP_OPTS_VP_DISABLE BIT_1
2022} vport_params_t;
2023
2024/* NPIV - return codes of VP create and modify */
2025#define VP_RET_CODE_OK 0
2026#define VP_RET_CODE_FATAL 1
2027#define VP_RET_CODE_WRONG_ID 2
2028#define VP_RET_CODE_WWPN 3
2029#define VP_RET_CODE_RESOURCES 4
2030#define VP_RET_CODE_NO_MEM 5
2031#define VP_RET_CODE_NOT_FOUND 6
2032
2033#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
2034
2002/* 2035/*
2003 * ISP operations 2036 * ISP operations
2004 */ 2037 */
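to_qla_parent() is the vport code's way to reach the physical HBA (for the physical port it returns the host itself), since shared state such as the fcports list lives only on the parent. A sketch of the resulting walk-and-filter pattern, mirroring the qla_init.c hunk at the end of this diff; my_walk_fcports() is illustrative.

static void my_walk_fcports(scsi_qla_host_t *ha)
{
	scsi_qla_host_t *pha = to_qla_parent(ha);  /* fcports live on the physical port */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &pha->fcports, list) {
		if (fcport->vp_idx != ha->vp_idx)
			continue;	/* owned by a different vport */
		/* operate on this vport's port */
	}
}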
@@ -2073,6 +2106,16 @@ struct qla_msix_entry {
2073 uint16_t msix_entry; 2106 uint16_t msix_entry;
2074}; 2107};
2075 2108
2109#define WATCH_INTERVAL 1 /* number of seconds */
2110
2111/* NPIV */
2112#define MAX_MULTI_ID_LOOP 126
2113#define MAX_MULTI_ID_FABRIC 64
2114#define MAX_NUM_VPORT_LOOP (MAX_MULTI_ID_LOOP - 1)
2115#define MAX_NUM_VPORT_FABRIC (MAX_MULTI_ID_FABRIC - 1)
2116#define MAX_NUM_VHBA_LOOP (MAX_MULTI_ID_LOOP - 1)
2117#define MAX_NUM_VHBA_FABRIC (MAX_MULTI_ID_FABRIC - 1)
2118
2076/* 2119/*
2077 * Linux Host Adapter structure 2120 * Linux Host Adapter structure
2078 */ 2121 */
@@ -2108,6 +2151,8 @@ typedef struct scsi_qla_host {
2108 uint32_t msix_enabled :1; 2151 uint32_t msix_enabled :1;
2109 uint32_t disable_serdes :1; 2152 uint32_t disable_serdes :1;
2110 uint32_t gpsc_supported :1; 2153 uint32_t gpsc_supported :1;
2154 uint32_t vsan_enabled :1;
2155 uint32_t npiv_supported :1;
2111 } flags; 2156 } flags;
2112 2157
2113 atomic_t loop_state; 2158 atomic_t loop_state;
@@ -2147,6 +2192,7 @@ typedef struct scsi_qla_host {
2147#define BEACON_BLINK_NEEDED 25 2192#define BEACON_BLINK_NEEDED 25
2148#define REGISTER_FDMI_NEEDED 26 2193#define REGISTER_FDMI_NEEDED 26
2149#define FCPORT_UPDATE_NEEDED 27 2194#define FCPORT_UPDATE_NEEDED 27
2195#define VP_DPC_NEEDED 28 /* wake up for VP dpc handling */
2150 2196
2151 uint32_t device_flags; 2197 uint32_t device_flags;
2152#define DFLG_LOCAL_DEVICES BIT_0 2198#define DFLG_LOCAL_DEVICES BIT_0
@@ -2237,6 +2283,11 @@ typedef struct scsi_qla_host {
2237 2283
2238 /* ISP configuration data. */ 2284 /* ISP configuration data. */
2239 uint16_t loop_id; /* Host adapter loop id */ 2285 uint16_t loop_id; /* Host adapter loop id */
2286 uint16_t switch_cap;
2287#define FLOGI_SEQ_DEL BIT_8
2288#define FLOGI_MID_SUPPORT BIT_10
2289#define FLOGI_VSAN_SUPPORT BIT_12
2290#define FLOGI_SP_SUPPORT BIT_13
2240 uint16_t fb_rev; 2291 uint16_t fb_rev;
2241 2292
2242 port_id_t d_id; /* Host adapter port id */ 2293 port_id_t d_id; /* Host adapter port id */
@@ -2344,6 +2395,7 @@ typedef struct scsi_qla_host {
2344#define MBX_UPDATE_FLASH_ACTIVE 3 2395#define MBX_UPDATE_FLASH_ACTIVE 3
2345 2396
 2346 struct semaphore mbx_cmd_sem; /* Serialize mbx access */ 2397 struct semaphore mbx_cmd_sem; /* Serialize mbx access */
2398 struct semaphore vport_sem; /* Virtual port synchronization */
2347 struct semaphore mbx_intr_sem; /* Used for completion notification */ 2399 struct semaphore mbx_intr_sem; /* Used for completion notification */
2348 2400
2349 uint32_t mbx_flags; 2401 uint32_t mbx_flags;
@@ -2428,6 +2480,37 @@ typedef struct scsi_qla_host {
2428 struct fc_host_statistics fc_host_stat; 2480 struct fc_host_statistics fc_host_stat;
2429 2481
2430 struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES]; 2482 struct qla_msix_entry msix_entries[QLA_MSIX_ENTRIES];
2483
2484 struct list_head vp_list; /* list of VP */
2485 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
2486 uint8_t vp_idx_map[16];
2487 uint16_t num_vhosts; /* number of vports created */
2488 uint16_t num_vsans; /* number of vsan created */
2489 uint16_t vp_idx; /* vport ID */
2490
2491 struct scsi_qla_host *parent; /* holds pport */
2492 unsigned long vp_flags;
2493 struct list_head vp_fcports; /* list of fcports */
2494#define VP_IDX_ACQUIRED 0 /* bit no 0 */
2495#define VP_CREATE_NEEDED 1
2496#define VP_BIND_NEEDED 2
2497#define VP_DELETE_NEEDED 3
2498#define VP_SCR_NEEDED 4 /* State Change Request registration */
2499 atomic_t vp_state;
2500#define VP_OFFLINE 0
2501#define VP_ACTIVE 1
2502#define VP_FAILED 2
2503// #define VP_DISABLE 3
2504 uint16_t vp_err_state;
2505 uint16_t vp_prev_err_state;
2506#define VP_ERR_UNKWN 0
2507#define VP_ERR_PORTDWN 1
2508#define VP_ERR_FAB_UNSUPPORTED 2
2509#define VP_ERR_FAB_NORESOURCES 3
2510#define VP_ERR_FAB_LOGOUT 4
2511#define VP_ERR_ADAP_NORESOURCES 5
 2512 int max_npiv_vports; /* 63 or 125 per topology */
2513 int cur_vport_count;
2431} scsi_qla_host_t; 2514} scsi_qla_host_t;
2432 2515
2433 2516
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index a0a722cf4237..63a11fef5d1b 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -69,6 +69,16 @@ struct port_database_24xx {
69 uint8_t reserved_3[24]; 69 uint8_t reserved_3[24];
70}; 70};
71 71
72struct vp_database_24xx {
73 uint16_t vp_status;
74 uint8_t options;
75 uint8_t id;
76 uint8_t port_name[WWN_SIZE];
77 uint8_t node_name[WWN_SIZE];
78 uint16_t port_id_low;
79 uint16_t port_id_high;
80};
81
72struct nvram_24xx { 82struct nvram_24xx {
73 /* NVRAM header. */ 83 /* NVRAM header. */
74 uint8_t id[4]; 84 uint8_t id[4];
@@ -962,6 +972,25 @@ struct mid_db_24xx {
962 struct mid_db_entry_24xx entries[MAX_MID_VPS]; 972 struct mid_db_entry_24xx entries[MAX_MID_VPS];
963}; 973};
964 974
975 /*
976 * Virtual Fabric ID type definition.
977 */
978typedef struct vf_id {
979 uint16_t id : 12;
980 uint16_t priority : 4;
981} vf_id_t;
982
983/*
984 * Virtual Fabric HopCt type definition.
985 */
986typedef struct vf_hopct {
987 uint16_t reserved : 8;
988 uint16_t hopct : 8;
989} vf_hopct_t;
990
991/*
992 * Virtual Port Control IOCB
993 */
965#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */ 994#define VP_CTRL_IOCB_TYPE 0x30 /* Virtual Port Control entry. */
966struct vp_ctrl_entry_24xx { 995struct vp_ctrl_entry_24xx {
967 uint8_t entry_type; /* Entry type. */ 996 uint8_t entry_type; /* Entry type. */
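One caveat on the vf_id_t/vf_hopct_t definitions above: C bit-field order and packing inside a uint16_t are implementation-defined, so structures like these match the firmware's wire layout only for the compilers and endianness the driver targets. A layout-independent alternative (a sketch of the general technique, not something this patch does) packs the fields explicitly:

static inline uint16_t my_vf_id_pack(uint16_t id, uint16_t priority)
{
	/* 12-bit id in the low bits, 4-bit priority in the high bits */
	return (id & 0x0fff) | ((priority & 0x000f) << 12);
}

static inline uint16_t my_vf_id_unpack_id(uint16_t word)
{
	return word & 0x0fff;
}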
@@ -974,6 +1003,7 @@ struct vp_ctrl_entry_24xx {
974 uint16_t vp_idx_failed; 1003 uint16_t vp_idx_failed;
975 1004
976 uint16_t comp_status; /* Completion status. */ 1005 uint16_t comp_status; /* Completion status. */
1006#define CS_VCE_IOCB_ERROR 0x01 /* Error processing IOCB */
 977#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquiring ID. */ 1007#define CS_VCE_ACQ_ID_ERROR 0x02 /* Error while acquiring ID. */
978#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */ 1008#define CS_VCE_BUSY 0x05 /* Firmware not ready to accept cmd. */
979 1009
@@ -982,24 +1012,34 @@ struct vp_ctrl_entry_24xx {
982#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */ 1012#define VCE_COMMAND_DISABLE_VPS 0x08 /* Disable VPs. */
983#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */ 1013#define VCE_COMMAND_DISABLE_VPS_REINIT 0x09 /* Disable VPs and reinit link. */
984#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */ 1014#define VCE_COMMAND_DISABLE_VPS_LOGO 0x0a /* Disable VPs and LOGO ports. */
1015#define VCE_COMMAND_DISABLE_VPS_LOGO_ALL 0x0b /* Disable VPs and LOGO ports. */
985 1016
986 uint16_t vp_count; 1017 uint16_t vp_count;
987 1018
988 uint8_t vp_idx_map[16]; 1019 uint8_t vp_idx_map[16];
989 1020 uint16_t flags;
990 uint8_t reserved_4[32]; 1021 struct vf_id id;
1022 uint16_t reserved_4;
1023 struct vf_hopct hopct;
1024 uint8_t reserved_5[8];
991}; 1025};
992 1026
1027/*
1028 * Modify Virtual Port Configuration IOCB
1029 */
993#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */ 1030#define VP_CONFIG_IOCB_TYPE 0x31 /* Virtual Port Config entry. */
994struct vp_config_entry_24xx { 1031struct vp_config_entry_24xx {
995 uint8_t entry_type; /* Entry type. */ 1032 uint8_t entry_type; /* Entry type. */
996 uint8_t entry_count; /* Entry count. */ 1033 uint8_t entry_count; /* Entry count. */
997 uint8_t sys_define; /* System defined. */ 1034 uint8_t handle_count;
998 uint8_t entry_status; /* Entry Status. */ 1035 uint8_t entry_status; /* Entry Status. */
999 1036
1000 uint32_t handle; /* System handle. */ 1037 uint32_t handle; /* System handle. */
1001 1038
1002 uint16_t reserved_1; 1039 uint16_t flags;
1040#define CS_VF_BIND_VPORTS_TO_VF BIT_0
1041#define CS_VF_SET_QOS_OF_VPORTS BIT_1
1042#define CS_VF_SET_HOPS_OF_VPORTS BIT_2
1003 1043
1004 uint16_t comp_status; /* Completion status. */ 1044 uint16_t comp_status; /* Completion status. */
1005#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */ 1045#define CS_VCT_STS_ERROR 0x01 /* Specified VPs were not disabled. */
@@ -1009,27 +1049,29 @@ struct vp_config_entry_24xx {
1009#define CS_VCT_BUSY 0x05 /* Firmware not ready to accept cmd. */ 1049#define CS_VCT_BUSY 0x05 /* Firmware not ready to accept cmd. */
1010 1050
1011 uint8_t command; 1051 uint8_t command;
1012#define VCT_COMMAND_MOD_VPS 0x00 /* Enable VPs. */ 1052#define VCT_COMMAND_MOD_VPS 0x00 /* Modify VP configurations. */
1013#define VCT_COMMAND_MOD_ENABLE_VPS 0x08 /* Disable VPs. */ 1053#define VCT_COMMAND_MOD_ENABLE_VPS 0x01 /* Modify configuration & enable VPs. */
1014 1054
1015 uint8_t vp_count; 1055 uint8_t vp_count;
1016 1056
1017 uint8_t vp_idx1; 1057 uint8_t vp_index1;
1018 uint8_t vp_idx2; 1058 uint8_t vp_index2;
1019 1059
1020 uint8_t options_idx1; 1060 uint8_t options_idx1;
1021 uint8_t hard_address_idx1; 1061 uint8_t hard_address_idx1;
1022 uint16_t reserved_2; 1062 uint16_t reserved_vp1;
1023 uint8_t port_name_idx1[WWN_SIZE]; 1063 uint8_t port_name_idx1[WWN_SIZE];
1024 uint8_t node_name_idx1[WWN_SIZE]; 1064 uint8_t node_name_idx1[WWN_SIZE];
1025 1065
1026 uint8_t options_idx2; 1066 uint8_t options_idx2;
1027 uint8_t hard_address_idx2; 1067 uint8_t hard_address_idx2;
1028 uint16_t reserved_3; 1068 uint16_t reserved_vp2;
1029 uint8_t port_name_idx2[WWN_SIZE]; 1069 uint8_t port_name_idx2[WWN_SIZE];
1030 uint8_t node_name_idx2[WWN_SIZE]; 1070 uint8_t node_name_idx2[WWN_SIZE];
1031 1071 struct vf_id id;
1032 uint8_t reserved_4[8]; 1072 uint16_t reserved_4;
1073 struct vf_hopct hopct;
1074 uint8_t reserved_5;
1033}; 1075};
1034 1076
1035#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */ 1077#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
@@ -1054,5 +1096,30 @@ struct vp_rpt_id_entry_24xx {
1054 uint8_t reserved_4[32]; 1096 uint8_t reserved_4[32];
1055}; 1097};
1056 1098
1099#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
1100struct vf_evfp_entry_24xx {
1101 uint8_t entry_type; /* Entry type. */
1102 uint8_t entry_count; /* Entry count. */
1103 uint8_t sys_define; /* System defined. */
1104 uint8_t entry_status; /* Entry Status. */
1105
1106 uint32_t handle; /* System handle. */
1107 uint16_t comp_status; /* Completion status. */
1108 uint16_t timeout; /* timeout */
1109 uint16_t adim_tagging_mode;
1110
1111 uint16_t vfport_id;
1112 uint32_t exch_addr;
1113
1114 uint16_t nport_handle; /* N_PORT handle. */
1115 uint16_t control_flags;
1116 uint32_t io_parameter_0;
1117 uint32_t io_parameter_1;
1118 uint32_t tx_address[2]; /* Data segment 0 address. */
1119 uint32_t tx_len; /* Data segment 0 length. */
1120 uint32_t rx_address[2]; /* Data segment 1 address. */
1121 uint32_t rx_len; /* Data segment 1 length. */
1122};
1123
1057/* END MID Support ***********************************************************/ 1124/* END MID Support ***********************************************************/
1058#endif 1125#endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 74544ae4b0e2..b44eff2803ce 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -62,6 +62,38 @@ extern int ql2xfdmienable;
62extern int ql2xallocfwdump; 62extern int ql2xallocfwdump;
63extern int ql2xextended_error_logging; 63extern int ql2xextended_error_logging;
64extern int ql2xqfullrampup; 64extern int ql2xqfullrampup;
65extern int num_hosts;
66
67/*
68 * Global Functions in qla_mid.c source file.
69 */
70extern struct scsi_host_template qla2x00_driver_template;
71extern struct scsi_host_template qla24xx_driver_template;
72extern struct scsi_transport_template *qla2xxx_transport_vport_template;
73extern uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
74extern void qla2x00_timer(scsi_qla_host_t *);
75extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long);
76extern void qla2x00_stop_timer(scsi_qla_host_t *);
77extern uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *);
78extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
79extern int qla24xx_disable_vp (scsi_qla_host_t *);
80extern int qla24xx_enable_vp (scsi_qla_host_t *);
81extern void qla2x00_mem_free(scsi_qla_host_t *);
82extern int qla24xx_control_vp(scsi_qla_host_t *, int );
83extern int qla24xx_modify_vp_config(scsi_qla_host_t *);
84extern int qla2x00_send_change_request(scsi_qla_host_t *, uint16_t, uint16_t);
85extern void qla2x00_vp_stop_timer(scsi_qla_host_t *);
86extern int qla24xx_configure_vhba (scsi_qla_host_t *);
87extern int qla24xx_get_vp_entry(scsi_qla_host_t *, uint16_t, int);
88extern int qla24xx_get_vp_database(scsi_qla_host_t *, uint16_t);
89extern int qla2x00_do_dpc_vp(scsi_qla_host_t *);
90extern void qla24xx_report_id_acquisition(scsi_qla_host_t *,
91 struct vp_rpt_id_entry_24xx *);
92extern scsi_qla_host_t * qla24xx_find_vhost_by_name(scsi_qla_host_t *,
93 uint8_t *);
94extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
95extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
96extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
65 97
66extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *); 98extern void qla2x00_sp_compl(scsi_qla_host_t *, srb_t *);
67 99
@@ -77,6 +109,10 @@ extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *);
77extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *); 109extern int qla2x00_wait_for_hba_online(scsi_qla_host_t *);
78 110
79extern void qla2xxx_wake_dpc(scsi_qla_host_t *); 111extern void qla2xxx_wake_dpc(scsi_qla_host_t *);
112extern void qla2x00_alert_all_vps(scsi_qla_host_t *, uint16_t *);
113extern void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
114extern void qla2x00_vp_abort_isp(scsi_qla_host_t *);
115extern int qla24xx_vport_delete(struct fc_vport *);
80 116
81/* 117/*
82 * Global Function Prototypes in qla_iocb.c source file. 118 * Global Function Prototypes in qla_iocb.c source file.
@@ -128,7 +164,7 @@ qla2x00_abort_target(fc_port_t *);
128 164
129extern int 165extern int
130qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, 166qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
131 uint8_t *, uint16_t *); 167 uint8_t *, uint16_t *, uint16_t *);
132 168
133extern int 169extern int
134qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *); 170qla2x00_get_retry_cnt(scsi_qla_host_t *, uint8_t *, uint8_t *, uint16_t *);
@@ -303,6 +339,7 @@ struct class_device_attribute;
303extern struct class_device_attribute *qla2x00_host_attrs[]; 339extern struct class_device_attribute *qla2x00_host_attrs[];
304struct fc_function_template; 340struct fc_function_template;
305extern struct fc_function_template qla2xxx_transport_functions; 341extern struct fc_function_template qla2xxx_transport_functions;
342extern struct fc_function_template qla2xxx_transport_vport_functions;
306extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); 343extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
307extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); 344extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
308extern void qla2x00_init_host_attr(scsi_qla_host_t *); 345extern void qla2x00_init_host_attr(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ec5b2dd90d6a..a086b3f0df65 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -88,6 +88,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size)
88 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 88 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
89 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 89 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
90 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 90 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
91 ct_pkt->vp_index = ha->vp_idx;
91 92
92 return (ct_pkt); 93 return (ct_pkt);
93} 94}
@@ -1186,6 +1187,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1186 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1187 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1187 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1188 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1188 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1189 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1190 ct_pkt->vp_index = ha->vp_idx;
1189 1191
1190 return ct_pkt; 1192 return ct_pkt;
1191} 1193}
@@ -1746,6 +1748,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size,
1746 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma)); 1748 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1747 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma)); 1749 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1748 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count; 1750 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1751 ct_pkt->vp_index = ha->vp_idx;
1749 1752
1750 return ct_pkt; 1753 return ct_pkt;
1751} 1754}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index cf94f8636ba5..cc6ebb609e98 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -899,6 +899,10 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
899 &ha->fw_subminor_version, 899 &ha->fw_subminor_version,
900 &ha->fw_attributes, &ha->fw_memory_size); 900 &ha->fw_attributes, &ha->fw_memory_size);
901 qla2x00_resize_request_q(ha); 901 qla2x00_resize_request_q(ha);
902 ha->flags.npiv_supported = 0;
903 if (IS_QLA24XX(ha) &&
904 (ha->fw_attributes & BIT_2))
905 ha->flags.npiv_supported = 1;
902 906
903 if (ql2xallocfwdump) 907 if (ql2xallocfwdump)
904 qla2x00_alloc_fw_dump(ha); 908 qla2x00_alloc_fw_dump(ha);
@@ -1101,6 +1105,8 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1101 int rval; 1105 int rval;
1102 unsigned long flags = 0; 1106 unsigned long flags = 0;
1103 int cnt; 1107 int cnt;
1108 struct mid_init_cb_24xx *mid_init_cb =
1109 (struct mid_init_cb_24xx *) ha->init_cb;
1104 1110
1105 spin_lock_irqsave(&ha->hardware_lock, flags); 1111 spin_lock_irqsave(&ha->hardware_lock, flags);
1106 1112
@@ -1132,6 +1138,10 @@ qla2x00_init_rings(scsi_qla_host_t *ha)
1132 ha->isp_ops.update_fw_options(ha); 1138 ha->isp_ops.update_fw_options(ha);
1133 1139
1134 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no)); 1140 DEBUG(printk("scsi(%ld): Issue init firmware.\n", ha->host_no));
1141
1142 mid_init_cb->count = MAX_NUM_VPORT_FABRIC;
1143 ha->max_npiv_vports = MAX_NUM_VPORT_FABRIC;
1144
1135 rval = qla2x00_init_firmware(ha, ha->init_cb_size); 1145 rval = qla2x00_init_firmware(ha, ha->init_cb_size);
1136 if (rval) { 1146 if (rval) {
1137 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n", 1147 DEBUG2_3(printk("scsi(%ld): Init firmware **** FAILED ****.\n",
@@ -1263,6 +1273,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1263 int rval; 1273 int rval;
1264 uint16_t loop_id; 1274 uint16_t loop_id;
1265 uint16_t topo; 1275 uint16_t topo;
1276 uint16_t sw_cap;
1266 uint8_t al_pa; 1277 uint8_t al_pa;
1267 uint8_t area; 1278 uint8_t area;
1268 uint8_t domain; 1279 uint8_t domain;
@@ -1270,7 +1281,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1270 1281
1271 /* Get host addresses. */ 1282 /* Get host addresses. */
1272 rval = qla2x00_get_adapter_id(ha, 1283 rval = qla2x00_get_adapter_id(ha,
1273 &loop_id, &al_pa, &area, &domain, &topo); 1284 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
1274 if (rval != QLA_SUCCESS) { 1285 if (rval != QLA_SUCCESS) {
1275 if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) || 1286 if (LOOP_TRANSITION(ha) || atomic_read(&ha->loop_down_timer) ||
1276 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1287 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
@@ -1295,6 +1306,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1295 /* initialize */ 1306 /* initialize */
1296 ha->min_external_loopid = SNS_FIRST_LOOP_ID; 1307 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
1297 ha->operating_mode = LOOP; 1308 ha->operating_mode = LOOP;
1309 ha->switch_cap = 0;
1298 1310
1299 switch (topo) { 1311 switch (topo) {
1300 case 0: 1312 case 0:
@@ -1307,6 +1319,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1307 case 1: 1319 case 1:
1308 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n", 1320 DEBUG3(printk("scsi(%ld): HBA in FL topology.\n",
1309 ha->host_no)); 1321 ha->host_no));
1322 ha->switch_cap = sw_cap;
1310 ha->current_topology = ISP_CFG_FL; 1323 ha->current_topology = ISP_CFG_FL;
1311 strcpy(connect_type, "(FL_Port)"); 1324 strcpy(connect_type, "(FL_Port)");
1312 break; 1325 break;
@@ -1322,6 +1335,7 @@ qla2x00_configure_hba(scsi_qla_host_t *ha)
1322 case 3: 1335 case 3:
1323 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n", 1336 DEBUG3(printk("scsi(%ld): HBA in F P2P topology.\n",
1324 ha->host_no)); 1337 ha->host_no));
1338 ha->switch_cap = sw_cap;
1325 ha->operating_mode = P2P; 1339 ha->operating_mode = P2P;
1326 ha->current_topology = ISP_CFG_F; 1340 ha->current_topology = ISP_CFG_F;
1327 strcpy(connect_type, "(F_Port)"); 1341 strcpy(connect_type, "(F_Port)");
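The new switch_cap field is only meaningful when a fabric switch is actually present. A condensed sketch of the topology handling the hunks above implement:

        /* Sketch: mb[7] of GET ADAPTER ID (sw_cap) describes the switch,
         * so it is latched for FL_Port and F_Port topologies and stays
         * zero for private loop and N_Port-to-N_Port links. */
        ha->switch_cap = 0;
        switch (topo) {
        case 1:                         /* FL_Port */
        case 3:                         /* F_Port  */
                ha->switch_cap = sw_cap;
                break;
        default:                        /* loop / direct attach */
                break;
        }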
@@ -1743,7 +1757,6 @@ qla2x00_rport_del(void *data)
1743 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1757 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1744 if (rport) 1758 if (rport)
1745 fc_remote_port_delete(rport); 1759 fc_remote_port_delete(rport);
1746
1747} 1760}
1748 1761
1749/** 1762/**
@@ -1765,6 +1778,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1765 /* Setup fcport template structure. */ 1778 /* Setup fcport template structure. */
1766 memset(fcport, 0, sizeof (fc_port_t)); 1779 memset(fcport, 0, sizeof (fc_port_t));
1767 fcport->ha = ha; 1780 fcport->ha = ha;
1781 fcport->vp_idx = ha->vp_idx;
1768 fcport->port_type = FCT_UNKNOWN; 1782 fcport->port_type = FCT_UNKNOWN;
1769 fcport->loop_id = FC_NO_LOOP_ID; 1783 fcport->loop_id = FC_NO_LOOP_ID;
1770 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1784 atomic_set(&fcport->state, FCS_UNCONFIGURED);
@@ -1911,6 +1925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
1911 char *id_iter; 1925 char *id_iter;
1912 uint16_t loop_id; 1926 uint16_t loop_id;
1913 uint8_t domain, area, al_pa; 1927 uint8_t domain, area, al_pa;
1928 scsi_qla_host_t *pha = to_qla_parent(ha);
1914 1929
1915 found_devs = 0; 1930 found_devs = 0;
1916 new_fcport = NULL; 1931 new_fcport = NULL;
@@ -1942,7 +1957,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
1942 /* 1957 /*
1943 * Mark local devices that were present with FCF_DEVICE_LOST for now. 1958 * Mark local devices that were present with FCF_DEVICE_LOST for now.
1944 */ 1959 */
1945 list_for_each_entry(fcport, &ha->fcports, list) { 1960 list_for_each_entry(fcport, &pha->fcports, list) {
1961 if (fcport->vp_idx != ha->vp_idx)
1962 continue;
1963
1946 if (atomic_read(&fcport->state) == FCS_ONLINE && 1964 if (atomic_read(&fcport->state) == FCS_ONLINE &&
1947 fcport->port_type != FCT_BROADCAST && 1965 fcport->port_type != FCT_BROADCAST &&
1948 (fcport->flags & FCF_FABRIC_DEVICE) == 0) { 1966 (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
@@ -1988,6 +2006,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
1988 new_fcport->d_id.b.area = area; 2006 new_fcport->d_id.b.area = area;
1989 new_fcport->d_id.b.al_pa = al_pa; 2007 new_fcport->d_id.b.al_pa = al_pa;
1990 new_fcport->loop_id = loop_id; 2008 new_fcport->loop_id = loop_id;
2009 new_fcport->vp_idx = ha->vp_idx;
1991 rval2 = qla2x00_get_port_database(ha, new_fcport, 0); 2010 rval2 = qla2x00_get_port_database(ha, new_fcport, 0);
1992 if (rval2 != QLA_SUCCESS) { 2011 if (rval2 != QLA_SUCCESS) {
1993 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport " 2012 DEBUG2(printk("scsi(%ld): Failed to retrieve fcport "
@@ -2003,7 +2022,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2003 /* Check for matching device in port list. */ 2022 /* Check for matching device in port list. */
2004 found = 0; 2023 found = 0;
2005 fcport = NULL; 2024 fcport = NULL;
2006 list_for_each_entry(fcport, &ha->fcports, list) { 2025 list_for_each_entry(fcport, &pha->fcports, list) {
2026 if (fcport->vp_idx != ha->vp_idx)
2027 continue;
2028
2007 if (memcmp(new_fcport->port_name, fcport->port_name, 2029 if (memcmp(new_fcport->port_name, fcport->port_name,
2008 WWN_SIZE)) 2030 WWN_SIZE))
2009 continue; 2031 continue;
@@ -2023,7 +2045,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *ha)
2023 if (!found) { 2045 if (!found) {
2024 /* New device, add to fcports list. */ 2046 /* New device, add to fcports list. */
2025 new_fcport->flags &= ~FCF_PERSISTENT_BOUND; 2047 new_fcport->flags &= ~FCF_PERSISTENT_BOUND;
2026 list_add_tail(&new_fcport->list, &ha->fcports); 2048 if (ha->parent) {
2049 new_fcport->ha = ha;
2050 new_fcport->vp_idx = ha->vp_idx;
2051 list_add_tail(&new_fcport->vp_fcport,
2052 &ha->vp_fcports);
2053 }
2054 list_add_tail(&new_fcport->list, &pha->fcports);
2027 2055
2028 /* Allocate a new replacement fcport. */ 2056 /* Allocate a new replacement fcport. */
2029 fcport = new_fcport; 2057 fcport = new_fcport;
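With NPIV, every fc_port_t lives on the physical adapter's list and each (virtual) host filters by vp_idx. A minimal sketch of the iteration idiom these hunks introduce:

        /* Sketch: vports share the parent's fcports list; entries owned
         * by other vports are skipped by comparing vp_idx. */
        scsi_qla_host_t *pha = to_qla_parent(ha);
        fc_port_t *fcport;

        list_for_each_entry(fcport, &pha->fcports, list) {
                if (fcport->vp_idx != ha->vp_idx)
                        continue;       /* owned by another (v)port */
                /* per-port work for this host */
        }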
@@ -2199,11 +2227,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2199void 2227void
2200qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport) 2228qla2x00_update_fcport(scsi_qla_host_t *ha, fc_port_t *fcport)
2201{ 2229{
2230 scsi_qla_host_t *pha = to_qla_parent(ha);
2231
2202 fcport->ha = ha; 2232 fcport->ha = ha;
2203 fcport->login_retry = 0; 2233 fcport->login_retry = 0;
2204 fcport->port_login_retry_count = ha->port_down_retry_count * 2234 fcport->port_login_retry_count = pha->port_down_retry_count *
2205 PORT_RETRY_TIME; 2235 PORT_RETRY_TIME;
2206 atomic_set(&fcport->port_down_timer, ha->port_down_retry_count * 2236 atomic_set(&fcport->port_down_timer, pha->port_down_retry_count *
2207 PORT_RETRY_TIME); 2237 PORT_RETRY_TIME);
2208 fcport->flags &= ~FCF_LOGIN_NEEDED; 2238 fcport->flags &= ~FCF_LOGIN_NEEDED;
2209 2239
@@ -2234,6 +2264,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2234 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2264 uint16_t mb[MAILBOX_REGISTER_COUNT];
2235 uint16_t loop_id; 2265 uint16_t loop_id;
2236 LIST_HEAD(new_fcports); 2266 LIST_HEAD(new_fcports);
2267 scsi_qla_host_t *pha = to_qla_parent(ha);
2237 2268
2238 /* If FL port exists, then SNS is present */ 2269 /* If FL port exists, then SNS is present */
2239 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) 2270 if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
@@ -2307,7 +2338,10 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2307 * Logout all previous fabric devices marked lost, except 2338 * Logout all previous fabric devices marked lost, except
2308 * tape devices. 2339 * tape devices.
2309 */ 2340 */
2310 list_for_each_entry(fcport, &ha->fcports, list) { 2341 list_for_each_entry(fcport, &pha->fcports, list) {
 2342 if (fcport->vp_idx != ha->vp_idx)
2343 continue;
2344
2311 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2345 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2312 break; 2346 break;
2313 2347
@@ -2332,13 +2366,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2332 } 2366 }
2333 2367
2334 /* Starting free loop ID. */ 2368 /* Starting free loop ID. */
2335 next_loopid = ha->min_external_loopid; 2369 next_loopid = pha->min_external_loopid;
2336 2370
2337 /* 2371 /*
2338 * Scan through our port list and login entries that need to be 2372 * Scan through our port list and login entries that need to be
2339 * logged in. 2373 * logged in.
2340 */ 2374 */
2341 list_for_each_entry(fcport, &ha->fcports, list) { 2375 list_for_each_entry(fcport, &pha->fcports, list) {
2376 if (fcport->vp_idx != ha->vp_idx)
2377 continue;
2378
2342 if (atomic_read(&ha->loop_down_timer) || 2379 if (atomic_read(&ha->loop_down_timer) ||
2343 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) 2380 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
2344 break; 2381 break;
@@ -2380,11 +2417,18 @@ qla2x00_configure_fabric(scsi_qla_host_t *ha)
2380 break; 2417 break;
2381 } 2418 }
2382 2419
2383 /* Remove device from the new list and add it to DB */
2384 list_move_tail(&fcport->list, &ha->fcports);
2385
2386 /* Login and update database */ 2420 /* Login and update database */
2387 qla2x00_fabric_dev_login(ha, fcport, &next_loopid); 2421 qla2x00_fabric_dev_login(ha, fcport, &next_loopid);
2422
2423 if (ha->parent) {
2424 fcport->ha = ha;
2425 fcport->vp_idx = ha->vp_idx;
2426 list_add_tail(&fcport->vp_fcport,
2427 &ha->vp_fcports);
2428 list_move_tail(&fcport->list,
2429 &ha->parent->fcports);
2430 } else
2431 list_move_tail(&fcport->list, &ha->fcports);
2388 } 2432 }
2389 } while (0); 2433 } while (0);
2390 2434
@@ -2428,6 +2472,11 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2428 int swl_idx; 2472 int swl_idx;
2429 int first_dev, last_dev; 2473 int first_dev, last_dev;
2430 port_id_t wrap, nxt_d_id; 2474 port_id_t wrap, nxt_d_id;
2475 int vp_index;
2476 int empty_vp_index;
2477 int found_vp;
2478 scsi_qla_host_t *vha;
2479 scsi_qla_host_t *pha = to_qla_parent(ha);
2431 2480
2432 rval = QLA_SUCCESS; 2481 rval = QLA_SUCCESS;
2433 2482
@@ -2461,13 +2510,13 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2461 return (QLA_MEMORY_ALLOC_FAILED); 2510 return (QLA_MEMORY_ALLOC_FAILED);
2462 } 2511 }
2463 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2512 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2464 2513 new_fcport->vp_idx = ha->vp_idx;
2465 /* Set start port ID scan at adapter ID. */ 2514 /* Set start port ID scan at adapter ID. */
2466 first_dev = 1; 2515 first_dev = 1;
2467 last_dev = 0; 2516 last_dev = 0;
2468 2517
2469 /* Starting free loop ID. */ 2518 /* Starting free loop ID. */
2470 loop_id = ha->min_external_loopid; 2519 loop_id = pha->min_external_loopid;
2471 for (; loop_id <= ha->last_loop_id; loop_id++) { 2520 for (; loop_id <= ha->last_loop_id; loop_id++) {
2472 if (qla2x00_is_reserved_id(ha, loop_id)) 2521 if (qla2x00_is_reserved_id(ha, loop_id))
2473 continue; 2522 continue;
@@ -2521,10 +2570,42 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2521 break; 2570 break;
2522 } 2571 }
2523 2572
2524 /* Bypass if host adapter. */ 2573 /* Bypass if same physical adapter. */
2525 if (new_fcport->d_id.b24 == ha->d_id.b24) 2574 if (new_fcport->d_id.b24 == pha->d_id.b24)
2526 continue; 2575 continue;
2527 2576
2577 /* Bypass virtual ports of the same host. */
2578 if (pha->num_vhosts) {
2579 vp_index = find_next_bit(
2580 (unsigned long *)pha->vp_idx_map,
2581 MAX_MULTI_ID_FABRIC + 1, 1);
2582
 2583 for (; vp_index <= MAX_MULTI_ID_FABRIC;
2584 vp_index = find_next_bit(
2585 (unsigned long *)pha->vp_idx_map,
2586 MAX_MULTI_ID_FABRIC + 1, vp_index + 1)) {
2587 empty_vp_index = 1;
2588 found_vp = 0;
2589 list_for_each_entry(vha, &pha->vp_list,
2590 vp_list) {
2591 if (vp_index == vha->vp_idx) {
2592 empty_vp_index = 0;
2593 found_vp = 1;
2594 break;
2595 }
2596 }
2597
2598 if (empty_vp_index)
2599 continue;
2600
2601 if (found_vp &&
2602 new_fcport->d_id.b24 == vha->d_id.b24)
2603 break;
2604 }
2605 if (vp_index <= MAX_MULTI_ID_FABRIC)
2606 continue;
2607 }
2608
2528 /* Bypass if same domain and area of adapter. */ 2609 /* Bypass if same domain and area of adapter. */
2529 if (((new_fcport->d_id.b24 & 0xffff00) == 2610 if (((new_fcport->d_id.b24 & 0xffff00) ==
2530 (ha->d_id.b24 & 0xffff00)) && ha->current_topology == 2611 (ha->d_id.b24 & 0xffff00)) && ha->current_topology ==
@@ -2537,7 +2618,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2537 2618
2538 /* Locate matching device in database. */ 2619 /* Locate matching device in database. */
2539 found = 0; 2620 found = 0;
2540 list_for_each_entry(fcport, &ha->fcports, list) { 2621 list_for_each_entry(fcport, &pha->fcports, list) {
2622 if (new_fcport->vp_idx != fcport->vp_idx)
2623 continue;
2541 if (memcmp(new_fcport->port_name, fcport->port_name, 2624 if (memcmp(new_fcport->port_name, fcport->port_name,
2542 WWN_SIZE)) 2625 WWN_SIZE))
2543 continue; 2626 continue;
@@ -2605,6 +2688,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2605 } 2688 }
2606 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 2689 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
2607 new_fcport->d_id.b24 = nxt_d_id.b24; 2690 new_fcport->d_id.b24 = nxt_d_id.b24;
2691 new_fcport->vp_idx = ha->vp_idx;
2608 } 2692 }
2609 2693
2610 kfree(swl); 2694 kfree(swl);
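The vport-bypass logic above walks the parent's VP index bitmap. A standalone sketch of that scan (bit 0 is the physical port, so scanning starts at 1):

        /* Sketch: iterate every allocated VP index with the standard
         * bitops helpers; vp_idx_map is treated as an unsigned long
         * bitmap, exactly as in the hunk above. */
        unsigned long *map = (unsigned long *)pha->vp_idx_map;
        int idx;

        for (idx = find_next_bit(map, MAX_MULTI_ID_FABRIC + 1, 1);
             idx <= MAX_MULTI_ID_FABRIC;
             idx = find_next_bit(map, MAX_MULTI_ID_FABRIC + 1, idx + 1)) {
                /* idx is an in-use virtual port index */
        }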
@@ -2637,6 +2721,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2637 int found; 2721 int found;
2638 fc_port_t *fcport; 2722 fc_port_t *fcport;
2639 uint16_t first_loop_id; 2723 uint16_t first_loop_id;
2724 scsi_qla_host_t *pha = to_qla_parent(ha);
2640 2725
2641 rval = QLA_SUCCESS; 2726 rval = QLA_SUCCESS;
2642 2727
@@ -2663,7 +2748,7 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *ha, fc_port_t *dev)
2663 /* Check for loop ID being already in use. */ 2748 /* Check for loop ID being already in use. */
2664 found = 0; 2749 found = 0;
2665 fcport = NULL; 2750 fcport = NULL;
2666 list_for_each_entry(fcport, &ha->fcports, list) { 2751 list_for_each_entry(fcport, &pha->fcports, list) {
2667 if (fcport->loop_id == dev->loop_id && fcport != dev) { 2752 if (fcport->loop_id == dev->loop_id && fcport != dev) {
2668 /* ID possibly in use */ 2753 /* ID possibly in use */
2669 found++; 2754 found++;
@@ -2710,6 +2795,7 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2710 uint8_t rscn_out_iter; 2795 uint8_t rscn_out_iter;
2711 uint8_t format; 2796 uint8_t format;
2712 port_id_t d_id; 2797 port_id_t d_id;
2798 scsi_qla_host_t *pha = to_qla_parent(ha);
2713 2799
2714 rval = QLA_RSCNS_HANDLED; 2800 rval = QLA_RSCNS_HANDLED;
2715 2801
@@ -2776,7 +2862,10 @@ qla2x00_device_resync(scsi_qla_host_t *ha)
2776 2862
2777 rval = QLA_SUCCESS; 2863 rval = QLA_SUCCESS;
2778 2864
2779 list_for_each_entry(fcport, &ha->fcports, list) { 2865 list_for_each_entry(fcport, &pha->fcports, list) {
2866 if (fcport->vp_idx != ha->vp_idx)
2867 continue;
2868
2780 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || 2869 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
2781 (fcport->d_id.b24 & mask) != d_id.b24 || 2870 (fcport->d_id.b24 & mask) != d_id.b24 ||
2782 fcport->port_type == FCT_BROADCAST) 2871 fcport->port_type == FCT_BROADCAST)
@@ -3940,3 +4029,40 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
3940 ret = qla2x00_stop_firmware(ha); 4029 ret = qla2x00_stop_firmware(ha);
3941 } 4030 }
3942} 4031}
4032
4033int
4034qla24xx_configure_vhba(scsi_qla_host_t *ha)
4035{
4036 int rval = QLA_SUCCESS;
4037 uint16_t mb[MAILBOX_REGISTER_COUNT];
4038
4039 if (!ha->parent)
4040 return -EINVAL;
4041
4042 rval = qla2x00_fw_ready(ha);
4043 if (rval == QLA_SUCCESS) {
4044 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
4045 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
4046 }
4047
4048 ha->flags.management_server_logged_in = 0;
4049
4050 /* Login to SNS first */
4051 qla24xx_login_fabric(ha, NPH_SNS, 0xff, 0xff, 0xfc,
4052 mb, BIT_1);
4053 if (mb[0] != MBS_COMMAND_COMPLETE) {
4054 DEBUG15(qla_printk(KERN_INFO, ha,
4055 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4056 "mb[2]=%x mb[6]=%x mb[7]=%x\n", NPH_SNS,
4057 mb[0], mb[1], mb[2], mb[6], mb[7]));
4058 return (QLA_FUNCTION_FAILED);
4059 }
4060
4061 atomic_set(&ha->loop_down_timer, 0);
4062 atomic_set(&ha->loop_state, LOOP_UP);
4063 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
4064 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
4065 rval = qla2x00_loop_resync(ha);
4066
4067 return rval;
4068}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c610a32a..c71863ff5489 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -155,6 +155,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
155 uint32_t *cur_dsd; 155 uint32_t *cur_dsd;
156 scsi_qla_host_t *ha; 156 scsi_qla_host_t *ha;
157 struct scsi_cmnd *cmd; 157 struct scsi_cmnd *cmd;
158 struct scatterlist *sg;
159 int i;
158 160
159 cmd = sp->cmd; 161 cmd = sp->cmd;
160 162
@@ -163,7 +165,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
163 __constant_cpu_to_le32(COMMAND_TYPE); 165 __constant_cpu_to_le32(COMMAND_TYPE);
164 166
165 /* No data transfer */ 167 /* No data transfer */
166 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) { 168 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
167 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 169 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
168 return; 170 return;
169 } 171 }
@@ -177,35 +179,23 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
177 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 179 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
178 180
179 /* Load data segments */ 181 /* Load data segments */
180 if (cmd->use_sg != 0) { 182 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
181 struct scatterlist *cur_seg; 183 cont_entry_t *cont_pkt;
182 struct scatterlist *end_seg; 184
183 185 /* Allocate additional continuation packets? */
184 cur_seg = (struct scatterlist *)cmd->request_buffer; 186 if (avail_dsds == 0) {
185 end_seg = cur_seg + tot_dsds; 187 /*
186 while (cur_seg < end_seg) { 188 * Seven DSDs are available in the Continuation
187 cont_entry_t *cont_pkt; 189 * Type 0 IOCB.
188 190 */
189 /* Allocate additional continuation packets? */ 191 cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
190 if (avail_dsds == 0) { 192 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
191 /* 193 avail_dsds = 7;
192 * Seven DSDs are available in the Continuation
193 * Type 0 IOCB.
194 */
195 cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
196 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
197 avail_dsds = 7;
198 }
199
200 *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
201 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
202 avail_dsds--;
203
204 cur_seg++;
205 } 194 }
206 } else { 195
207 *cur_dsd++ = cpu_to_le32(sp->dma_handle); 196 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
208 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen); 197 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
198 avail_dsds--;
209 } 199 }
210} 200}
211 201
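The rewritten walk relies on the midlayer's data-buffer accessors rather than the old use_sg/request_buffer pair. A compact sketch of the accessor calls these hunks use (assumed to come from <scsi/scsi_cmnd.h> in this tree):

        /* Sketch: per-command buffer accessors. */
        unsigned int len  = scsi_bufflen(cmd);  /* total transfer length   */
        int          nseg = scsi_sg_count(cmd); /* sg entries, 0 when none */
        struct scatterlist *sgl = scsi_sglist(cmd);

        scsi_set_resid(cmd, 0);                 /* clear residual count    */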
@@ -224,6 +214,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
224 uint32_t *cur_dsd; 214 uint32_t *cur_dsd;
225 scsi_qla_host_t *ha; 215 scsi_qla_host_t *ha;
226 struct scsi_cmnd *cmd; 216 struct scsi_cmnd *cmd;
217 struct scatterlist *sg;
218 int i;
227 219
228 cmd = sp->cmd; 220 cmd = sp->cmd;
229 221
@@ -232,7 +224,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
232 __constant_cpu_to_le32(COMMAND_A64_TYPE); 224 __constant_cpu_to_le32(COMMAND_A64_TYPE);
233 225
234 /* No data transfer */ 226 /* No data transfer */
235 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) { 227 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
236 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 228 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
237 return; 229 return;
238 } 230 }
@@ -246,39 +238,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
246 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 238 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
247 239
248 /* Load data segments */ 240 /* Load data segments */
249 if (cmd->use_sg != 0) { 241 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
250 struct scatterlist *cur_seg; 242 dma_addr_t sle_dma;
251 struct scatterlist *end_seg; 243 cont_a64_entry_t *cont_pkt;
252 244
253 cur_seg = (struct scatterlist *)cmd->request_buffer; 245 /* Allocate additional continuation packets? */
254 end_seg = cur_seg + tot_dsds; 246 if (avail_dsds == 0) {
255 while (cur_seg < end_seg) { 247 /*
256 dma_addr_t sle_dma; 248 * Five DSDs are available in the Continuation
257 cont_a64_entry_t *cont_pkt; 249 * Type 1 IOCB.
258 250 */
259 /* Allocate additional continuation packets? */ 251 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
260 if (avail_dsds == 0) { 252 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
261 /* 253 avail_dsds = 5;
262 * Five DSDs are available in the Continuation
263 * Type 1 IOCB.
264 */
265 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
266 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
267 avail_dsds = 5;
268 }
269
270 sle_dma = sg_dma_address(cur_seg);
271 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
272 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
273 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
274 avail_dsds--;
275
276 cur_seg++;
277 } 254 }
278 } else { 255
279 *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle)); 256 sle_dma = sg_dma_address(sg);
280 *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle)); 257 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
281 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen); 258 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
259 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
260 avail_dsds--;
282 } 261 }
283} 262}
284 263
@@ -291,7 +270,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
291int 270int
292qla2x00_start_scsi(srb_t *sp) 271qla2x00_start_scsi(srb_t *sp)
293{ 272{
294 int ret; 273 int ret, nseg;
295 unsigned long flags; 274 unsigned long flags;
296 scsi_qla_host_t *ha; 275 scsi_qla_host_t *ha;
297 struct scsi_cmnd *cmd; 276 struct scsi_cmnd *cmd;
@@ -299,7 +278,6 @@ qla2x00_start_scsi(srb_t *sp)
299 uint32_t index; 278 uint32_t index;
300 uint32_t handle; 279 uint32_t handle;
301 cmd_entry_t *cmd_pkt; 280 cmd_entry_t *cmd_pkt;
302 struct scatterlist *sg;
303 uint16_t cnt; 281 uint16_t cnt;
304 uint16_t req_cnt; 282 uint16_t req_cnt;
305 uint16_t tot_dsds; 283 uint16_t tot_dsds;
@@ -337,23 +315,15 @@ qla2x00_start_scsi(srb_t *sp)
337 goto queuing_error; 315 goto queuing_error;
338 316
339 /* Map the sg table so we have an accurate count of sg entries needed */ 317 /* Map the sg table so we have an accurate count of sg entries needed */
340 if (cmd->use_sg) { 318 if (scsi_sg_count(cmd)) {
341 sg = (struct scatterlist *) cmd->request_buffer; 319 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
342 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg, 320 scsi_sg_count(cmd), cmd->sc_data_direction);
343 cmd->sc_data_direction); 321 if (unlikely(!nseg))
344 if (tot_dsds == 0)
345 goto queuing_error;
346 } else if (cmd->request_bufflen) {
347 dma_addr_t req_dma;
348
349 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
350 cmd->request_bufflen, cmd->sc_data_direction);
351 if (dma_mapping_error(req_dma))
352 goto queuing_error; 322 goto queuing_error;
323 } else
324 nseg = 0;
353 325
354 sp->dma_handle = req_dma; 326 tot_dsds = nseg;
355 tot_dsds = 1;
356 }
357 327
358 /* Calculate the number of request entries needed. */ 328 /* Calculate the number of request entries needed. */
359 req_cnt = ha->isp_ops.calc_req_entries(tot_dsds); 329 req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
@@ -391,7 +361,7 @@ qla2x00_start_scsi(srb_t *sp)
391 361
392 /* Load SCSI command packet. */ 362 /* Load SCSI command packet. */
393 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 363 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
394 cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen); 364 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
395 365
396 /* Build IOCB segments */ 366 /* Build IOCB segments */
397 ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds); 367 ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
@@ -423,14 +393,9 @@ qla2x00_start_scsi(srb_t *sp)
423 return (QLA_SUCCESS); 393 return (QLA_SUCCESS);
424 394
425queuing_error: 395queuing_error:
426 if (cmd->use_sg && tot_dsds) { 396 if (tot_dsds)
427 sg = (struct scatterlist *) cmd->request_buffer; 397 scsi_dma_unmap(cmd);
428 pci_unmap_sg(ha->pdev, sg, cmd->use_sg, 398
429 cmd->sc_data_direction);
430 } else if (tot_dsds) {
431 pci_unmap_single(ha->pdev, sp->dma_handle,
432 cmd->request_bufflen, cmd->sc_data_direction);
433 }
434 spin_unlock_irqrestore(&ha->hardware_lock, flags); 399 spin_unlock_irqrestore(&ha->hardware_lock, flags);
435 400
436 return (QLA_FUNCTION_FAILED); 401 return (QLA_FUNCTION_FAILED);
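The mapping path above open-codes dma_map_sg(); the scsi_dma_map() helper introduced elsewhere in this series wraps the same call. A sketch of the equivalent pairing, assuming that helper:

        /* Sketch: scsi_dma_map() returns the mapped segment count, 0 for
         * commands with no data, and a negative value on mapping failure;
         * scsi_dma_unmap() (used in queuing_error above) is its inverse. */
        int nseg = scsi_dma_map(cmd);
        if (unlikely(nseg < 0))
                goto queuing_error;
        tot_dsds = nseg;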
@@ -453,9 +418,10 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
453{ 418{
454 mrk_entry_t *mrk; 419 mrk_entry_t *mrk;
455 struct mrk_entry_24xx *mrk24; 420 struct mrk_entry_24xx *mrk24;
421 scsi_qla_host_t *pha = to_qla_parent(ha);
456 422
457 mrk24 = NULL; 423 mrk24 = NULL;
458 mrk = (mrk_entry_t *)qla2x00_req_pkt(ha); 424 mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
459 if (mrk == NULL) { 425 if (mrk == NULL) {
460 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n", 426 DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
461 __func__, ha->host_no)); 427 __func__, ha->host_no));
@@ -472,6 +438,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
472 mrk24->lun[1] = LSB(lun); 438 mrk24->lun[1] = LSB(lun);
473 mrk24->lun[2] = MSB(lun); 439 mrk24->lun[2] = MSB(lun);
474 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); 440 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
441 mrk24->vp_index = ha->vp_idx;
475 } else { 442 } else {
476 SET_TARGET_ID(ha, mrk->target, loop_id); 443 SET_TARGET_ID(ha, mrk->target, loop_id);
477 mrk->lun = cpu_to_le16(lun); 444 mrk->lun = cpu_to_le16(lun);
@@ -479,7 +446,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
479 } 446 }
480 wmb(); 447 wmb();
481 448
482 qla2x00_isp_cmd(ha); 449 qla2x00_isp_cmd(pha);
483 450
484 return (QLA_SUCCESS); 451 return (QLA_SUCCESS);
485} 452}
@@ -642,6 +609,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
642 uint32_t *cur_dsd; 609 uint32_t *cur_dsd;
643 scsi_qla_host_t *ha; 610 scsi_qla_host_t *ha;
644 struct scsi_cmnd *cmd; 611 struct scsi_cmnd *cmd;
612 struct scatterlist *sg;
613 int i;
645 614
646 cmd = sp->cmd; 615 cmd = sp->cmd;
647 616
@@ -650,7 +619,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
650 __constant_cpu_to_le32(COMMAND_TYPE_7); 619 __constant_cpu_to_le32(COMMAND_TYPE_7);
651 620
652 /* No data transfer */ 621 /* No data transfer */
653 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) { 622 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
654 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 623 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
655 return; 624 return;
656 } 625 }
@@ -670,39 +639,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
670 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address; 639 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
671 640
672 /* Load data segments */ 641 /* Load data segments */
673 if (cmd->use_sg != 0) { 642
674 struct scatterlist *cur_seg; 643 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
675 struct scatterlist *end_seg; 644 dma_addr_t sle_dma;
676 645 cont_a64_entry_t *cont_pkt;
677 cur_seg = (struct scatterlist *)cmd->request_buffer; 646
678 end_seg = cur_seg + tot_dsds; 647 /* Allocate additional continuation packets? */
679 while (cur_seg < end_seg) { 648 if (avail_dsds == 0) {
680 dma_addr_t sle_dma; 649 /*
681 cont_a64_entry_t *cont_pkt; 650 * Five DSDs are available in the Continuation
682 651 * Type 1 IOCB.
683 /* Allocate additional continuation packets? */ 652 */
684 if (avail_dsds == 0) { 653 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
685 /* 654 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
686 * Five DSDs are available in the Continuation 655 avail_dsds = 5;
687 * Type 1 IOCB.
688 */
689 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
690 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
691 avail_dsds = 5;
692 }
693
694 sle_dma = sg_dma_address(cur_seg);
695 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
696 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
697 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
698 avail_dsds--;
699
700 cur_seg++;
701 } 656 }
702 } else { 657
703 *cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle)); 658 sle_dma = sg_dma_address(sg);
704 *cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle)); 659 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
705 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen); 660 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
661 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
662 avail_dsds--;
706 } 663 }
707} 664}
708 665
@@ -716,7 +673,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
716int 673int
717qla24xx_start_scsi(srb_t *sp) 674qla24xx_start_scsi(srb_t *sp)
718{ 675{
719 int ret; 676 int ret, nseg;
720 unsigned long flags; 677 unsigned long flags;
721 scsi_qla_host_t *ha; 678 scsi_qla_host_t *ha;
722 struct scsi_cmnd *cmd; 679 struct scsi_cmnd *cmd;
@@ -724,7 +681,6 @@ qla24xx_start_scsi(srb_t *sp)
724 uint32_t index; 681 uint32_t index;
725 uint32_t handle; 682 uint32_t handle;
726 struct cmd_type_7 *cmd_pkt; 683 struct cmd_type_7 *cmd_pkt;
727 struct scatterlist *sg;
728 uint16_t cnt; 684 uint16_t cnt;
729 uint16_t req_cnt; 685 uint16_t req_cnt;
730 uint16_t tot_dsds; 686 uint16_t tot_dsds;
@@ -762,23 +718,15 @@ qla24xx_start_scsi(srb_t *sp)
762 goto queuing_error; 718 goto queuing_error;
763 719
764 /* Map the sg table so we have an accurate count of sg entries needed */ 720 /* Map the sg table so we have an accurate count of sg entries needed */
765 if (cmd->use_sg) { 721 if (scsi_sg_count(cmd)) {
766 sg = (struct scatterlist *) cmd->request_buffer; 722 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
767 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg, 723 scsi_sg_count(cmd), cmd->sc_data_direction);
768 cmd->sc_data_direction); 724 if (unlikely(!nseg))
769 if (tot_dsds == 0)
770 goto queuing_error;
771 } else if (cmd->request_bufflen) {
772 dma_addr_t req_dma;
773
774 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
775 cmd->request_bufflen, cmd->sc_data_direction);
776 if (dma_mapping_error(req_dma))
777 goto queuing_error; 725 goto queuing_error;
726 } else
727 nseg = 0;
778 728
779 sp->dma_handle = req_dma; 729 tot_dsds = nseg;
780 tot_dsds = 1;
781 }
782 730
783 req_cnt = qla24xx_calc_iocbs(tot_dsds); 731 req_cnt = qla24xx_calc_iocbs(tot_dsds);
784 if (ha->req_q_cnt < (req_cnt + 2)) { 732 if (ha->req_q_cnt < (req_cnt + 2)) {
@@ -813,6 +761,7 @@ qla24xx_start_scsi(srb_t *sp)
813 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 761 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
814 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 762 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
815 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 763 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
764 cmd_pkt->vp_index = sp->fcport->vp_idx;
816 765
817 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 766 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
818 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 767 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -821,7 +770,7 @@ qla24xx_start_scsi(srb_t *sp)
821 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); 770 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
822 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); 771 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
823 772
824 cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen); 773 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
825 774
826 /* Build IOCB segments */ 775 /* Build IOCB segments */
827 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds); 776 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
@@ -853,14 +802,9 @@ qla24xx_start_scsi(srb_t *sp)
853 return QLA_SUCCESS; 802 return QLA_SUCCESS;
854 803
855queuing_error: 804queuing_error:
856 if (cmd->use_sg && tot_dsds) { 805 if (tot_dsds)
857 sg = (struct scatterlist *) cmd->request_buffer; 806 scsi_dma_unmap(cmd);
858 pci_unmap_sg(ha->pdev, sg, cmd->use_sg, 807
859 cmd->sc_data_direction);
860 } else if (tot_dsds) {
861 pci_unmap_single(ha->pdev, sp->dma_handle,
862 cmd->request_bufflen, cmd->sc_data_direction);
863 }
864 spin_unlock_irqrestore(&ha->hardware_lock, flags); 808 spin_unlock_irqrestore(&ha->hardware_lock, flags);
865 809
866 return QLA_FUNCTION_FAILED; 810 return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ca463469063d..0ba4c8d37879 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,7 +9,6 @@
9#include <scsi/scsi_tcq.h> 9#include <scsi/scsi_tcq.h>
10 10
11static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 11static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
12static void qla2x00_async_event(scsi_qla_host_t *, uint16_t *);
13static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t); 12static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
14static void qla2x00_status_entry(scsi_qla_host_t *, void *); 13static void qla2x00_status_entry(scsi_qla_host_t *, void *);
15static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); 14static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
@@ -244,7 +243,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
244 * @ha: SCSI driver HA context 243 * @ha: SCSI driver HA context
245 * @mb: Mailbox registers (0 - 3) 244 * @mb: Mailbox registers (0 - 3)
246 */ 245 */
247static void 246void
248qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) 247qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
249{ 248{
250#define LS_UNKNOWN 2 249#define LS_UNKNOWN 2
@@ -386,6 +385,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
386 qla2x00_mark_all_devices_lost(ha, 1); 385 qla2x00_mark_all_devices_lost(ha, 1);
387 } 386 }
388 387
388 if (ha->parent) {
389 atomic_set(&ha->vp_state, VP_FAILED);
390 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
391 }
392
389 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); 393 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
390 394
391 ha->flags.management_server_logged_in = 0; 395 ha->flags.management_server_logged_in = 0;
@@ -422,6 +426,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
422 qla2x00_mark_all_devices_lost(ha, 1); 426 qla2x00_mark_all_devices_lost(ha, 1);
423 } 427 }
424 428
429 if (ha->parent) {
430 atomic_set(&ha->vp_state, VP_FAILED);
431 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
432 }
433
425 ha->flags.management_server_logged_in = 0; 434 ha->flags.management_server_logged_in = 0;
426 ha->link_data_rate = PORT_SPEED_UNKNOWN; 435 ha->link_data_rate = PORT_SPEED_UNKNOWN;
427 if (ql2xfdmienable) 436 if (ql2xfdmienable)
@@ -440,6 +449,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
440 qla2x00_mark_all_devices_lost(ha, 1); 449 qla2x00_mark_all_devices_lost(ha, 1);
441 } 450 }
442 451
452 if (ha->parent) {
453 atomic_set(&ha->vp_state, VP_FAILED);
454 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
455 }
456
443 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 457 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
444 458
445 ha->operating_mode = LOOP; 459 ha->operating_mode = LOOP;
@@ -465,6 +479,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
465 qla2x00_mark_all_devices_lost(ha, 1); 479 qla2x00_mark_all_devices_lost(ha, 1);
466 } 480 }
467 481
482 if (ha->parent) {
483 atomic_set(&ha->vp_state, VP_FAILED);
484 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
485 }
486
468 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { 487 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
469 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 488 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
470 } 489 }
@@ -491,6 +510,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
491 qla2x00_mark_all_devices_lost(ha, 1); 510 qla2x00_mark_all_devices_lost(ha, 1);
492 } 511 }
493 512
513 if (ha->parent) {
514 atomic_set(&ha->vp_state, VP_FAILED);
515 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
516 }
517
494 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); 518 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
495 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 519 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
496 break; 520 break;
@@ -530,6 +554,10 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
530 break; 554 break;
531 555
532 case MBA_RSCN_UPDATE: /* State Change Registration */ 556 case MBA_RSCN_UPDATE: /* State Change Registration */
 557 /* Skip the RSCN if this vport has not yet issued an SCR */
558 if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
559 break;
560
 533 DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n", 561
534 ha->host_no)); 562 ha->host_no));
535 DEBUG(printk(KERN_INFO 563 DEBUG(printk(KERN_INFO
@@ -589,6 +617,9 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
589 ha->host_no, mb[1], mb[2])); 617 ha->host_no, mb[1], mb[2]));
590 break; 618 break;
591 } 619 }
620
621 if (!ha->parent && ha->num_vhosts)
622 qla2x00_alert_all_vps(ha, mb);
592} 623}
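The same four-line vport-failure block now appears in five async-event cases. A hypothetical factoring (not in the patch) that would capture it once:

        /* Hypothetical helper: mark a vport failed when an async event
         * takes the physical link down. */
        static inline void qla_vp_mark_failed(scsi_qla_host_t *ha)
        {
                if (ha->parent) {
                        atomic_set(&ha->vp_state, VP_FAILED);
                        fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
                }
        }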
593 624
594static void 625static void
@@ -889,19 +920,19 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
889 } 920 }
890 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 921 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
891 resid = resid_len; 922 resid = resid_len;
892 cp->resid = resid; 923 scsi_set_resid(cp, resid);
893 CMD_RESID_LEN(cp) = resid; 924 CMD_RESID_LEN(cp) = resid;
894 925
895 if (!lscsi_status && 926 if (!lscsi_status &&
896 ((unsigned)(cp->request_bufflen - resid) < 927 ((unsigned)(scsi_bufflen(cp) - resid) <
897 cp->underflow)) { 928 cp->underflow)) {
898 qla_printk(KERN_INFO, ha, 929 qla_printk(KERN_INFO, ha,
899 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 930 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
900 "detected (%x of %x bytes)...returning " 931 "detected (%x of %x bytes)...returning "
901 "error status.\n", ha->host_no, 932 "error status.\n", ha->host_no,
902 cp->device->channel, cp->device->id, 933 cp->device->channel, cp->device->id,
903 cp->device->lun, resid, 934 cp->device->lun, resid,
904 cp->request_bufflen); 935 scsi_bufflen(cp));
905 936
906 cp->result = DID_ERROR << 16; 937 cp->result = DID_ERROR << 16;
907 break; 938 break;
@@ -963,7 +994,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
963 resid = fw_resid_len; 994 resid = fw_resid_len;
964 995
965 if (scsi_status & SS_RESIDUAL_UNDER) { 996 if (scsi_status & SS_RESIDUAL_UNDER) {
966 cp->resid = resid; 997 scsi_set_resid(cp, resid);
967 CMD_RESID_LEN(cp) = resid; 998 CMD_RESID_LEN(cp) = resid;
968 } else { 999 } else {
969 DEBUG2(printk(KERN_INFO 1000 DEBUG2(printk(KERN_INFO
@@ -1042,26 +1073,26 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1042 */ 1073 */
1043 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1074 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1044 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1075 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1045 "frame(s) detected (%x of %x bytes)..." 1076 "frame(s) detected (%x of %x bytes)..."
1046 "retrying command.\n", ha->host_no, 1077 "retrying command.\n", ha->host_no,
1047 cp->device->channel, cp->device->id, 1078 cp->device->channel, cp->device->id,
1048 cp->device->lun, resid, 1079 cp->device->lun, resid,
1049 cp->request_bufflen)); 1080 scsi_bufflen(cp)));
1050 1081
1051 cp->result = DID_BUS_BUSY << 16; 1082 cp->result = DID_BUS_BUSY << 16;
1052 break; 1083 break;
1053 } 1084 }
1054 1085
1055 /* Handle mid-layer underflow */ 1086 /* Handle mid-layer underflow */
1056 if ((unsigned)(cp->request_bufflen - resid) < 1087 if ((unsigned)(scsi_bufflen(cp) - resid) <
1057 cp->underflow) { 1088 cp->underflow) {
1058 qla_printk(KERN_INFO, ha, 1089 qla_printk(KERN_INFO, ha,
1059 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1090 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1060 "detected (%x of %x bytes)...returning " 1091 "detected (%x of %x bytes)...returning "
1061 "error status.\n", ha->host_no, 1092 "error status.\n", ha->host_no,
1062 cp->device->channel, cp->device->id, 1093 cp->device->channel, cp->device->id,
1063 cp->device->lun, resid, 1094 cp->device->lun, resid,
1064 cp->request_bufflen); 1095 scsi_bufflen(cp));
1065 1096
1066 cp->result = DID_ERROR << 16; 1097 cp->result = DID_ERROR << 16;
1067 break; 1098 break;
@@ -1084,7 +1115,7 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
1084 DEBUG2(printk(KERN_INFO 1115 DEBUG2(printk(KERN_INFO
1085 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR " 1116 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1086 "status!\n", 1117 "status!\n",
1087 cp->serial_number, cp->request_bufflen, resid_len)); 1118 cp->serial_number, scsi_bufflen(cp), resid_len));
1088 1119
1089 cp->result = DID_ERROR << 16; 1120 cp->result = DID_ERROR << 16;
1090 break; 1121 break;
@@ -1393,6 +1424,10 @@ qla24xx_process_response_queue(struct scsi_qla_host *ha)
1393 case MS_IOCB_TYPE: 1424 case MS_IOCB_TYPE:
1394 qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt); 1425 qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
1395 break; 1426 break;
1427 case VP_RPT_ID_IOCB_TYPE:
1428 qla24xx_report_id_acquisition(ha,
1429 (struct vp_rpt_id_entry_24xx *)pkt);
1430 break;
1396 default: 1431 default:
1397 /* Type Not Supported. */ 1432 /* Type Not Supported. */
1398 DEBUG4(printk(KERN_WARNING 1433 DEBUG4(printk(KERN_WARNING
@@ -1633,7 +1668,7 @@ struct qla_init_msix_entry {
1633 uint16_t entry; 1668 uint16_t entry;
1634 uint16_t index; 1669 uint16_t index;
1635 const char *name; 1670 const char *name;
1636 irqreturn_t (*handler)(int, void *); 1671 irq_handler_t handler;
1637}; 1672};
1638 1673
1639static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { 1674static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
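The handler field above is switched to the kernel's irq_handler_t typedef; its definition (from <linux/interrupt.h>) is identical to the open-coded pointer it replaces, so this is purely a readability change:

        typedef irqreturn_t (*irq_handler_t)(int, void *);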
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 71e32a248528..2cd0cff25928 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -42,25 +42,29 @@ qla2x00_mbx_sem_timeout(unsigned long data)
42 * Kernel context. 42 * Kernel context.
43 */ 43 */
44static int 44static int
45qla2x00_mailbox_command(scsi_qla_host_t *ha, mbx_cmd_t *mcp) 45qla2x00_mailbox_command(scsi_qla_host_t *pvha, mbx_cmd_t *mcp)
46{ 46{
47 int rval; 47 int rval;
48 unsigned long flags = 0; 48 unsigned long flags = 0;
49 device_reg_t __iomem *reg = ha->iobase; 49 device_reg_t __iomem *reg;
50 struct timer_list tmp_intr_timer; 50 struct timer_list tmp_intr_timer;
51 uint8_t abort_active; 51 uint8_t abort_active;
52 uint8_t io_lock_on = ha->flags.init_done; 52 uint8_t io_lock_on;
53 uint16_t command; 53 uint16_t command;
54 uint16_t *iptr; 54 uint16_t *iptr;
55 uint16_t __iomem *optr; 55 uint16_t __iomem *optr;
56 uint32_t cnt; 56 uint32_t cnt;
57 uint32_t mboxes; 57 uint32_t mboxes;
58 unsigned long wait_time; 58 unsigned long wait_time;
59 scsi_qla_host_t *ha = to_qla_parent(pvha);
60
61 reg = ha->iobase;
62 io_lock_on = ha->flags.init_done;
59 63
60 rval = QLA_SUCCESS; 64 rval = QLA_SUCCESS;
61 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 65 abort_active = test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
62 66
63 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 67 DEBUG11(printk("%s(%ld): entered.\n", __func__, pvha->host_no));
64 68
65 /* 69 /*
66 * Wait for active mailbox commands to finish by waiting at most tov 70 * Wait for active mailbox commands to finish by waiting at most tov
@@ -889,7 +893,7 @@ qla2x00_abort_target(fc_port_t *fcport)
889 */ 893 */
890int 894int
891qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa, 895qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
892 uint8_t *area, uint8_t *domain, uint16_t *top) 896 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
893{ 897{
894 int rval; 898 int rval;
895 mbx_cmd_t mc; 899 mbx_cmd_t mc;
@@ -899,8 +903,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
899 ha->host_no)); 903 ha->host_no));
900 904
901 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 905 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
906 mcp->mb[9] = ha->vp_idx;
902 mcp->out_mb = MBX_0; 907 mcp->out_mb = MBX_0;
903 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 908 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
904 mcp->tov = 30; 909 mcp->tov = 30;
905 mcp->flags = 0; 910 mcp->flags = 0;
906 rval = qla2x00_mailbox_command(ha, mcp); 911 rval = qla2x00_mailbox_command(ha, mcp);
@@ -913,6 +918,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
913 *area = MSB(mcp->mb[2]); 918 *area = MSB(mcp->mb[2]);
914 *domain = LSB(mcp->mb[3]); 919 *domain = LSB(mcp->mb[3]);
915 *top = mcp->mb[6]; 920 *top = mcp->mb[6];
921 *sw_cap = mcp->mb[7];
916 922
917 if (rval != QLA_SUCCESS) { 923 if (rval != QLA_SUCCESS) {
918 /*EMPTY*/ 924 /*EMPTY*/
@@ -1009,7 +1015,11 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
1009 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n", 1015 DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
1010 ha->host_no)); 1016 ha->host_no));
1011 1017
1012 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE; 1018 if (ha->flags.npiv_supported)
1019 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1020 else
1021 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1022
1013 mcp->mb[2] = MSW(ha->init_cb_dma); 1023 mcp->mb[2] = MSW(ha->init_cb_dma);
1014 mcp->mb[3] = LSW(ha->init_cb_dma); 1024 mcp->mb[3] = LSW(ha->init_cb_dma);
1015 mcp->mb[4] = 0; 1025 mcp->mb[4] = 0;
@@ -1081,7 +1091,8 @@ qla2x00_get_port_database(scsi_qla_host_t *ha, fc_port_t *fcport, uint8_t opt)
1081 mcp->mb[3] = LSW(pd_dma); 1091 mcp->mb[3] = LSW(pd_dma);
1082 mcp->mb[6] = MSW(MSD(pd_dma)); 1092 mcp->mb[6] = MSW(MSD(pd_dma));
1083 mcp->mb[7] = LSW(MSD(pd_dma)); 1093 mcp->mb[7] = LSW(MSD(pd_dma));
1084 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; 1094 mcp->mb[9] = ha->vp_idx;
1095 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1085 mcp->in_mb = MBX_0; 1096 mcp->in_mb = MBX_0;
1086 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { 1097 if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
1087 mcp->mb[1] = fcport->loop_id; 1098 mcp->mb[1] = fcport->loop_id;
@@ -1259,7 +1270,8 @@ qla2x00_get_port_name(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t *name,
1259 ha->host_no)); 1270 ha->host_no));
1260 1271
1261 mcp->mb[0] = MBC_GET_PORT_NAME; 1272 mcp->mb[0] = MBC_GET_PORT_NAME;
1262 mcp->out_mb = MBX_1|MBX_0; 1273 mcp->mb[9] = ha->vp_idx;
1274 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1263 if (HAS_EXTENDED_IDS(ha)) { 1275 if (HAS_EXTENDED_IDS(ha)) {
1264 mcp->mb[1] = loop_id; 1276 mcp->mb[1] = loop_id;
1265 mcp->mb[10] = opt; 1277 mcp->mb[10] = opt;
@@ -1447,6 +1459,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1447 lg->port_id[0] = al_pa; 1459 lg->port_id[0] = al_pa;
1448 lg->port_id[1] = area; 1460 lg->port_id[1] = area;
1449 lg->port_id[2] = domain; 1461 lg->port_id[2] = domain;
1462 lg->vp_index = cpu_to_le16(ha->vp_idx);
1450 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1463 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1451 if (rval != QLA_SUCCESS) { 1464 if (rval != QLA_SUCCESS) {
1452 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB " 1465 DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1701,6 +1714,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
1701 lg->port_id[0] = al_pa; 1714 lg->port_id[0] = al_pa;
1702 lg->port_id[1] = area; 1715 lg->port_id[1] = area;
1703 lg->port_id[2] = domain; 1716 lg->port_id[2] = domain;
1717 lg->vp_index = cpu_to_le16(ha->vp_idx);
1704 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0); 1718 rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
1705 if (rval != QLA_SUCCESS) { 1719 if (rval != QLA_SUCCESS) {
1706 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB " 1720 DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -1863,7 +1877,8 @@ qla2x00_get_id_list(scsi_qla_host_t *ha, void *id_list, dma_addr_t id_list_dma,
1863 mcp->mb[6] = MSW(MSD(id_list_dma)); 1877 mcp->mb[6] = MSW(MSD(id_list_dma));
1864 mcp->mb[7] = LSW(MSD(id_list_dma)); 1878 mcp->mb[7] = LSW(MSD(id_list_dma));
1865 mcp->mb[8] = 0; 1879 mcp->mb[8] = 0;
1866 mcp->out_mb |= MBX_8|MBX_7|MBX_6|MBX_3|MBX_2; 1880 mcp->mb[9] = ha->vp_idx;
1881 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
1867 } else { 1882 } else {
1868 mcp->mb[1] = MSW(id_list_dma); 1883 mcp->mb[1] = MSW(id_list_dma);
1869 mcp->mb[2] = LSW(id_list_dma); 1884 mcp->mb[2] = LSW(id_list_dma);
@@ -2212,6 +2227,7 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2212 abt->port_id[0] = fcport->d_id.b.al_pa; 2227 abt->port_id[0] = fcport->d_id.b.al_pa;
2213 abt->port_id[1] = fcport->d_id.b.area; 2228 abt->port_id[1] = fcport->d_id.b.area;
2214 abt->port_id[2] = fcport->d_id.b.domain; 2229 abt->port_id[2] = fcport->d_id.b.domain;
2230 abt->vp_index = fcport->vp_idx;
2215 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0); 2231 rval = qla2x00_issue_iocb(ha, abt, abt_dma, 0);
2216 if (rval != QLA_SUCCESS) { 2232 if (rval != QLA_SUCCESS) {
2217 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n", 2233 DEBUG2_3_11(printk("%s(%ld): failed to issue IOCB (%x).\n",
@@ -2249,7 +2265,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2249 int rval; 2265 int rval;
2250 struct tsk_mgmt_cmd *tsk; 2266 struct tsk_mgmt_cmd *tsk;
2251 dma_addr_t tsk_dma; 2267 dma_addr_t tsk_dma;
2252 scsi_qla_host_t *ha; 2268 scsi_qla_host_t *ha, *pha;
2253 2269
2254 if (fcport == NULL) 2270 if (fcport == NULL)
2255 return 0; 2271 return 0;
@@ -2257,7 +2273,8 @@ qla24xx_abort_target(fc_port_t *fcport)
2257 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no)); 2273 DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->ha->host_no));
2258 2274
2259 ha = fcport->ha; 2275 ha = fcport->ha;
2260 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); 2276 pha = to_qla_parent(ha);
2277 tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2261 if (tsk == NULL) { 2278 if (tsk == NULL) {
2262 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " 2279 DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
2263 "IOCB.\n", __func__, ha->host_no)); 2280 "IOCB.\n", __func__, ha->host_no));
@@ -2273,6 +2290,8 @@ qla24xx_abort_target(fc_port_t *fcport)
2273 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 2290 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2274 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 2291 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2275 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 2292 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2293 tsk->p.tsk.vp_index = fcport->vp_idx;
2294
2276 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0); 2295 rval = qla2x00_issue_iocb(ha, tsk, tsk_dma, 0);
2277 if (rval != QLA_SUCCESS) { 2296 if (rval != QLA_SUCCESS) {
2278 DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB " 2297 DEBUG2_3_11(printk("%s(%ld): failed to issue Target Reset IOCB "
@@ -2303,7 +2322,7 @@ qla24xx_abort_target(fc_port_t *fcport)
2303 } 2322 }
2304 2323
2305atarget_done: 2324atarget_done:
2306 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2325 dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);
2307 2326
2308 return rval; 2327 return rval;
2309} 2328}
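Allocation and free both move to the parent's DMA pool, since vports do not own pools of their own. The pairing, reduced to its skeleton:

        /* Sketch: vport-safe pool usage -- resolve the parent once and
         * use it for both the alloc and the free. */
        scsi_qla_host_t *pha = to_qla_parent(fcport->ha);
        struct tsk_mgmt_cmd *tsk;
        dma_addr_t tsk_dma;

        tsk = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &tsk_dma);
        if (!tsk)
                return QLA_MEMORY_ALLOC_FAILED;
        /* ... build and issue the IOCB ... */
        dma_pool_free(pha->s_dma_pool, tsk, tsk_dma);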
@@ -2610,3 +2629,354 @@ qla2x00_set_idma_speed(scsi_qla_host_t *ha, uint16_t loop_id,
2610 2629
2611 return rval; 2630 return rval;
2612} 2631}
2632
2633/*
2634 * qla24xx_get_vp_database
2635 * Get the VP's database for all configured ports.
2636 *
2637 * Input:
2638 * ha = adapter block pointer.
2639 * size = size of initialization control block.
2640 *
2641 * Returns:
2642 * qla2x00 local function return status code.
2643 *
2644 * Context:
2645 * Kernel context.
2646 */
2647int
2648qla24xx_get_vp_database(scsi_qla_host_t *ha, uint16_t size)
2649{
2650 int rval;
2651 mbx_cmd_t mc;
2652 mbx_cmd_t *mcp = &mc;
2653
2654 DEBUG11(printk("scsi(%ld):%s - entered.\n",
2655 ha->host_no, __func__));
2656
2657 mcp->mb[0] = MBC_MID_GET_VP_DATABASE;
2658 mcp->mb[2] = MSW(ha->init_cb_dma);
2659 mcp->mb[3] = LSW(ha->init_cb_dma);
2660 mcp->mb[4] = 0;
2661 mcp->mb[5] = 0;
2662 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
2663 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
2664 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2665 mcp->in_mb = MBX_1|MBX_0;
2666 mcp->buf_size = size;
2667 mcp->flags = MBX_DMA_OUT;
2668 mcp->tov = MBX_TOV_SECONDS;
2669 rval = qla2x00_mailbox_command(ha, mcp);
2670
2671 if (rval != QLA_SUCCESS) {
2672 /*EMPTY*/
2673 DEBUG2_3_11(printk("%s(%ld): failed=%x "
2674 "mb0=%x.\n",
2675 __func__, ha->host_no, rval, mcp->mb[0]));
2676 } else {
2677 /*EMPTY*/
2678 DEBUG11(printk("%s(%ld): done.\n",
2679 __func__, ha->host_no));
2680 }
2681
2682 return rval;
2683}
2684
2685int
2686qla24xx_get_vp_entry(scsi_qla_host_t *ha, uint16_t size, int vp_id)
2687{
2688 int rval;
2689 mbx_cmd_t mc;
2690 mbx_cmd_t *mcp = &mc;
2691
2692 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2693
2694 mcp->mb[0] = MBC_MID_GET_VP_ENTRY;
2695 mcp->mb[2] = MSW(ha->init_cb_dma);
2696 mcp->mb[3] = LSW(ha->init_cb_dma);
2697 mcp->mb[4] = 0;
2698 mcp->mb[5] = 0;
2699 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
2700 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
2701 mcp->mb[9] = vp_id;
2702 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2703 mcp->in_mb = MBX_0;
2704 mcp->buf_size = size;
2705 mcp->flags = MBX_DMA_OUT;
2706 mcp->tov = 30;
2707 rval = qla2x00_mailbox_command(ha, mcp);
2708
2709 if (rval != QLA_SUCCESS) {
2710 /*EMPTY*/
2711 DEBUG2_3_11(printk("qla24xx_get_vp_entry(%ld): failed=%x "
2712 "mb0=%x.\n",
2713 ha->host_no, rval, mcp->mb[0]));
2714 } else {
2715 /*EMPTY*/
2716 DEBUG11(printk("qla24xx_get_vp_entry(%ld): done.\n",
2717 ha->host_no));
2718 }
2719
2720 return rval;
2721}
2722
2723void
2724qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
2725 struct vp_rpt_id_entry_24xx *rptid_entry)
2726{
2727 uint8_t vp_idx;
2728 scsi_qla_host_t *vha;
2729
2730 if (rptid_entry->entry_status != 0)
2731 return;
2732 if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
2733 return;
2734
2735 if (rptid_entry->format == 0) {
 2736 DEBUG15(printk("%s: format 0: scsi(%ld) number of VPs setup %d,"
2737 " number of VPs acquired %d\n", __func__, ha->host_no,
2738 MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
2739 DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
2740 rptid_entry->port_id[2], rptid_entry->port_id[1],
2741 rptid_entry->port_id[0]));
2742 } else if (rptid_entry->format == 1) {
2743 vp_idx = LSB(rptid_entry->vp_idx);
 2744 DEBUG15(printk("%s: format 1: scsi(%ld): VP[%d] enabled "
 2745 "- status %d - "
 2746 "with port id %02x%02x%02x\n", __func__, ha->host_no,
2747 vp_idx, MSB(rptid_entry->vp_idx),
2748 rptid_entry->port_id[2], rptid_entry->port_id[1],
2749 rptid_entry->port_id[0]));
2750 if (vp_idx == 0)
2751 return;
2752
2753 if (MSB(rptid_entry->vp_idx) == 1)
2754 return;
2755
2756 list_for_each_entry(vha, &ha->vp_list, vp_list)
2757 if (vp_idx == vha->vp_idx)
2758 break;
2759
2760 if (!vha)
2761 return;
2762
2763 vha->d_id.b.domain = rptid_entry->port_id[2];
2764 vha->d_id.b.area = rptid_entry->port_id[1];
2765 vha->d_id.b.al_pa = rptid_entry->port_id[0];
2766
2767 /*
2768 * Cannot configure here as we are still sitting on the
2769 * response queue. Handle it in dpc context.
2770 */
2771 set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
2772 set_bit(VP_DPC_NEEDED, &ha->dpc_flags);
2773
2774 wake_up_process(ha->dpc_thread);
2775 }
2776}
2777
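One caveat in qla24xx_report_id_acquisition() above: list_for_each_entry() never leaves its cursor NULL, so after an unsuccessful scan vha points at the list-head container and the "if (!vha)" test cannot fire (and, if CS_COMPLETE is 0 as elsewhere in this driver, the second entry_status test near the top is redundant). A sketch of the conventional found-flag pattern:

        /* Sketch: detect "no matching vport" explicitly instead of
         * testing the loop cursor, which is never NULL afterwards. */
        scsi_qla_host_t *vha, *found = NULL;

        list_for_each_entry(vha, &ha->vp_list, vp_list) {
                if (vp_idx == vha->vp_idx) {
                        found = vha;
                        break;
                }
        }
        if (!found)
                return;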
2778/*
2779 * qla24xx_modify_vp_config
2780 * Change VP configuration for vha
2781 *
2782 * Input:
2783 * vha = adapter block pointer.
2784 *
2785 * Returns:
2786 * qla2xxx local function return status code.
2787 *
2788 * Context:
2789 * Kernel context.
2790 */
2791int
2792qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2793{
2794 int rval;
2795 struct vp_config_entry_24xx *vpmod;
2796 dma_addr_t vpmod_dma;
2797 scsi_qla_host_t *pha;
2798
2799 /* This can be called by the parent */
2800 pha = to_qla_parent(vha);
2801
2802 vpmod = dma_pool_alloc(pha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2803 if (!vpmod) {
2804 DEBUG2_3(printk("%s(%ld): failed to allocate Modify VP "
2805 "IOCB.\n", __func__, pha->host_no));
2806 return QLA_MEMORY_ALLOC_FAILED;
2807 }
2808
2809 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
2810 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
2811 vpmod->entry_count = 1;
2812 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
2813 vpmod->vp_count = 1;
2814 vpmod->vp_index1 = vha->vp_idx;
2815 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
2816 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
2817 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
2819
2820 rval = qla2x00_issue_iocb(pha, vpmod, vpmod_dma, 0);
2821 if (rval != QLA_SUCCESS) {
2822 DEBUG2_3_11(printk("%s(%ld): failed to issue VP config IOCB"
2823 "(%x).\n", __func__, pha->host_no, rval));
2824 } else if (vpmod->comp_status != 0) {
2825 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2826 "-- error status (%x).\n", __func__, pha->host_no,
2827 vpmod->comp_status));
2828 rval = QLA_FUNCTION_FAILED;
2829 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2830 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2831 "-- completion status (%x).\n", __func__, pha->host_no,
2832 le16_to_cpu(vpmod->comp_status)));
2833 rval = QLA_FUNCTION_FAILED;
2834 } else {
2836 DEBUG11(printk("%s(%ld): done.\n", __func__, pha->host_no));
2837 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
2838 }
2839 dma_pool_free(pha->s_dma_pool, vpmod, vpmod_dma);
2840
2841 return rval;
2842}
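/*
 * Note: callers are expected to serialize VP configuration changes;
 * qla24xx_enable_vp() in qla_mid.c below takes ha->vport_sem around
 * this call.
 */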
2843
2844/*
2845 * qla24xx_control_vp
2846 *	Enable or disable a virtual port for the given host.
2847 *
2848 * Input:
2849 *	vha = virtual adapter block pointer.
2850 *	cmd = VP control command to issue (a VCE_COMMAND_* value),
2851 *	    e.g. VCE_COMMAND_DISABLE_VPS_LOGO_ALL.
2852 *
2853 * Returns:
2854 *	qla2xxx local function return status code.
2855 *
2856 * Context:
2857 *	Kernel context.
2858 */
2859int
2860qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2861{
2862 int rval;
2863 int map, pos;
2864 struct vp_ctrl_entry_24xx *vce;
2865 dma_addr_t vce_dma;
2866 scsi_qla_host_t *ha = vha->parent;
2867 int vp_index = vha->vp_idx;
2868
2869 DEBUG11(printk("%s(%ld): entered. Enabling index %d\n", __func__,
2870 ha->host_no, vp_index));
2871
2872 if (vp_index == 0 || vp_index >= MAX_MULTI_ID_LOOP)
2873 return QLA_PARAMETER_ERROR;
2874
2875 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
2876 if (!vce) {
2877 DEBUG2_3(printk("%s(%ld): "
2878 "failed to allocate VP Control IOCB.\n", __func__,
2879 ha->host_no));
2880 return QLA_MEMORY_ALLOC_FAILED;
2881 }
2882 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
2883
2884 vce->entry_type = VP_CTRL_IOCB_TYPE;
2885 vce->entry_count = 1;
2886 vce->command = cpu_to_le16(cmd);
2887 vce->vp_count = __constant_cpu_to_le16(1);
2888
2889 /* index map in firmware starts with 1; decrement index
2890 * this is ok as we never use index 0
2891 */
2892 map = (vp_index - 1) / 8;
2893 pos = (vp_index - 1) & 7;
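	/*
	 * Worked example: for vp_index 10, map = (10 - 1) / 8 = 1 and
	 * pos = (10 - 1) & 7 = 1, so bit 1 of vp_idx_map[1] is set --
	 * firmware VP 10 occupies the second bit of the second byte.
	 */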
2894 down(&ha->vport_sem);
2895 vce->vp_idx_map[map] |= 1 << pos;
2896 up(&ha->vport_sem);
2897
2898 rval = qla2x00_issue_iocb(ha, vce, vce_dma, 0);
2899 if (rval != QLA_SUCCESS) {
2900 DEBUG2_3_11(printk("%s(%ld): failed to issue VP control IOCB"
2901 "(%x).\n", __func__, ha->host_no, rval));
2902 printk("%s(%ld): failed to issue VP control IOCB"
2903 "(%x).\n", __func__, ha->host_no, rval);
2904 } else if (vce->entry_status != 0) {
2905 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2906 "-- error status (%x).\n", __func__, ha->host_no,
2907 vce->entry_status));
2911 rval = QLA_FUNCTION_FAILED;
2912 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2913 DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
2914 "-- completion status (%x).\n", __func__, ha->host_no,
2915 le16_to_cpu(vce->comp_status)));
2919 rval = QLA_FUNCTION_FAILED;
2920 } else {
2921 DEBUG2(printk("%s(%ld): done.\n", __func__, ha->host_no));
2922 }
2923
2924 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
2925
2926 return rval;
2927}
2928
2929/*
2930 * qla2x00_send_change_request
2931 *	Register to receive (or to stop receiving) RSCNs from the
2932 *	fabric controller.
2932 *
2933 * Input:
2934 * ha = adapter block pointer
2935 * format = registration format:
2936 * 0 - Reserved
2937 * 1 - Fabric detected registration
2938 * 2 - N_port detected registration
2939 * 3 - Full registration
2940 * FF - clear registration
2941 * vp_idx = Virtual port index
2942 *
2943 * Returns:
2944 * qla2x00 local function return status code.
2945 *
2946 * Context:
2947 *	Kernel context.
2948 */
2949
2950int
2951qla2x00_send_change_request(scsi_qla_host_t *ha, uint16_t format,
2952 uint16_t vp_idx)
2953{
2954 int rval;
2955 mbx_cmd_t mc;
2956 mbx_cmd_t *mcp = &mc;
2957
2958 /*
2959 * This command is implicitly executed by firmware during login for the
2960 * physical hosts
2961 */
2962 if (vp_idx == 0)
2963 return QLA_FUNCTION_FAILED;
2964
2965 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
2966 mcp->mb[1] = format;
2967 mcp->mb[9] = vp_idx;
2968 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2969 mcp->in_mb = MBX_0|MBX_1;
2970 mcp->tov = MBX_TOV_SECONDS;
2971 mcp->flags = 0;
2972 rval = qla2x00_mailbox_command(ha, mcp);
2973
2974 if (rval == QLA_SUCCESS) {
2975 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2976 rval = BIT_1;
2977 }
2978 } else
2979 rval = BIT_1;
2980
2981 return rval;
2982}
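As a usage sketch (illustrative only; qla24xx_configure_vp() in qla_mid.c
below issues exactly this request), a vport registering for full RSCN
delivery would pass format 3:

	/* Ask the fabric controller to deliver all RSCNs to this vport. */
	if (qla2x00_send_change_request(vha, 0x3, vha->vp_idx) != QLA_SUCCESS)
		DEBUG15(printk("scsi(%ld): SCR registration failed for VP %d\n",
		    vha->host_no, vha->vp_idx));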
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
new file mode 100644
index 000000000000..54dc415d8b53
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -0,0 +1,497 @@
1/*
2 * QLOGIC LINUX SOFTWARE
3 *
4 * QLogic ISP2x00 device driver for Linux 2.6.x
5 * Copyright (C) 2003-2005 QLogic Corporation
6 * (www.qlogic.com)
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
11 * later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 */
19#include "qla_def.h"
20
21#include <linux/version.h>
22#include <linux/moduleparam.h>
23#include <linux/vmalloc.h>
24#include <linux/smp_lock.h>
25#include <linux/list.h>
26
27#include <scsi/scsi_tcq.h>
28#include <scsi/scsicam.h>
29#include <linux/delay.h>
30
31void qla2x00_vp_stop_timer(scsi_qla_host_t *);
32
33void
34qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
35{
36 if (vha->parent && vha->timer_active) {
37 del_timer_sync(&vha->timer);
38 vha->timer_active = 0;
39 }
40}
41
42uint32_t
43qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
44{
45 uint32_t vp_id;
46 scsi_qla_host_t *ha = vha->parent;
47
48	/* Find an empty slot and assign a vp_id */
49 down(&ha->vport_sem);
50 vp_id = find_first_zero_bit((unsigned long *)ha->vp_idx_map,
51 MAX_MULTI_ID_FABRIC);
52	if (vp_id >= MAX_MULTI_ID_FABRIC) {
53		DEBUG15(printk("vp_id %d exceeds MAX_MULTI_ID_FABRIC\n",
54		    vp_id));
55 up(&ha->vport_sem);
56 return vp_id;
57 }
58
59 set_bit(vp_id, (unsigned long *)ha->vp_idx_map);
60 ha->num_vhosts++;
61 vha->vp_idx = vp_id;
62 list_add_tail(&vha->vp_list, &ha->vp_list);
63 up(&ha->vport_sem);
64 return vp_id;
65}
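/*
 * The bitmap is the single source of truth for index ownership:
 * qla24xx_deallocate_vp_id() below clears the same bit, and the probe
 * path reserves bit 0 for the physical port, which is why allocation
 * never hands out index 0.
 */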
66
67void
68qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
69{
70 uint16_t vp_id;
71 scsi_qla_host_t *ha = vha->parent;
72
73 down(&ha->vport_sem);
74 vp_id = vha->vp_idx;
75 ha->num_vhosts--;
76 clear_bit(vp_id, (unsigned long *)ha->vp_idx_map);
77 list_del(&vha->vp_list);
78 up(&ha->vport_sem);
79}
80
81scsi_qla_host_t *
82qla24xx_find_vhost_by_name(scsi_qla_host_t *ha, uint8_t *port_name)
83{
84 scsi_qla_host_t *vha;
85
86 /* Locate matching device in database. */
87 list_for_each_entry(vha, &ha->vp_list, vp_list) {
88 if (!memcmp(port_name, vha->port_name, WWN_SIZE))
89 return vha;
90 }
91 return NULL;
92}
93
94/*
95 * qla2x00_mark_vp_devices_dead
96 * Updates fcport state when device goes offline.
97 *
98 * Input:
 99 *	vha = virtual adapter block pointer.
100 *
101 * Return:
102 *	None.
103 *
104 * Context:
105 *	Kernel context.
106 */
107void
108qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
109{
110 fc_port_t *fcport;
111 scsi_qla_host_t *pha = to_qla_parent(vha);
112
113 list_for_each_entry(fcport, &pha->fcports, list) {
114 if (fcport->vp_idx != vha->vp_idx)
115 continue;
116
117 DEBUG15(printk("scsi(%ld): Marking port dead, "
118 "loop_id=0x%04x :%x\n",
119 vha->host_no, fcport->loop_id, fcport->vp_idx));
120
121 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
122 qla2x00_mark_device_lost(vha, fcport, 0, 0);
123 }
124}
125
126int
127qla24xx_disable_vp(scsi_qla_host_t *vha)
128{
129 int ret;
130
131 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
132 atomic_set(&vha->loop_state, LOOP_DOWN);
133 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
134
135 /* Delete all vp's fcports from parent's list */
136 qla2x00_mark_vp_devices_dead(vha);
137 atomic_set(&vha->vp_state, VP_FAILED);
138 vha->flags.management_server_logged_in = 0;
139 if (ret == QLA_SUCCESS) {
140 fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
141 } else {
142 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
143 return -1;
144 }
145 return 0;
146}
147
148int
149qla24xx_enable_vp(scsi_qla_host_t *vha)
150{
151 int ret;
152 scsi_qla_host_t *ha = vha->parent;
153
154 /* Check if physical ha port is Up */
155 if (atomic_read(&ha->loop_state) == LOOP_DOWN ||
156 atomic_read(&ha->loop_state) == LOOP_DEAD ) {
157 vha->vp_err_state = VP_ERR_PORTDWN;
158 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
159 goto enable_failed;
160 }
161
162 /* Initialize the new vport unless it is a persistent port */
163 down(&ha->vport_sem);
164 ret = qla24xx_modify_vp_config(vha);
165 up(&ha->vport_sem);
166
167 if (ret != QLA_SUCCESS) {
168 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
169 goto enable_failed;
170 }
171
172 DEBUG15(qla_printk(KERN_INFO, ha,
173 "Virtual port with id: %d - Enabled\n", vha->vp_idx));
174 return 0;
175
176enable_failed:
177 DEBUG15(qla_printk(KERN_INFO, ha,
178 "Virtual port with id: %d - Disabled\n", vha->vp_idx));
179 return 1;
180}
181
182/**
183 * qla24xx_modify_vhba() - Re-enables an existing virtual fabric port.
184 * @ha: HA context
185 * @vp: pointer to buffer of virtual port parameters.
186 * @vp_id: set to the virtual port id, or to MAX_NUM_VPORT_LOOP on failure.
187 *
188 * Returns a VP_RET_CODE_* status code.
189 */
190uint32_t
191qla24xx_modify_vhba(scsi_qla_host_t *ha, vport_params_t *vp, uint32_t *vp_id)
192{
193 scsi_qla_host_t *vha;
194
195 vha = qla24xx_find_vhost_by_name(ha, vp->port_name);
196 if (!vha) {
197 *vp_id = MAX_NUM_VPORT_LOOP;
198 return VP_RET_CODE_WWPN;
199 }
200
201 if (qla24xx_enable_vp(vha)) {
202 scsi_host_put(vha->host);
203 qla2x00_mem_free(vha);
204 *vp_id = MAX_NUM_VPORT_LOOP;
205 return VP_RET_CODE_RESOURCES;
206 }
207
208 *vp_id = vha->vp_idx;
209 return VP_RET_CODE_OK;
210}
211
212void
213qla24xx_configure_vp(scsi_qla_host_t *vha)
214{
215 struct fc_vport *fc_vport;
216 int ret;
217
218 fc_vport = vha->fc_vport;
219
220 DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
221 vha->host_no, __func__));
222 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
223 if (ret != QLA_SUCCESS) {
224 DEBUG15(qla_printk(KERN_ERR, vha, "Failed to enable receiving"
225 " of RSCN requests: 0x%x\n", ret));
226 return;
227	}
228
229	/* Corresponds to SCR enabled */
230	clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
231
232 vha->flags.online = 1;
233 if (qla24xx_configure_vhba(vha))
234 return;
235
236 atomic_set(&vha->vp_state, VP_ACTIVE);
237 fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
238}
239
240void
241qla2x00_alert_all_vps(scsi_qla_host_t *ha, uint16_t *mb)
242{
243 int i, vp_idx_matched;
244 scsi_qla_host_t *vha;
245
246 if (ha->parent)
247 return;
248
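	/*
	 * Bit 0 of vp_idx_map is reserved for the physical port (set at
	 * probe time), so the scan starts from bit 1 and only vports are
	 * notified.
	 */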
249 i = find_next_bit((unsigned long *)ha->vp_idx_map,
250 MAX_MULTI_ID_FABRIC + 1, 1);
251 for (;i <= MAX_MULTI_ID_FABRIC;
252 i = find_next_bit((unsigned long *)ha->vp_idx_map,
253 MAX_MULTI_ID_FABRIC + 1, i + 1)) {
254 vp_idx_matched = 0;
255
256 list_for_each_entry(vha, &ha->vp_list, vp_list) {
257 if (i == vha->vp_idx) {
258 vp_idx_matched = 1;
259 break;
260 }
261 }
262
263 if (vp_idx_matched) {
264 switch (mb[0]) {
265 case MBA_LIP_OCCURRED:
266 case MBA_LOOP_UP:
267 case MBA_LOOP_DOWN:
268 case MBA_LIP_RESET:
269 case MBA_POINT_TO_POINT:
270 case MBA_CHG_IN_CONNECTION:
271 case MBA_PORT_UPDATE:
272 case MBA_RSCN_UPDATE:
273 DEBUG15(printk("scsi(%ld)%s: Async_event for"
274 " VP[%d], mb = 0x%x, vha=%p\n",
275				    vha->host_no, __func__, i, *mb, vha));
276 qla2x00_async_event(vha, mb);
277 break;
278 }
279 }
280 }
281}
282
283void
284qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
285{
286 /*
287 * Physical port will do most of the abort and recovery work. We can
288 * just treat it as a loop down
289 */
290 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
291 atomic_set(&vha->loop_state, LOOP_DOWN);
292 qla2x00_mark_all_devices_lost(vha, 0);
293 } else {
294 if (!atomic_read(&vha->loop_down_timer))
295 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
296 }
297
298 DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
299 vha->host_no, vha->vp_idx));
300 qla24xx_enable_vp(vha);
301}
302
303int
304qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
305{
306 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
307 /* VP acquired. complete port configuration */
308 qla24xx_configure_vp(vha);
309 return 0;
310 }
311
312 if (test_and_clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
313 qla2x00_vp_abort_isp(vha);
314
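	/*
	 * RESET_MARKER_NEEDED is only acknowledged here; issuing the
	 * marker itself is left to the physical port's dpc, so the vport
	 * just clears its local flags.
	 */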
315 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
316 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
317 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
318 }
319
320 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
321 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
322 qla2x00_loop_resync(vha);
323 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
324 }
325 }
326
327 return 0;
328}
329
330void
331qla2x00_do_dpc_all_vps(scsi_qla_host_t *ha)
332{
334 int i, vp_idx_matched;
335 scsi_qla_host_t *vha;
336
337 if (ha->parent)
338 return;
339 if (list_empty(&ha->vp_list))
340 return;
341
342 clear_bit(VP_DPC_NEEDED, &ha->dpc_flags);
343
344 i = find_next_bit((unsigned long *)ha->vp_idx_map,
345 MAX_MULTI_ID_FABRIC + 1, 1);
346 for (;i <= MAX_MULTI_ID_FABRIC;
347 i = find_next_bit((unsigned long *)ha->vp_idx_map,
348 MAX_MULTI_ID_FABRIC + 1, i + 1)) {
349 vp_idx_matched = 0;
350
351 list_for_each_entry(vha, &ha->vp_list, vp_list) {
352 if (i == vha->vp_idx) {
353 vp_idx_matched = 1;
354 break;
355 }
356 }
357
358 if (vp_idx_matched)
359			qla2x00_do_dpc_vp(vha);
360 }
361}
362
363int
364qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
365{
366 scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
367 scsi_qla_host_t *vha;
368 uint8_t port_name[WWN_SIZE];
369
370 if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
371 return VPCERR_UNSUPPORTED;
372
373	/* Check that the F/W and H/W support NPIV */
374 if (!ha->flags.npiv_supported)
375 return VPCERR_UNSUPPORTED;
376
377	/* Check whether an NPIV-capable switch is present */
378 if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
379 return VPCERR_NO_FABRIC_SUPP;
380
381	/* Check that the WWPN is unique */
382 u64_to_wwn(fc_vport->port_name, port_name);
383 vha = qla24xx_find_vhost_by_name(ha, port_name);
384 if (vha)
385 return VPCERR_BAD_WWN;
386
387	/* Check against the maximum number of supported NPIV vports */
388	if (ha->num_vhosts > ha->max_npiv_vports) {
389		DEBUG15(printk("scsi(%ld): num_vhosts %d is bigger than "
390		    "max_npiv_vports %d.\n", ha->host_no,
391		    (uint16_t) ha->num_vhosts, (int) ha->max_npiv_vports));
392 return VPCERR_UNSUPPORTED;
393 }
394 return 0;
395}
396
397scsi_qla_host_t *
398qla24xx_create_vhost(struct fc_vport *fc_vport)
399{
400 scsi_qla_host_t *ha = (scsi_qla_host_t *) fc_vport->shost->hostdata;
401 scsi_qla_host_t *vha;
402 struct Scsi_Host *host;
403
404 host = scsi_host_alloc(&qla24xx_driver_template,
405 sizeof(scsi_qla_host_t));
406 if (!host) {
407 printk(KERN_WARNING
408 "qla2xxx: scsi_host_alloc() failed for vport\n");
409		return NULL;
410 }
411
412 vha = (scsi_qla_host_t *)host->hostdata;
413
414 /* clone the parent hba */
415 memcpy(vha, ha, sizeof (scsi_qla_host_t));
416
417 fc_vport->dd_data = vha;
418
419	vha->node_name = kmalloc(WWN_SIZE, GFP_KERNEL);
420 if (!vha->node_name)
421 goto create_vhost_failed_1;
422
423	vha->port_name = kmalloc(WWN_SIZE, GFP_KERNEL);
424 if (!vha->port_name)
425 goto create_vhost_failed_2;
426
427 /* New host info */
428 u64_to_wwn(fc_vport->node_name, vha->node_name);
429 u64_to_wwn(fc_vport->port_name, vha->port_name);
430
431 vha->host = host;
432 vha->host_no = host->host_no;
433 vha->parent = ha;
434 vha->fc_vport = fc_vport;
435 vha->device_flags = 0;
436 vha->instance = num_hosts;
437 vha->vp_idx = qla24xx_allocate_vp_id(vha);
438 if (vha->vp_idx > ha->max_npiv_vports) {
439 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
440 vha->host_no));
441 goto create_vhost_failed_3;
442 }
443 vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
444
445 init_MUTEX(&vha->mbx_cmd_sem);
446 init_MUTEX_LOCKED(&vha->mbx_intr_sem);
447
448 INIT_LIST_HEAD(&vha->list);
449 INIT_LIST_HEAD(&vha->fcports);
450 INIT_LIST_HEAD(&vha->vp_fcports);
451
452 vha->dpc_flags = 0L;
453 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
454 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
455
456 /*
457 * To fix the issue of processing a parent's RSCN for the vport before
458 * its SCR is complete.
459 */
460 set_bit(VP_SCR_NEEDED, &vha->vp_flags);
461 atomic_set(&vha->loop_state, LOOP_DOWN);
462 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
463
464 qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
465
466 host->can_queue = vha->request_q_length + 128;
467 host->this_id = 255;
468 host->cmd_per_lun = 3;
469 host->max_cmd_len = MAX_CMDSZ;
470 host->max_channel = MAX_BUSES - 1;
471 host->max_lun = MAX_LUNS;
472 host->unique_id = vha->instance;
473 host->max_id = MAX_TARGETS_2200;
474 host->transportt = qla2xxx_transport_vport_template;
475
476 DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
477 vha->host_no, vha));
478
479 vha->flags.init_done = 1;
480 num_hosts++;
481
482 down(&ha->vport_sem);
483 set_bit(vha->vp_idx, (unsigned long *)ha->vp_idx_map);
484 ha->cur_vport_count++;
485 up(&ha->vport_sem);
486
487 return vha;
488
489create_vhost_failed_3:
490 kfree(vha->port_name);
491
492create_vhost_failed_2:
493 kfree(vha->node_name);
494
495create_vhost_failed_1:
496 return NULL;
497}
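Taken together, the fc transport's vport-create hook (added elsewhere in
this series) would run these helpers in sequence. A minimal sketch,
assuming an enable command constant named VCE_COMMAND_ENABLE_VPS:

	if (qla24xx_vport_create_req_sanity_check(fc_vport) == 0) {
		scsi_qla_host_t *vha = qla24xx_create_vhost(fc_vport);

		/* The enable completes asynchronously: the report-id IOCB
		 * sets VP_IDX_ACQUIRED and qla24xx_configure_vp() finishes
		 * the job in dpc context. */
		if (vha)
			qla24xx_control_vp(vha, VCE_COMMAND_ENABLE_VPS);
	}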
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b98136adaaae..b5a77b0c0deb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -29,8 +29,7 @@ static struct kmem_cache *srb_cachep;
29/* 29/*
30 * Ioctl related information. 30 * Ioctl related information.
31 */ 31 */
32static int num_hosts; 32int num_hosts;
33
34int ql2xlogintimeout = 20; 33int ql2xlogintimeout = 20;
35module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 34module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
36MODULE_PARM_DESC(ql2xlogintimeout, 35MODULE_PARM_DESC(ql2xlogintimeout,
@@ -112,7 +111,7 @@ static int qla2x00_device_reset(scsi_qla_host_t *, fc_port_t *);
112static int qla2x00_change_queue_depth(struct scsi_device *, int); 111static int qla2x00_change_queue_depth(struct scsi_device *, int);
113static int qla2x00_change_queue_type(struct scsi_device *, int); 112static int qla2x00_change_queue_type(struct scsi_device *, int);
114 113
115static struct scsi_host_template qla2x00_driver_template = { 114struct scsi_host_template qla2x00_driver_template = {
116 .module = THIS_MODULE, 115 .module = THIS_MODULE,
117 .name = QLA2XXX_DRIVER_NAME, 116 .name = QLA2XXX_DRIVER_NAME,
118 .queuecommand = qla2x00_queuecommand, 117 .queuecommand = qla2x00_queuecommand,
@@ -143,7 +142,7 @@ static struct scsi_host_template qla2x00_driver_template = {
143 .shost_attrs = qla2x00_host_attrs, 142 .shost_attrs = qla2x00_host_attrs,
144}; 143};
145 144
146static struct scsi_host_template qla24xx_driver_template = { 145struct scsi_host_template qla24xx_driver_template = {
147 .module = THIS_MODULE, 146 .module = THIS_MODULE,
148 .name = QLA2XXX_DRIVER_NAME, 147 .name = QLA2XXX_DRIVER_NAME,
149 .queuecommand = qla24xx_queuecommand, 148 .queuecommand = qla24xx_queuecommand,
@@ -171,21 +170,21 @@ static struct scsi_host_template qla24xx_driver_template = {
171}; 170};
172 171
173static struct scsi_transport_template *qla2xxx_transport_template = NULL; 172static struct scsi_transport_template *qla2xxx_transport_template = NULL;
173struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;
174 174
175/* TODO Convert to inlines 175/* TODO Convert to inlines
176 * 176 *
177 * Timer routines 177 * Timer routines
178 */ 178 */
179#define WATCH_INTERVAL 1 /* number of seconds */
180 179
181static void qla2x00_timer(scsi_qla_host_t *); 180void qla2x00_timer(scsi_qla_host_t *);
182 181
183static __inline__ void qla2x00_start_timer(scsi_qla_host_t *, 182__inline__ void qla2x00_start_timer(scsi_qla_host_t *,
184 void *, unsigned long); 183 void *, unsigned long);
185static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long); 184static __inline__ void qla2x00_restart_timer(scsi_qla_host_t *, unsigned long);
186static __inline__ void qla2x00_stop_timer(scsi_qla_host_t *); 185__inline__ void qla2x00_stop_timer(scsi_qla_host_t *);
187 186
188static inline void 187__inline__ void
189qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval) 188qla2x00_start_timer(scsi_qla_host_t *ha, void *func, unsigned long interval)
190{ 189{
191 init_timer(&ha->timer); 190 init_timer(&ha->timer);
@@ -202,7 +201,7 @@ qla2x00_restart_timer(scsi_qla_host_t *ha, unsigned long interval)
202 mod_timer(&ha->timer, jiffies + interval * HZ); 201 mod_timer(&ha->timer, jiffies + interval * HZ);
203} 202}
204 203
205static __inline__ void 204__inline__ void
206qla2x00_stop_timer(scsi_qla_host_t *ha) 205qla2x00_stop_timer(scsi_qla_host_t *ha)
207{ 206{
208 del_timer_sync(&ha->timer); 207 del_timer_sync(&ha->timer);
@@ -213,8 +212,8 @@ static int qla2x00_do_dpc(void *data);
213 212
214static void qla2x00_rst_aen(scsi_qla_host_t *); 213static void qla2x00_rst_aen(scsi_qla_host_t *);
215 214
216static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *); 215uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
217static void qla2x00_mem_free(scsi_qla_host_t *ha); 216void qla2x00_mem_free(scsi_qla_host_t *ha);
218static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha); 217static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
219static void qla2x00_free_sp_pool(scsi_qla_host_t *ha); 218static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
220static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *); 219static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
@@ -438,6 +437,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
438 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); 437 struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
439 srb_t *sp; 438 srb_t *sp;
440 int rval; 439 int rval;
440 scsi_qla_host_t *pha = to_qla_parent(ha);
441 441
442 rval = fc_remote_port_chkready(rport); 442 rval = fc_remote_port_chkready(rport);
443 if (rval) { 443 if (rval) {
@@ -453,7 +453,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
453 453
454 if (atomic_read(&fcport->state) != FCS_ONLINE) { 454 if (atomic_read(&fcport->state) != FCS_ONLINE) {
455 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 455 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
456 atomic_read(&ha->loop_state) == LOOP_DEAD) { 456 atomic_read(&pha->loop_state) == LOOP_DEAD) {
457 cmd->result = DID_NO_CONNECT << 16; 457 cmd->result = DID_NO_CONNECT << 16;
458 goto qc24_fail_command; 458 goto qc24_fail_command;
459 } 459 }
@@ -462,7 +462,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
462 462
463 spin_unlock_irq(ha->host->host_lock); 463 spin_unlock_irq(ha->host->host_lock);
464 464
465 sp = qla2x00_get_new_sp(ha, fcport, cmd, done); 465 sp = qla2x00_get_new_sp(pha, fcport, cmd, done);
466 if (!sp) 466 if (!sp)
467 goto qc24_host_busy_lock; 467 goto qc24_host_busy_lock;
468 468
@@ -475,8 +475,8 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
475 return 0; 475 return 0;
476 476
477qc24_host_busy_free_sp: 477qc24_host_busy_free_sp:
478 qla2x00_sp_free_dma(ha, sp); 478 qla2x00_sp_free_dma(pha, sp);
479 mempool_free(sp, ha->srb_mempool); 479 mempool_free(sp, pha->srb_mempool);
480 480
481qc24_host_busy_lock: 481qc24_host_busy_lock:
482 spin_lock_irq(ha->host->host_lock); 482 spin_lock_irq(ha->host->host_lock);
@@ -548,16 +548,17 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *ha)
548{ 548{
549 int return_status; 549 int return_status;
550 unsigned long wait_online; 550 unsigned long wait_online;
551 scsi_qla_host_t *pha = to_qla_parent(ha);
551 552
552 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); 553 wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
553 while (((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags)) || 554 while (((test_bit(ISP_ABORT_NEEDED, &pha->dpc_flags)) ||
554 test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags) || 555 test_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags) ||
555 test_bit(ISP_ABORT_RETRY, &ha->dpc_flags) || 556 test_bit(ISP_ABORT_RETRY, &pha->dpc_flags) ||
556 ha->dpc_active) && time_before(jiffies, wait_online)) { 557 pha->dpc_active) && time_before(jiffies, wait_online)) {
557 558
558 msleep(1000); 559 msleep(1000);
559 } 560 }
560 if (ha->flags.online) 561 if (pha->flags.online)
561 return_status = QLA_SUCCESS; 562 return_status = QLA_SUCCESS;
562 else 563 else
563 return_status = QLA_FUNCTION_FAILED; 564 return_status = QLA_FUNCTION_FAILED;
@@ -588,14 +589,15 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
588{ 589{
589 int return_status = QLA_SUCCESS; 590 int return_status = QLA_SUCCESS;
590 unsigned long loop_timeout ; 591 unsigned long loop_timeout ;
592 scsi_qla_host_t *pha = to_qla_parent(ha);
591 593
592 /* wait for 5 min at the max for loop to be ready */ 594 /* wait for 5 min at the max for loop to be ready */
593 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ); 595 loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
594 596
595 while ((!atomic_read(&ha->loop_down_timer) && 597 while ((!atomic_read(&pha->loop_down_timer) &&
596 atomic_read(&ha->loop_state) == LOOP_DOWN) || 598 atomic_read(&pha->loop_state) == LOOP_DOWN) ||
597 atomic_read(&ha->loop_state) != LOOP_READY) { 599 atomic_read(&pha->loop_state) != LOOP_READY) {
598 if (atomic_read(&ha->loop_state) == LOOP_DEAD) { 600 if (atomic_read(&pha->loop_state) == LOOP_DEAD) {
599 return_status = QLA_FUNCTION_FAILED; 601 return_status = QLA_FUNCTION_FAILED;
600 break; 602 break;
601 } 603 }
@@ -650,6 +652,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
650 unsigned long serial; 652 unsigned long serial;
651 unsigned long flags; 653 unsigned long flags;
652 int wait = 0; 654 int wait = 0;
655 scsi_qla_host_t *pha = to_qla_parent(ha);
653 656
654 qla2x00_block_error_handler(cmd); 657 qla2x00_block_error_handler(cmd);
655 658
@@ -663,9 +666,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
663 serial = cmd->serial_number; 666 serial = cmd->serial_number;
664 667
665 /* Check active list for the command. */ 668
666 spin_lock_irqsave(&ha->hardware_lock, flags); 669 spin_lock_irqsave(&pha->hardware_lock, flags);
667 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { 670 for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
668 sp = ha->outstanding_cmds[i]; 671 sp = pha->outstanding_cmds[i];
669 672
670 if (sp == NULL) 673 if (sp == NULL)
671 continue; 674 continue;
@@ -677,7 +680,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
677 __func__, ha->host_no, sp, serial)); 680 __func__, ha->host_no, sp, serial));
678 DEBUG3(qla2x00_print_scsi_cmd(cmd)); 681 DEBUG3(qla2x00_print_scsi_cmd(cmd));
679 682
680 spin_unlock_irqrestore(&ha->hardware_lock, flags); 683 spin_unlock_irqrestore(&pha->hardware_lock, flags);
681 if (ha->isp_ops.abort_command(ha, sp)) { 684 if (ha->isp_ops.abort_command(ha, sp)) {
682 DEBUG2(printk("%s(%ld): abort_command " 685 DEBUG2(printk("%s(%ld): abort_command "
683 "mbx failed.\n", __func__, ha->host_no)); 686 "mbx failed.\n", __func__, ha->host_no));
@@ -686,11 +689,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
686 "mbx success.\n", __func__, ha->host_no)); 689 "mbx success.\n", __func__, ha->host_no));
687 wait = 1; 690 wait = 1;
688 } 691 }
689 spin_lock_irqsave(&ha->hardware_lock, flags); 692 spin_lock_irqsave(&pha->hardware_lock, flags);
690 693
691 break; 694 break;
692 } 695 }
693 spin_unlock_irqrestore(&ha->hardware_lock, flags); 696 spin_unlock_irqrestore(&pha->hardware_lock, flags);
694 697
695 /* Wait for the command to be returned. */ 698 /* Wait for the command to be returned. */
696 if (wait) { 699 if (wait) {
@@ -731,6 +734,7 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
731 srb_t *sp; 734 srb_t *sp;
732 struct scsi_cmnd *cmd; 735 struct scsi_cmnd *cmd;
733 unsigned long flags; 736 unsigned long flags;
737 scsi_qla_host_t *pha = to_qla_parent(ha);
734 738
735 status = 0; 739 status = 0;
736 740
@@ -739,19 +743,20 @@ qla2x00_eh_wait_for_pending_target_commands(scsi_qla_host_t *ha, unsigned int t)
739 * array 743 * array
740 */ 744 */
741 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 745 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
742 spin_lock_irqsave(&ha->hardware_lock, flags); 746 spin_lock_irqsave(&pha->hardware_lock, flags);
743 sp = ha->outstanding_cmds[cnt]; 747 sp = pha->outstanding_cmds[cnt];
744 if (sp) { 748 if (sp) {
745 cmd = sp->cmd; 749 cmd = sp->cmd;
746 spin_unlock_irqrestore(&ha->hardware_lock, flags); 750 spin_unlock_irqrestore(&pha->hardware_lock, flags);
747 if (cmd->device->id == t) { 751 if (cmd->device->id == t &&
752 ha->vp_idx == sp->ha->vp_idx) {
748 if (!qla2x00_eh_wait_on_command(ha, cmd)) { 753 if (!qla2x00_eh_wait_on_command(ha, cmd)) {
749 status = 1; 754 status = 1;
750 break; 755 break;
751 } 756 }
752 } 757 }
753 } else { 758 } else {
754 spin_unlock_irqrestore(&ha->hardware_lock, flags); 759 spin_unlock_irqrestore(&pha->hardware_lock, flags);
755 } 760 }
756 } 761 }
757 return (status); 762 return (status);
@@ -782,14 +787,12 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
782{ 787{
783 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 788 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
784 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 789 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
785 int ret; 790 int ret = FAILED;
786 unsigned int id, lun; 791 unsigned int id, lun;
787 unsigned long serial; 792 unsigned long serial;
788 793
789 qla2x00_block_error_handler(cmd); 794 qla2x00_block_error_handler(cmd);
790 795
791 ret = FAILED;
792
793 id = cmd->device->id; 796 id = cmd->device->id;
794 lun = cmd->device->lun; 797 lun = cmd->device->lun;
795 serial = cmd->serial_number; 798 serial = cmd->serial_number;
@@ -912,15 +915,14 @@ static int
912qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) 915qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
913{ 916{
914 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 917 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
918 scsi_qla_host_t *pha = to_qla_parent(ha);
915 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 919 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
916 int ret; 920 int ret = FAILED;
917 unsigned int id, lun; 921 unsigned int id, lun;
918 unsigned long serial; 922 unsigned long serial;
919 923
920 qla2x00_block_error_handler(cmd); 924 qla2x00_block_error_handler(cmd);
921 925
922 ret = FAILED;
923
924 id = cmd->device->id; 926 id = cmd->device->id;
925 lun = cmd->device->lun; 927 lun = cmd->device->lun;
926 serial = cmd->serial_number; 928 serial = cmd->serial_number;
@@ -944,7 +946,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
944 goto eh_bus_reset_done; 946 goto eh_bus_reset_done;
945 947
946 /* Flush outstanding commands. */ 948 /* Flush outstanding commands. */
947 if (!qla2x00_eh_wait_for_pending_commands(ha)) 949 if (!qla2x00_eh_wait_for_pending_commands(pha))
948 ret = FAILED; 950 ret = FAILED;
949 951
950eh_bus_reset_done: 952eh_bus_reset_done:
@@ -974,14 +976,13 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
974{ 976{
975 scsi_qla_host_t *ha = to_qla_host(cmd->device->host); 977 scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
976 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; 978 fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
977 int ret; 979 int ret = FAILED;
978 unsigned int id, lun; 980 unsigned int id, lun;
979 unsigned long serial; 981 unsigned long serial;
982 scsi_qla_host_t *pha = to_qla_parent(ha);
980 983
981 qla2x00_block_error_handler(cmd); 984 qla2x00_block_error_handler(cmd);
982 985
983 ret = FAILED;
984
985 id = cmd->device->id; 986 id = cmd->device->id;
986 lun = cmd->device->lun; 987 lun = cmd->device->lun;
987 serial = cmd->serial_number; 988 serial = cmd->serial_number;
@@ -1004,21 +1005,24 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1004 * while dpc is stuck for the mailbox to complete. 1005 * while dpc is stuck for the mailbox to complete.
1005 */ 1006 */
1006 qla2x00_wait_for_loop_ready(ha); 1007 qla2x00_wait_for_loop_ready(ha);
1007 set_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 1008 set_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
1008 if (qla2x00_abort_isp(ha)) { 1009 if (qla2x00_abort_isp(pha)) {
1009 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 1010 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
1010 /* failed. schedule dpc to try */ 1011 /* failed. schedule dpc to try */
1011 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 1012 set_bit(ISP_ABORT_NEEDED, &pha->dpc_flags);
1012 1013
1013 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS) 1014 if (qla2x00_wait_for_hba_online(ha) != QLA_SUCCESS)
1014 goto eh_host_reset_lock; 1015 goto eh_host_reset_lock;
1015 } 1016 }
1016 clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags); 1017 clear_bit(ABORT_ISP_ACTIVE, &pha->dpc_flags);
1017 1018
1018 /* Waiting for our command in done_queue to be returned to OS.*/ 1019 /* Waiting for our command in done_queue to be returned to OS.*/
1019 if (qla2x00_eh_wait_for_pending_commands(ha)) 1020 if (qla2x00_eh_wait_for_pending_commands(pha))
1020 ret = SUCCESS; 1021 ret = SUCCESS;
1021 1022
1023 if (ha->parent)
1024 qla2x00_vp_abort_isp(ha);
1025
1022eh_host_reset_lock: 1026eh_host_reset_lock:
1023 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__, 1027 qla_printk(KERN_INFO, ha, "%s: reset %s\n", __func__,
1024 (ret == FAILED) ? "failed" : "succeded"); 1028 (ret == FAILED) ? "failed" : "succeded");
@@ -1435,6 +1439,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1435 ha->host = host; 1439 ha->host = host;
1436 ha->host_no = host->host_no; 1440 ha->host_no = host->host_no;
1437 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no); 1441 sprintf(ha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, ha->host_no);
1442 ha->parent = NULL;
1438 1443
1439 /* Set ISP-type information. */ 1444 /* Set ISP-type information. */
1440 qla2x00_set_isp_flags(ha); 1445 qla2x00_set_isp_flags(ha);
@@ -1452,7 +1457,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1452 1457
1453 ha->prev_topology = 0; 1458 ha->prev_topology = 0;
1454 ha->init_cb_size = sizeof(init_cb_t); 1459 ha->init_cb_size = sizeof(init_cb_t);
1455 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER; 1460 ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
1456 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1461 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1457 ha->optrom_size = OPTROM_SIZE_2300; 1462 ha->optrom_size = OPTROM_SIZE_2300;
1458 1463
@@ -1524,8 +1529,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1524 ha->request_q_length = REQUEST_ENTRY_CNT_24XX; 1529 ha->request_q_length = REQUEST_ENTRY_CNT_24XX;
1525 ha->response_q_length = RESPONSE_ENTRY_CNT_2300; 1530 ha->response_q_length = RESPONSE_ENTRY_CNT_2300;
1526 ha->last_loop_id = SNS_LAST_LOOP_ID_2300; 1531 ha->last_loop_id = SNS_LAST_LOOP_ID_2300;
1527 ha->init_cb_size = sizeof(struct init_cb_24xx); 1532 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
1528 ha->mgmt_svr_loop_id = 10; 1533 ha->mgmt_svr_loop_id = 10 + ha->vp_idx;
1529 ha->isp_ops.pci_config = qla24xx_pci_config; 1534 ha->isp_ops.pci_config = qla24xx_pci_config;
1530 ha->isp_ops.reset_chip = qla24xx_reset_chip; 1535 ha->isp_ops.reset_chip = qla24xx_reset_chip;
1531 ha->isp_ops.chip_diag = qla24xx_chip_diag; 1536 ha->isp_ops.chip_diag = qla24xx_chip_diag;
@@ -1563,10 +1568,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1563 ha->instance = num_hosts; 1568 ha->instance = num_hosts;
1564 1569
1565 init_MUTEX(&ha->mbx_cmd_sem); 1570 init_MUTEX(&ha->mbx_cmd_sem);
1571 init_MUTEX(&ha->vport_sem);
1566 init_MUTEX_LOCKED(&ha->mbx_intr_sem); 1572 init_MUTEX_LOCKED(&ha->mbx_intr_sem);
1567 1573
1568 INIT_LIST_HEAD(&ha->list); 1574 INIT_LIST_HEAD(&ha->list);
1569 INIT_LIST_HEAD(&ha->fcports); 1575 INIT_LIST_HEAD(&ha->fcports);
1576 INIT_LIST_HEAD(&ha->vp_list);
1577
1578 set_bit(0, (unsigned long *) ha->vp_idx_map);
1570 1579
1571 qla2x00_config_dma_addressing(ha); 1580 qla2x00_config_dma_addressing(ha);
1572 if (qla2x00_mem_alloc(ha)) { 1581 if (qla2x00_mem_alloc(ha)) {
@@ -1789,7 +1798,8 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1789void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport, 1798void qla2x00_mark_device_lost(scsi_qla_host_t *ha, fc_port_t *fcport,
1790 int do_login, int defer) 1799 int do_login, int defer)
1791{ 1800{
1792 if (atomic_read(&fcport->state) == FCS_ONLINE) 1801 if (atomic_read(&fcport->state) == FCS_ONLINE &&
1802 ha->vp_idx == fcport->vp_idx)
1793 qla2x00_schedule_rport_del(ha, fcport, defer); 1803 qla2x00_schedule_rport_del(ha, fcport, defer);
1794 1804
1795 /* 1805 /*
@@ -1840,19 +1850,23 @@ void
1840qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer) 1850qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1841{ 1851{
1842 fc_port_t *fcport; 1852 fc_port_t *fcport;
1853 scsi_qla_host_t *pha = to_qla_parent(ha);
1843 1854
1844 list_for_each_entry(fcport, &ha->fcports, list) { 1855 list_for_each_entry(fcport, &pha->fcports, list) {
1845 if (fcport->port_type != FCT_TARGET) 1856 if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx)
1846 continue; 1857 continue;
1847
1848 /* 1858 /*
1849 * No point in marking the device as lost, if the device is 1859 * No point in marking the device as lost, if the device is
1850 * already DEAD. 1860 * already DEAD.
1851 */ 1861 */
1852 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1862 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1853 continue; 1863 continue;
1854 if (atomic_read(&fcport->state) == FCS_ONLINE) 1864 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1855 qla2x00_schedule_rport_del(ha, fcport, defer); 1865 if (defer)
1866 qla2x00_schedule_rport_del(ha, fcport, defer);
1867 else if (ha->vp_idx == fcport->vp_idx)
1868 qla2x00_schedule_rport_del(ha, fcport, defer);
1869 }
1856 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1870 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1857 } 1871 }
1858 1872
@@ -1868,7 +1882,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1868* 0 = success. 1882* 0 = success.
1869* 1 = failure. 1883* 1 = failure.
1870*/ 1884*/
1871static uint8_t 1885uint8_t
1872qla2x00_mem_alloc(scsi_qla_host_t *ha) 1886qla2x00_mem_alloc(scsi_qla_host_t *ha)
1873{ 1887{
1874 char name[16]; 1888 char name[16];
@@ -1920,33 +1934,33 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
1920 continue; 1934 continue;
1921 } 1935 }
1922 1936
1923 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME, 1937 /* get consistent memory allocated for init control block */
1924 ha->host_no); 1938 ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
1925 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev, 1939 ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
1926 DMA_POOL_SIZE, 8, 0); 1940 if (ha->init_cb == NULL) {
1927 if (ha->s_dma_pool == NULL) {
1928 qla_printk(KERN_WARNING, ha, 1941 qla_printk(KERN_WARNING, ha,
1929 "Memory Allocation failed - s_dma_pool\n"); 1942 "Memory Allocation failed - init_cb\n");
1930 1943
1931 qla2x00_mem_free(ha); 1944 qla2x00_mem_free(ha);
1932 msleep(100); 1945 msleep(100);
1933 1946
1934 continue; 1947 continue;
1935 } 1948 }
1949 memset(ha->init_cb, 0, ha->init_cb_size);
1936 1950
1937 /* get consistent memory allocated for init control block */ 1951 snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
1938 ha->init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 1952 ha->host_no);
1939 &ha->init_cb_dma); 1953 ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
1940 if (ha->init_cb == NULL) { 1954 DMA_POOL_SIZE, 8, 0);
1955 if (ha->s_dma_pool == NULL) {
1941 qla_printk(KERN_WARNING, ha, 1956 qla_printk(KERN_WARNING, ha,
1942 "Memory Allocation failed - init_cb\n"); 1957 "Memory Allocation failed - s_dma_pool\n");
1943 1958
1944 qla2x00_mem_free(ha); 1959 qla2x00_mem_free(ha);
1945 msleep(100); 1960 msleep(100);
1946 1961
1947 continue; 1962 continue;
1948 } 1963 }
1949 memset(ha->init_cb, 0, ha->init_cb_size);
1950 1964
1951 if (qla2x00_allocate_sp_pool(ha)) { 1965 if (qla2x00_allocate_sp_pool(ha)) {
1952 qla_printk(KERN_WARNING, ha, 1966 qla_printk(KERN_WARNING, ha,
@@ -2052,7 +2066,7 @@ qla2x00_mem_alloc(scsi_qla_host_t *ha)
2052* Input: 2066* Input:
2053* ha = adapter block pointer. 2067* ha = adapter block pointer.
2054*/ 2068*/
2055static void 2069void
2056qla2x00_mem_free(scsi_qla_host_t *ha) 2070qla2x00_mem_free(scsi_qla_host_t *ha)
2057{ 2071{
2058 struct list_head *fcpl, *fcptemp; 2072 struct list_head *fcpl, *fcptemp;
@@ -2088,12 +2102,13 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
2088 if (ha->ms_iocb) 2102 if (ha->ms_iocb)
2089 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 2103 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
2090 2104
2091 if (ha->init_cb)
2092 dma_pool_free(ha->s_dma_pool, ha->init_cb, ha->init_cb_dma);
2093
2094 if (ha->s_dma_pool) 2105 if (ha->s_dma_pool)
2095 dma_pool_destroy(ha->s_dma_pool); 2106 dma_pool_destroy(ha->s_dma_pool);
2096 2107
2108 if (ha->init_cb)
2109 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
2110 ha->init_cb, ha->init_cb_dma);
2111
2097 if (ha->gid_list) 2112 if (ha->gid_list)
2098 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 2113 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
2099 ha->gid_list_dma); 2114 ha->gid_list_dma);
@@ -2199,6 +2214,7 @@ qla2x00_free_sp_pool( scsi_qla_host_t *ha)
2199static int 2214static int
2200qla2x00_do_dpc(void *data) 2215qla2x00_do_dpc(void *data)
2201{ 2216{
2217 int rval;
2202 scsi_qla_host_t *ha; 2218 scsi_qla_host_t *ha;
2203 fc_port_t *fcport; 2219 fc_port_t *fcport;
2204 uint8_t status; 2220 uint8_t status;
@@ -2347,7 +2363,7 @@ qla2x00_do_dpc(void *data)
2347 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 2363 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
2348 &ha->dpc_flags))) { 2364 &ha->dpc_flags))) {
2349 2365
2350 qla2x00_loop_resync(ha); 2366 rval = qla2x00_loop_resync(ha);
2351 2367
2352 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); 2368 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
2353 } 2369 }
@@ -2374,6 +2390,8 @@ qla2x00_do_dpc(void *data)
2374 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags)) 2390 if (test_and_clear_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags))
2375 ha->isp_ops.beacon_blink(ha); 2391 ha->isp_ops.beacon_blink(ha);
2376 2392
2393 qla2x00_do_dpc_all_vps(ha);
2394
2377 ha->dpc_active = 0; 2395 ha->dpc_active = 0;
2378 } /* End of while(1) */ 2396 } /* End of while(1) */
2379 2397
@@ -2426,13 +2444,7 @@ qla2x00_sp_free_dma(scsi_qla_host_t *ha, srb_t *sp)
2426 struct scsi_cmnd *cmd = sp->cmd; 2444 struct scsi_cmnd *cmd = sp->cmd;
2427 2445
2428 if (sp->flags & SRB_DMA_VALID) { 2446 if (sp->flags & SRB_DMA_VALID) {
2429 if (cmd->use_sg) { 2447 scsi_dma_unmap(cmd);
2430 dma_unmap_sg(&ha->pdev->dev, cmd->request_buffer,
2431 cmd->use_sg, cmd->sc_data_direction);
2432 } else if (cmd->request_bufflen) {
2433 dma_unmap_single(&ha->pdev->dev, sp->dma_handle,
2434 cmd->request_bufflen, cmd->sc_data_direction);
2435 }
2436 sp->flags &= ~SRB_DMA_VALID; 2448 sp->flags &= ~SRB_DMA_VALID;
2437 } 2449 }
2438 CMD_SP(cmd) = NULL; 2450 CMD_SP(cmd) = NULL;
@@ -2458,7 +2470,7 @@ qla2x00_sp_compl(scsi_qla_host_t *ha, srb_t *sp)
2458* 2470*
2459* Context: Interrupt 2471* Context: Interrupt
2460***************************************************************************/ 2472***************************************************************************/
2461static void 2473void
2462qla2x00_timer(scsi_qla_host_t *ha) 2474qla2x00_timer(scsi_qla_host_t *ha)
2463{ 2475{
2464 unsigned long cpu_flags = 0; 2476 unsigned long cpu_flags = 0;
@@ -2467,6 +2479,7 @@ qla2x00_timer(scsi_qla_host_t *ha)
2467 int index; 2479 int index;
2468 srb_t *sp; 2480 srb_t *sp;
2469 int t; 2481 int t;
2482 scsi_qla_host_t *pha = to_qla_parent(ha);
2470 2483
2471 /* 2484 /*
2472 * Ports - Port down timer. 2485 * Ports - Port down timer.
@@ -2512,23 +2525,29 @@ qla2x00_timer(scsi_qla_host_t *ha)
2512 atomic_set(&ha->loop_state, LOOP_DEAD); 2525 atomic_set(&ha->loop_state, LOOP_DEAD);
2513 2526
2514 /* Schedule an ISP abort to return any tape commands. */ 2527 /* Schedule an ISP abort to return any tape commands. */
2515 spin_lock_irqsave(&ha->hardware_lock, cpu_flags); 2528 /* NPIV - scan physical port only */
2516 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; 2529 if (!ha->parent) {
2517 index++) { 2530 spin_lock_irqsave(&ha->hardware_lock,
2518 fc_port_t *sfcp; 2531 cpu_flags);
2532 for (index = 1;
2533 index < MAX_OUTSTANDING_COMMANDS;
2534 index++) {
2535 fc_port_t *sfcp;
2536
2537 sp = ha->outstanding_cmds[index];
2538 if (!sp)
2539 continue;
2540 sfcp = sp->fcport;
2541 if (!(sfcp->flags & FCF_TAPE_PRESENT))
2542 continue;
2519 2543
2520 sp = ha->outstanding_cmds[index]; 2544 set_bit(ISP_ABORT_NEEDED,
2521 if (!sp) 2545 &ha->dpc_flags);
2522 continue; 2546 break;
2523 sfcp = sp->fcport; 2547 }
2524 if (!(sfcp->flags & FCF_TAPE_PRESENT)) 2548 spin_unlock_irqrestore(&ha->hardware_lock,
2525 continue; 2549 cpu_flags);
2526
2527 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
2528 break;
2529 } 2550 }
2530 spin_unlock_irqrestore(&ha->hardware_lock, cpu_flags);
2531
2532 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags); 2551 set_bit(ABORT_QUEUES_NEEDED, &ha->dpc_flags);
2533 start_dpc++; 2552 start_dpc++;
2534 } 2553 }
@@ -2572,8 +2591,9 @@ qla2x00_timer(scsi_qla_host_t *ha)
2572 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) || 2591 test_bit(LOGIN_RETRY_NEEDED, &ha->dpc_flags) ||
2573 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || 2592 test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
2574 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) || 2593 test_bit(BEACON_BLINK_NEEDED, &ha->dpc_flags) ||
2594 test_bit(VP_DPC_NEEDED, &ha->dpc_flags) ||
2575 test_bit(RELOGIN_NEEDED, &ha->dpc_flags))) 2595 test_bit(RELOGIN_NEEDED, &ha->dpc_flags)))
2576 qla2xxx_wake_dpc(ha); 2596 qla2xxx_wake_dpc(pha);
2577 2597
2578 qla2x00_restart_timer(ha, WATCH_INTERVAL); 2598 qla2x00_restart_timer(ha, WATCH_INTERVAL);
2579} 2599}
@@ -2717,14 +2737,24 @@ qla2x00_module_init(void)
2717 2737
2718 qla2xxx_transport_template = 2738 qla2xxx_transport_template =
2719 fc_attach_transport(&qla2xxx_transport_functions); 2739 fc_attach_transport(&qla2xxx_transport_functions);
2720 if (!qla2xxx_transport_template) 2740 if (!qla2xxx_transport_template) {
2741 kmem_cache_destroy(srb_cachep);
2721 return -ENODEV; 2742 return -ENODEV;
2743 }
2744 qla2xxx_transport_vport_template =
2745 fc_attach_transport(&qla2xxx_transport_vport_functions);
2746 if (!qla2xxx_transport_vport_template) {
2747 kmem_cache_destroy(srb_cachep);
2748 fc_release_transport(qla2xxx_transport_template);
2749 return -ENODEV;
2750 }
2722 2751
2723 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n"); 2752 printk(KERN_INFO "QLogic Fibre Channel HBA Driver\n");
2724 ret = pci_register_driver(&qla2xxx_pci_driver); 2753 ret = pci_register_driver(&qla2xxx_pci_driver);
2725 if (ret) { 2754 if (ret) {
2726 kmem_cache_destroy(srb_cachep); 2755 kmem_cache_destroy(srb_cachep);
2727 fc_release_transport(qla2xxx_transport_template); 2756 fc_release_transport(qla2xxx_transport_template);
2757 fc_release_transport(qla2xxx_transport_vport_template);
2728 } 2758 }
2729 return ret; 2759 return ret;
2730} 2760}
@@ -2739,6 +2769,7 @@ qla2x00_module_exit(void)
2739 qla2x00_release_firmware(); 2769 qla2x00_release_firmware();
2740 kmem_cache_destroy(srb_cachep); 2770 kmem_cache_destroy(srb_cachep);
2741 fc_release_transport(qla2xxx_transport_template); 2771 fc_release_transport(qla2xxx_transport_template);
2772 fc_release_transport(qla2xxx_transport_vport_template);
2742} 2773}
2743 2774
2744module_init(qla2x00_module_init); 2775module_init(qla2x00_module_init);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c375a4efbc71..fd2f10a25348 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.01.07-k7" 10#define QLA2XXX_VERSION "8.02.00-k1"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 1 13#define QLA_DRIVER_MINOR_VER 2
14#define QLA_DRIVER_PATCH_VER 7 14#define QLA_DRIVER_PATCH_VER 0
15#define QLA_DRIVER_BETA_VER 0 15#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 6437d024b0dd..fcc184cd066d 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -6,176 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include <scsi/scsi_dbg.h> 9#include "ql4_glbl.h"
10 10#include "ql4_dbg.h"
11#if 0 11#include "ql4_inline.h"
12
13static void qla4xxx_print_srb_info(struct srb * srb)
14{
15 printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
16 printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
17 __func__, srb->cmd, (unsigned long) srb->dma_handle);
18 printk("%s: fw_ddb_index = %d, lun = %d\n",
19 __func__, srb->fw_ddb_index, srb->cmd->device->lun);
20 printk("%s: iocb_tov = %d\n",
21 __func__, srb->iocb_tov);
22 printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
23 __func__, srb->cc_stat, srb->r_start, srb->u_start);
24}
25
26void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
27{
28 printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
29 printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
30 cmd->device->channel, cmd->device->id, cmd->device->lun,
31 cmd->cmd_len);
32 scsi_print_command(cmd);
33 printk(" seg_cnt = %d\n", cmd->use_sg);
34 printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
35 cmd->request_buffer, cmd->request_bufflen);
36 if (cmd->use_sg) {
37 struct scatterlist *sg;
38 sg = (struct scatterlist *)cmd->request_buffer;
39 printk(" SG buffer: \n");
40 qla4xxx_dump_buffer((caddr_t) sg,
41 (cmd->use_sg * sizeof(*sg)));
42 }
43 printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
44 cmd->transfersize);
45 printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
46 printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
47 cmd->sc_data_direction);
48 printk(" Current time (jiffies) = 0x%lx, "
49 "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
50 qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
51}
52
53void __dump_registers(struct scsi_qla_host *ha)
54{
55 uint8_t i;
56 for (i = 0; i < MBOX_REG_COUNT; i++) {
57 printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
58 (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
59 readw(&ha->reg->mailbox[i]));
60 }
61 printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
62 (uint8_t) offsetof(struct isp_reg, flash_address),
63 readw(&ha->reg->flash_address));
64 printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
65 (uint8_t) offsetof(struct isp_reg, flash_data),
66 readw(&ha->reg->flash_data));
67 printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
68 (uint8_t) offsetof(struct isp_reg, ctrl_status),
69 readw(&ha->reg->ctrl_status));
70 if (is_qla4010(ha)) {
71 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
72 (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
73 readw(&ha->reg->u1.isp4010.nvram));
74 }
75
76 else if (is_qla4022(ha) | is_qla4032(ha)) {
77 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
78 (uint8_t) offsetof(struct isp_reg,
79 u1.isp4022.intr_mask),
80 readw(&ha->reg->u1.isp4022.intr_mask));
81 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
82 (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
83 readw(&ha->reg->u1.isp4022.nvram));
84 printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
85 (uint8_t) offsetof(struct isp_reg,
86 u1.isp4022.semaphore),
87 readw(&ha->reg->u1.isp4022.semaphore));
88 }
89 printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
90 (uint8_t) offsetof(struct isp_reg, req_q_in),
91 readw(&ha->reg->req_q_in));
92 printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
93 (uint8_t) offsetof(struct isp_reg, rsp_q_out),
94 readw(&ha->reg->rsp_q_out));
95 if (is_qla4010(ha)) {
96 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
97 (uint8_t) offsetof(struct isp_reg,
98 u2.isp4010.ext_hw_conf),
99 readw(&ha->reg->u2.isp4010.ext_hw_conf));
100 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
101 (uint8_t) offsetof(struct isp_reg,
102 u2.isp4010.port_ctrl),
103 readw(&ha->reg->u2.isp4010.port_ctrl));
104 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
105 (uint8_t) offsetof(struct isp_reg,
106 u2.isp4010.port_status),
107 readw(&ha->reg->u2.isp4010.port_status));
108 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
109 (uint8_t) offsetof(struct isp_reg,
110 u2.isp4010.req_q_out),
111 readw(&ha->reg->u2.isp4010.req_q_out));
112 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
113 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
114 readw(&ha->reg->u2.isp4010.gp_out));
115 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
116 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
117 readw(&ha->reg->u2.isp4010.gp_in));
118 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
119 (uint8_t) offsetof(struct isp_reg,
120 u2.isp4010.port_err_status),
121 readw(&ha->reg->u2.isp4010.port_err_status));
122 }
123
124 else if (is_qla4022(ha) | is_qla4032(ha)) {
125 printk(KERN_INFO "Page 0 Registers:\n");
126 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
127 (uint8_t) offsetof(struct isp_reg,
128 u2.isp4022.p0.ext_hw_conf),
129 readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
130 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
131 (uint8_t) offsetof(struct isp_reg,
132 u2.isp4022.p0.port_ctrl),
133 readw(&ha->reg->u2.isp4022.p0.port_ctrl));
134 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
135 (uint8_t) offsetof(struct isp_reg,
136 u2.isp4022.p0.port_status),
137 readw(&ha->reg->u2.isp4022.p0.port_status));
138 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
139 (uint8_t) offsetof(struct isp_reg,
140 u2.isp4022.p0.gp_out),
141 readw(&ha->reg->u2.isp4022.p0.gp_out));
142 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
143 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
144 readw(&ha->reg->u2.isp4022.p0.gp_in));
145 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
146 (uint8_t) offsetof(struct isp_reg,
147 u2.isp4022.p0.port_err_status),
148 readw(&ha->reg->u2.isp4022.p0.port_err_status));
149 printk(KERN_INFO "Page 1 Registers:\n");
150 writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
151 &ha->reg->ctrl_status);
152 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
153 (uint8_t) offsetof(struct isp_reg,
154 u2.isp4022.p1.req_q_out),
155 readw(&ha->reg->u2.isp4022.p1.req_q_out));
156 writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
157 &ha->reg->ctrl_status);
158 }
159}
160
161void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
162{
163 unsigned long flags = 0;
164 int i = 0;
165 spin_lock_irqsave(&ha->hardware_lock, flags);
166 for (i = 1; i < MBOX_REG_COUNT; i++)
167 printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
168 readw(&ha->reg->mailbox[i]));
169 spin_unlock_irqrestore(&ha->hardware_lock, flags);
170}
171
172void qla4xxx_dump_registers(struct scsi_qla_host *ha)
173{
174 unsigned long flags = 0;
175 spin_lock_irqsave(&ha->hardware_lock, flags);
176 __dump_registers(ha);
177 spin_unlock_irqrestore(&ha->hardware_lock, flags);
178}
179 12
180void qla4xxx_dump_buffer(void *b, uint32_t size) 13void qla4xxx_dump_buffer(void *b, uint32_t size)
181{ 14{
@@ -198,4 +31,3 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
198 printk(KERN_DEBUG "\n"); 31 printk(KERN_DEBUG "\n");
199} 32}
200 33
201#endif /* 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 6f4cf2dd2f4a..accaf690eaf0 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -122,8 +122,7 @@
122 122
123#define ISCSI_IPADDR_SIZE 4 /* IP address size */ 123#define ISCSI_IPADDR_SIZE 4 /* IP address size */
124#define ISCSI_ALIAS_SIZE 32 /* iSCSI Alias name size */ 124#define ISCSI_ALIAS_SIZE 32 /* iSCSI Alias name size */
125#define ISCSI_NAME_SIZE 255 /* ISCSI Name size - 125#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
126 * usually a string */
127 126
128#define LSDW(x) ((u32)((u64)(x))) 127#define LSDW(x) ((u32)((u64)(x)))
129#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 128#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -187,9 +186,21 @@ struct srb {
187 u_long u_start; /* Time when we handed the cmd to F/W */ 186 u_long u_start; /* Time when we handed the cmd to F/W */
188}; 187};
189 188
190 /* 189/*
191 * Device Database (DDB) structure 190 * Asynchronous Event Queue structure
192 */ 191 */
192struct aen {
193 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
194};
195
196struct ql4_aen_log {
197 int count;
198 struct aen entry[MAX_AEN_ENTRIES];
199};
200
201/*
202 * Device Database (DDB) structure
203 */
193struct ddb_entry { 204struct ddb_entry {
194 struct list_head list; /* ddb list */ 205 struct list_head list; /* ddb list */
195 struct scsi_qla_host *ha; 206 struct scsi_qla_host *ha;
@@ -254,13 +265,6 @@ struct ddb_entry {
254#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ 265#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
255#define DF_FO_MASKED 3 266#define DF_FO_MASKED 3
256 267
257/*
258 * Asynchronous Event Queue structure
259 */
260struct aen {
261 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
262};
263
264 268
265#include "ql4_fw.h" 269#include "ql4_fw.h"
266#include "ql4_nvram.h" 270#include "ql4_nvram.h"
@@ -270,31 +274,31 @@ struct aen {
270 */ 274 */
271struct scsi_qla_host { 275struct scsi_qla_host {
272 /* Linux adapter configuration data */ 276 /* Linux adapter configuration data */
273 struct Scsi_Host *host; /* pointer to host data */
274 uint32_t tot_ddbs;
275 unsigned long flags; 277 unsigned long flags;
276 278
277#define AF_ONLINE 0 /* 0x00000001 */ 279#define AF_ONLINE 0 /* 0x00000001 */
278#define AF_INIT_DONE 1 /* 0x00000002 */ 280#define AF_INIT_DONE 1 /* 0x00000002 */
279#define AF_MBOX_COMMAND 2 /* 0x00000004 */ 281#define AF_MBOX_COMMAND 2 /* 0x00000004 */
280#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ 282#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
281#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */ 283#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
282#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 284#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
283#define AF_LINK_UP 8 /* 0x00000100 */ 285#define AF_LINK_UP 8 /* 0x00000100 */
284#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 286#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
285#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */ 287#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
286#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
287 288
288 unsigned long dpc_flags; 289 unsigned long dpc_flags;
289 290
290#define DPC_RESET_HA 1 /* 0x00000002 */ 291#define DPC_RESET_HA 1 /* 0x00000002 */
291#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */ 292#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
292#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */ 293#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
293#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */ 294#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
294#define DPC_RESET_HA_INTR 5 /* 0x00000020 */ 295#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
295#define DPC_ISNS_RESTART 7 /* 0x00000080 */ 296#define DPC_ISNS_RESTART 7 /* 0x00000080 */
296#define DPC_AEN 9 /* 0x00000200 */ 297#define DPC_AEN 9 /* 0x00000200 */
297#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ 298#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
299
300 struct Scsi_Host *host; /* pointer to host data */
301 uint32_t tot_ddbs;
298 302
299 uint16_t iocb_cnt; 303 uint16_t iocb_cnt;
300 uint16_t iocb_hiwat; 304 uint16_t iocb_hiwat;
@@ -344,6 +348,7 @@ struct scsi_qla_host {
344 uint32_t firmware_version[2]; 348 uint32_t firmware_version[2];
345 uint32_t patch_number; 349 uint32_t patch_number;
346 uint32_t build_number; 350 uint32_t build_number;
351 uint32_t board_id;
347 352
348 /* --- From Init_FW --- */ 353 /* --- From Init_FW --- */
349 /* init_cb_t *init_cb; */ 354 /* init_cb_t *init_cb; */
@@ -363,7 +368,6 @@ struct scsi_qla_host {
363 368
364 /* --- From GetFwState --- */ 369 /* --- From GetFwState --- */
365 uint32_t firmware_state; 370 uint32_t firmware_state;
366 uint32_t board_id;
367 uint32_t addl_fw_state; 371 uint32_t addl_fw_state;
368 372
369 /* Linux kernel thread */ 373 /* Linux kernel thread */
@@ -414,6 +418,8 @@ struct scsi_qla_host {
414 uint16_t aen_out; 418 uint16_t aen_out;
415 struct aen aen_q[MAX_AEN_ENTRIES]; 419 struct aen aen_q[MAX_AEN_ENTRIES];
416 420
421 struct ql4_aen_log aen_log;/* tracks all aens */
422
417 /* This mutex serializes mailbox commands issued 423 /* This mutex serializes mailbox commands issued
418 * concurrently by several threads. 424 * concurrently by several threads.
419 */ 425 */
@@ -585,10 +591,4 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
585#define FLUSH_DDB_CHANGED_AENS 1 591#define FLUSH_DDB_CHANGED_AENS 1
586#define RELOGIN_DDB_CHANGED_AENS 2 592#define RELOGIN_DDB_CHANGED_AENS 2
587 593
588#include "ql4_version.h"
589#include "ql4_glbl.h"
590#include "ql4_dbg.h"
591#include "ql4_inline.h"
592
593
594#endif /*_QLA4XXX_H */ 594#endif /*_QLA4XXX_H */
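
Beyond reshuffling the flag definitions, this header adds a second AEN store: ql4_aen_log is a bounded, append-only capture of every AEN's mailbox registers, kept alongside the existing aen_q ring that the DPC routine drains. A sketch of the recording step, matching the ql4_isr.c hunk later in this diff (the function name is illustrative):

/* Sketch: capture one AEN into the bounded log; drops events once full. */
static void ql4_log_aen(struct scsi_qla_host *ha)
{
	int i;

	if (ha->aen_log.count >= MAX_AEN_ENTRIES)
		return;		/* log full: silently drop */

	for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
		ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
			readl(&ha->reg->mailbox[i]);
	ha->aen_log.count++;
}
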
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4eea8c571916..9bb3d1d2a925 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -20,143 +20,23 @@
20 *************************************************************************/ 20 *************************************************************************/
21 21
22struct port_ctrl_stat_regs { 22struct port_ctrl_stat_regs {
23 __le32 ext_hw_conf; /* 80 x50 R/W */ 23 __le32 ext_hw_conf; /* 0x50 R/W */
24 __le32 intChipConfiguration; /* 84 x54 */ 24 __le32 rsrvd0; /* 0x54 */
25 __le32 port_ctrl; /* 88 x58 */ 25 __le32 port_ctrl; /* 0x58 */
26 __le32 port_status; /* 92 x5c */ 26 __le32 port_status; /* 0x5c */
27 __le32 HostPrimMACHi; /* 96 x60 */ 27 __le32 rsrvd1[32]; /* 0x60-0xdf */
28 __le32 HostPrimMACLow; /* 100 x64 */ 28 __le32 gp_out; /* 0xe0 */
29 __le32 HostSecMACHi; /* 104 x68 */ 29 __le32 gp_in; /* 0xe4 */
30 __le32 HostSecMACLow; /* 108 x6c */ 30 __le32 rsrvd2[5]; /* 0xe8-0xfb */
31 __le32 EPPrimMACHi; /* 112 x70 */ 31 __le32 port_err_status; /* 0xfc */
32 __le32 EPPrimMACLow; /* 116 x74 */
33 __le32 EPSecMACHi; /* 120 x78 */
34 __le32 EPSecMACLow; /* 124 x7c */
35 __le32 HostPrimIPHi; /* 128 x80 */
36 __le32 HostPrimIPMidHi; /* 132 x84 */
37 __le32 HostPrimIPMidLow; /* 136 x88 */
38 __le32 HostPrimIPLow; /* 140 x8c */
39 __le32 HostSecIPHi; /* 144 x90 */
40 __le32 HostSecIPMidHi; /* 148 x94 */
41 __le32 HostSecIPMidLow; /* 152 x98 */
42 __le32 HostSecIPLow; /* 156 x9c */
43 __le32 EPPrimIPHi; /* 160 xa0 */
44 __le32 EPPrimIPMidHi; /* 164 xa4 */
45 __le32 EPPrimIPMidLow; /* 168 xa8 */
46 __le32 EPPrimIPLow; /* 172 xac */
47 __le32 EPSecIPHi; /* 176 xb0 */
48 __le32 EPSecIPMidHi; /* 180 xb4 */
49 __le32 EPSecIPMidLow; /* 184 xb8 */
50 __le32 EPSecIPLow; /* 188 xbc */
51 __le32 IPReassemblyTimeout; /* 192 xc0 */
52 __le32 EthMaxFramePayload; /* 196 xc4 */
53 __le32 TCPMaxWindowSize; /* 200 xc8 */
54 __le32 TCPCurrentTimestampHi; /* 204 xcc */
55 __le32 TCPCurrentTimestampLow; /* 208 xd0 */
56 __le32 LocalRAMAddress; /* 212 xd4 */
57 __le32 LocalRAMData; /* 216 xd8 */
58 __le32 PCSReserved1; /* 220 xdc */
59 __le32 gp_out; /* 224 xe0 */
60 __le32 gp_in; /* 228 xe4 */
61 __le32 ProbeMuxAddr; /* 232 xe8 */
62 __le32 ProbeMuxData; /* 236 xec */
63 __le32 ERMQueueBaseAddr0; /* 240 xf0 */
64 __le32 ERMQueueBaseAddr1; /* 244 xf4 */
65 __le32 MACConfiguration; /* 248 xf8 */
66 __le32 port_err_status; /* 252 xfc COR */
67}; 32};
68 33
69struct host_mem_cfg_regs { 34struct host_mem_cfg_regs {
70 __le32 NetRequestQueueOut; /* 80 x50 */ 35 __le32 rsrvd0[12]; /* 0x50-0x7F */
71 __le32 NetRequestQueueOutAddrHi; /* 84 x54 */ 36 __le32 req_q_out; /* 0x80 */
72 __le32 NetRequestQueueOutAddrLow; /* 88 x58 */ 37 __le32 rsrvd1[31]; /* 0x84-0xFF */
73 __le32 NetRequestQueueBaseAddrHi; /* 92 x5c */
74 __le32 NetRequestQueueBaseAddrLow; /* 96 x60 */
75 __le32 NetRequestQueueLength; /* 100 x64 */
76 __le32 NetResponseQueueIn; /* 104 x68 */
77 __le32 NetResponseQueueInAddrHi; /* 108 x6c */
78 __le32 NetResponseQueueInAddrLow; /* 112 x70 */
79 __le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
80 __le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
81 __le32 NetResponseQueueLength; /* 124 x7c */
82 __le32 req_q_out; /* 128 x80 */
83 __le32 RequestQueueOutAddrHi; /* 132 x84 */
84 __le32 RequestQueueOutAddrLow; /* 136 x88 */
85 __le32 RequestQueueBaseAddrHi; /* 140 x8c */
86 __le32 RequestQueueBaseAddrLow; /* 144 x90 */
87 __le32 RequestQueueLength; /* 148 x94 */
88 __le32 ResponseQueueIn; /* 152 x98 */
89 __le32 ResponseQueueInAddrHi; /* 156 x9c */
90 __le32 ResponseQueueInAddrLow; /* 160 xa0 */
91 __le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
92 __le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
93 __le32 ResponseQueueLength; /* 172 xac */
94 __le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
95 __le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
96 __le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
97 __le32 NetRxLargeBufferQueueLength; /* 188 xbc */
98 __le32 NetRxLargeBufferLength; /* 192 xc0 */
99 __le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
100 __le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
101 __le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
102 __le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
103 __le32 NetRxSmallBufferLength; /* 212 xd4 */
104 __le32 HMCReserved0[10]; /* 216 xd8 */
105}; 38};
106 39
107struct local_ram_cfg_regs {
108 __le32 BufletSize; /* 80 x50 */
109 __le32 BufletMaxCount; /* 84 x54 */
110 __le32 BufletCurrCount; /* 88 x58 */
111 __le32 BufletPauseThresholdCount; /* 92 x5c */
112 __le32 BufletTCPWinThresholdHi; /* 96 x60 */
113 __le32 BufletTCPWinThresholdLow; /* 100 x64 */
114 __le32 IPHashTableBaseAddr; /* 104 x68 */
115 __le32 IPHashTableSize; /* 108 x6c */
116 __le32 TCPHashTableBaseAddr; /* 112 x70 */
117 __le32 TCPHashTableSize; /* 116 x74 */
118 __le32 NCBAreaBaseAddr; /* 120 x78 */
119 __le32 NCBMaxCount; /* 124 x7c */
120 __le32 NCBCurrCount; /* 128 x80 */
121 __le32 DRBAreaBaseAddr; /* 132 x84 */
122 __le32 DRBMaxCount; /* 136 x88 */
123 __le32 DRBCurrCount; /* 140 x8c */
124 __le32 LRCReserved[28]; /* 144 x90 */
125};
126
127struct prot_stat_regs {
128 __le32 MACTxFrameCount; /* 80 x50 R */
129 __le32 MACTxByteCount; /* 84 x54 R */
130 __le32 MACRxFrameCount; /* 88 x58 R */
131 __le32 MACRxByteCount; /* 92 x5c R */
132 __le32 MACCRCErrCount; /* 96 x60 R */
133 __le32 MACEncErrCount; /* 100 x64 R */
134 __le32 MACRxLengthErrCount; /* 104 x68 R */
135 __le32 IPTxPacketCount; /* 108 x6c R */
136 __le32 IPTxByteCount; /* 112 x70 R */
137 __le32 IPTxFragmentCount; /* 116 x74 R */
138 __le32 IPRxPacketCount; /* 120 x78 R */
139 __le32 IPRxByteCount; /* 124 x7c R */
140 __le32 IPRxFragmentCount; /* 128 x80 R */
141 __le32 IPDatagramReassemblyCount; /* 132 x84 R */
142 __le32 IPV6RxPacketCount; /* 136 x88 R */
143 __le32 IPErrPacketCount; /* 140 x8c R */
144 __le32 IPReassemblyErrCount; /* 144 x90 R */
145 __le32 TCPTxSegmentCount; /* 148 x94 R */
146 __le32 TCPTxByteCount; /* 152 x98 R */
147 __le32 TCPRxSegmentCount; /* 156 x9c R */
148 __le32 TCPRxByteCount; /* 160 xa0 R */
149 __le32 TCPTimerExpCount; /* 164 xa4 R */
150 __le32 TCPRxAckCount; /* 168 xa8 R */
151 __le32 TCPTxAckCount; /* 172 xac R */
152 __le32 TCPRxErrOOOCount; /* 176 xb0 R */
153 __le32 PSReserved0; /* 180 xb4 */
154 __le32 TCPRxWindowProbeUpdateCount; /* 184 xb8 R */
155 __le32 ECCErrCorrectionCount; /* 188 xbc R */
156 __le32 PSReserved1[16]; /* 192 xc0 */
157};
158
159
160/* remote register set (access via PCI memory read/write) */ 40/* remote register set (access via PCI memory read/write) */
161struct isp_reg { 41struct isp_reg {
162#define MBOX_REG_COUNT 8 42#define MBOX_REG_COUNT 8
@@ -207,11 +87,7 @@ struct isp_reg {
207 union { 87 union {
208 struct port_ctrl_stat_regs p0; 88 struct port_ctrl_stat_regs p0;
209 struct host_mem_cfg_regs p1; 89 struct host_mem_cfg_regs p1;
210 struct local_ram_cfg_regs p2;
211 struct prot_stat_regs p3;
212 __le32 r_union[44];
213 }; 90 };
214
215 } __attribute__ ((packed)) isp4022; 91 } __attribute__ ((packed)) isp4022;
216 } u2; 92 } u2;
217}; /* 256 x100 */ 93}; /* 256 x100 */
@@ -296,6 +172,7 @@ static inline uint32_t clr_rmask(uint32_t val)
296/* ISP Semaphore definitions */ 172/* ISP Semaphore definitions */
297 173
298/* ISP General Purpose Output definitions */ 174/* ISP General Purpose Output definitions */
175#define GPOR_TOPCAT_RESET 0x00000004
299 176
300/* shadow registers (DMA'd from HA to system memory. read only) */ 177/* shadow registers (DMA'd from HA to system memory. read only) */
301struct shadow_regs { 178struct shadow_regs {
@@ -337,6 +214,7 @@ union external_hw_config_reg {
337 214
338/* Mailbox command definitions */ 215/* Mailbox command definitions */
339#define MBOX_CMD_ABOUT_FW 0x0009 216#define MBOX_CMD_ABOUT_FW 0x0009
217#define MBOX_CMD_PING 0x000B
340#define MBOX_CMD_LUN_RESET 0x0016 218#define MBOX_CMD_LUN_RESET 0x0016
341#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E 219#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
342#define MBOX_CMD_GET_FW_STATUS 0x001F 220#define MBOX_CMD_GET_FW_STATUS 0x001F
@@ -364,6 +242,17 @@ union external_hw_config_reg {
364#define MBOX_CMD_GET_FW_STATE 0x0069 242#define MBOX_CMD_GET_FW_STATE 0x0069
365#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A 243#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
366#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087 244#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
245#define MBOX_CMD_SET_ACB 0x0088
246#define MBOX_CMD_GET_ACB 0x0089
247#define MBOX_CMD_DISABLE_ACB 0x008A
248#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE 0x008B
249#define MBOX_CMD_GET_IPV6_DEST_CACHE 0x008C
250#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST 0x008D
251#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST 0x008E
252#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE 0x0090
253#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
254#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
255#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
367 256
368/* Mailbox 1 */ 257/* Mailbox 1 */
369#define FW_STATE_READY 0x0000 258#define FW_STATE_READY 0x0000
@@ -409,6 +298,16 @@ union external_hw_config_reg {
409#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D 298#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
410#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F 299#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
411#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021 300#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
301#define MBOX_ASTS_DUPLICATE_IP 0x8025
302#define MBOX_ASTS_ARP_COMPLETE 0x8026
303#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
304#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
305#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
306#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
307#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
308#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
309#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
310
412#define ISNS_EVENT_DATA_RECEIVED 0x0000 311#define ISNS_EVENT_DATA_RECEIVED 0x0000
413#define ISNS_EVENT_CONNECTION_OPENED 0x0001 312#define ISNS_EVENT_CONNECTION_OPENED 0x0001
414#define ISNS_EVENT_CONNECTION_FAILED 0x0002 313#define ISNS_EVENT_CONNECTION_FAILED 0x0002
@@ -418,137 +317,166 @@ union external_hw_config_reg {
418/*************************************************************************/ 317/*************************************************************************/
419 318
420/* Host Adapter Initialization Control Block (from host) */ 319/* Host Adapter Initialization Control Block (from host) */
421struct init_fw_ctrl_blk { 320struct addr_ctrl_blk {
422 uint8_t Version; /* 00 */ 321 uint8_t version; /* 00 */
423 uint8_t Control; /* 01 */ 322 uint8_t control; /* 01 */
424 323
425 uint16_t FwOptions; /* 02-03 */ 324 uint16_t fw_options; /* 02-03 */
426#define FWOPT_HEARTBEAT_ENABLE 0x1000 325#define FWOPT_HEARTBEAT_ENABLE 0x1000
427#define FWOPT_SESSION_MODE 0x0040 326#define FWOPT_SESSION_MODE 0x0040
428#define FWOPT_INITIATOR_MODE 0x0020 327#define FWOPT_INITIATOR_MODE 0x0020
429#define FWOPT_TARGET_MODE 0x0010 328#define FWOPT_TARGET_MODE 0x0010
430 329
431 uint16_t ExecThrottle; /* 04-05 */ 330 uint16_t exec_throttle; /* 04-05 */
432 uint8_t RetryCount; /* 06 */ 331 uint8_t zio_count; /* 06 */
433 uint8_t RetryDelay; /* 07 */ 332 uint8_t res0; /* 07 */
434 uint16_t MaxEthFrPayloadSize; /* 08-09 */ 333 uint16_t eth_mtu_size; /* 08-09 */
435 uint16_t AddFwOptions; /* 0A-0B */ 334 uint16_t add_fw_options; /* 0A-0B */
436 335
437 uint8_t HeartbeatInterval; /* 0C */ 336 uint8_t hb_interval; /* 0C */
438 uint8_t InstanceNumber; /* 0D */ 337 uint8_t inst_num; /* 0D */
439 uint16_t RES2; /* 0E-0F */ 338 uint16_t res1; /* 0E-0F */
440 uint16_t ReqQConsumerIndex; /* 10-11 */ 339 uint16_t rqq_consumer_idx; /* 10-11 */
441 uint16_t ComplQProducerIndex; /* 12-13 */ 340 uint16_t compq_producer_idx; /* 12-13 */
442 uint16_t ReqQLen; /* 14-15 */ 341 uint16_t rqq_len; /* 14-15 */
443 uint16_t ComplQLen; /* 16-17 */ 342 uint16_t compq_len; /* 16-17 */
444 uint32_t ReqQAddrLo; /* 18-1B */ 343 uint32_t rqq_addr_lo; /* 18-1B */
445 uint32_t ReqQAddrHi; /* 1C-1F */ 344 uint32_t rqq_addr_hi; /* 1C-1F */
446 uint32_t ComplQAddrLo; /* 20-23 */ 345 uint32_t compq_addr_lo; /* 20-23 */
447 uint32_t ComplQAddrHi; /* 24-27 */ 346 uint32_t compq_addr_hi; /* 24-27 */
448 uint32_t ShadowRegBufAddrLo; /* 28-2B */ 347 uint32_t shdwreg_addr_lo; /* 28-2B */
449 uint32_t ShadowRegBufAddrHi; /* 2C-2F */ 348 uint32_t shdwreg_addr_hi; /* 2C-2F */
450 349
451 uint16_t iSCSIOptions; /* 30-31 */ 350 uint16_t iscsi_opts; /* 30-31 */
452 351 uint16_t ipv4_tcp_opts; /* 32-33 */
453 uint16_t TCPOptions; /* 32-33 */ 352 uint16_t ipv4_ip_opts; /* 34-35 */
454 353
455 uint16_t IPOptions; /* 34-35 */ 354 uint16_t iscsi_max_pdu_size; /* 36-37 */
456 355 uint8_t ipv4_tos; /* 38 */
457 uint16_t MaxPDUSize; /* 36-37 */ 356 uint8_t ipv4_ttl; /* 39 */
458 uint16_t RcvMarkerInt; /* 38-39 */ 357 uint8_t acb_version; /* 3A */
459 uint16_t SndMarkerInt; /* 3A-3B */ 358 uint8_t res2; /* 3B */
460 uint16_t InitMarkerlessInt; /* 3C-3D */ 359 uint16_t def_timeout; /* 3C-3D */
461 uint16_t FirstBurstSize; /* 3E-3F */ 360 uint16_t iscsi_fburst_len; /* 3E-3F */
462 uint16_t DefaultTime2Wait; /* 40-41 */ 361 uint16_t iscsi_def_time2wait; /* 40-41 */
463 uint16_t DefaultTime2Retain; /* 42-43 */ 362 uint16_t iscsi_def_time2retain; /* 42-43 */
464 uint16_t MaxOutStndngR2T; /* 44-45 */ 363 uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
465 uint16_t KeepAliveTimeout; /* 46-47 */ 364 uint16_t conn_ka_timeout; /* 46-47 */
466 uint16_t PortNumber; /* 48-49 */ 365 uint16_t ipv4_port; /* 48-49 */
467 uint16_t MaxBurstSize; /* 4A-4B */ 366 uint16_t iscsi_max_burst_len; /* 4A-4B */
468 uint32_t RES4; /* 4C-4F */ 367 uint32_t res5; /* 4C-4F */
469 uint8_t IPAddr[4]; /* 50-53 */ 368 uint8_t ipv4_addr[4]; /* 50-53 */
470 uint8_t RES5[12]; /* 54-5F */ 369 uint16_t ipv4_vlan_tag; /* 54-55 */
471 uint8_t SubnetMask[4]; /* 60-63 */ 370 uint8_t ipv4_addr_state; /* 56 */
472 uint8_t RES6[12]; /* 64-6F */ 371 uint8_t ipv4_cacheid; /* 57 */
473 uint8_t GatewayIPAddr[4]; /* 70-73 */ 372 uint8_t res6[8]; /* 58-5F */
474 uint8_t RES7[12]; /* 74-7F */ 373 uint8_t ipv4_subnet[4]; /* 60-63 */
475 uint8_t PriDNSIPAddr[4]; /* 80-83 */ 374 uint8_t res7[12]; /* 64-6F */
476 uint8_t SecDNSIPAddr[4]; /* 84-87 */ 375 uint8_t ipv4_gw_addr[4]; /* 70-73 */
477 uint8_t RES8[8]; /* 88-8F */ 376 uint8_t res8[0xc]; /* 74-7F */
478 uint8_t Alias[32]; /* 90-AF */ 377 uint8_t pri_dns_srvr_ip[4];/* 80-83 */
479 uint8_t TargAddr[8]; /* B0-B7 *//* /FIXME: Remove?? */ 378 uint8_t sec_dns_srvr_ip[4];/* 84-87 */
480 uint8_t CHAPNameSecretsTable[8]; /* B8-BF */ 379 uint16_t min_eph_port; /* 88-89 */
481 uint8_t EthernetMACAddr[6]; /* C0-C5 */ 380 uint16_t max_eph_port; /* 8A-8B */
482 uint16_t TargetPortalGroup; /* C6-C7 */ 381 uint8_t res9[4]; /* 8C-8F */
483 uint8_t SendScale; /* C8 */ 382 uint8_t iscsi_alias[32];/* 90-AF */
484 uint8_t RecvScale; /* C9 */ 383 uint8_t res9_1[0x16]; /* B0-C5 */
485 uint8_t TypeOfService; /* CA */ 384 uint16_t tgt_portal_grp;/* C6-C7 */
486 uint8_t Time2Live; /* CB */ 385 uint8_t abort_timer; /* C8 */
487 uint16_t VLANPriority; /* CC-CD */ 386 uint8_t ipv4_tcp_wsf; /* C9 */
488 uint16_t Reserved8; /* CE-CF */ 387 uint8_t res10[6]; /* CA-CF */
489 uint8_t SecIPAddr[4]; /* D0-D3 */ 388 uint8_t ipv4_sec_ip_addr[4]; /* D0-D3 */
490 uint8_t Reserved9[12]; /* D4-DF */ 389 uint8_t ipv4_dhcp_vid_len; /* D4 */
491 uint8_t iSNSIPAddr[4]; /* E0-E3 */ 390 uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
492 uint16_t iSNSServerPortNumber; /* E4-E5 */ 391 uint8_t res11[20]; /* E0-F3 */
493 uint8_t Reserved10[10]; /* E6-EF */ 392 uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
494 uint8_t SLPDAIPAddr[4]; /* F0-F3 */ 393 uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
495 uint8_t Reserved11[12]; /* F4-FF */ 394 uint8_t iscsi_name[224]; /* 100-1DF */
496 uint8_t iSCSINameString[256]; /* 100-1FF */ 395 uint8_t res12[32]; /* 1E0-1FF */
396 uint32_t cookie; /* 200-203 */
397 uint16_t ipv6_port; /* 204-205 */
398 uint16_t ipv6_opts; /* 206-207 */
399 uint16_t ipv6_addtl_opts; /* 208-209 */
400 uint16_t ipv6_tcp_opts; /* 20A-20B */
401 uint8_t ipv6_tcp_wsf; /* 20C */
402 uint16_t ipv6_flow_lbl; /* 20D-20F */
403 uint8_t ipv6_gw_addr[16]; /* 210-21F */
404 uint16_t ipv6_vlan_tag; /* 220-221 */
405 uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
406 uint8_t ipv6_addr0_state; /* 223 */
407 uint8_t ipv6_addr1_state; /* 224 */
408 uint8_t ipv6_gw_state; /* 225 */
409 uint8_t ipv6_traffic_class; /* 226 */
410 uint8_t ipv6_hop_limit; /* 227 */
411 uint8_t ipv6_if_id[8]; /* 228-22F */
412 uint8_t ipv6_addr0[16]; /* 230-23F */
413 uint8_t ipv6_addr1[16]; /* 240-24F */
414 uint32_t ipv6_nd_reach_time; /* 250-253 */
415 uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
416 uint32_t ipv6_nd_stale_timeout; /* 258-25B */
417 uint8_t ipv6_dup_addr_detect_count; /* 25C */
418 uint8_t ipv6_cache_id; /* 25D */
419 uint8_t res13[18]; /* 25E-26F */
420 uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
421 uint8_t res14[140]; /* 274-2FF */
422};
423
424struct init_fw_ctrl_blk {
425 struct addr_ctrl_blk pri;
426 struct addr_ctrl_blk sec;
497}; 427};
498 428
499/*************************************************************************/ 429/*************************************************************************/
500 430
501struct dev_db_entry { 431struct dev_db_entry {
502 uint8_t options; /* 00 */ 432 uint16_t options; /* 00-01 */
503#define DDB_OPT_DISC_SESSION 0x10 433#define DDB_OPT_DISC_SESSION 0x10
504#define DDB_OPT_TARGET 0x02 /* device is a target */ 434#define DDB_OPT_TARGET 0x02 /* device is a target */
505 435
506 uint8_t control; /* 01 */ 436 uint16_t exec_throttle; /* 02-03 */
507 437 uint16_t exec_count; /* 04-05 */
508 uint16_t exeThrottle; /* 02-03 */ 438 uint16_t res0; /* 06-07 */
509 uint16_t exeCount; /* 04-05 */ 439 uint16_t iscsi_options; /* 08-09 */
510 uint8_t retryCount; /* 06 */ 440 uint16_t tcp_options; /* 0A-0B */
511 uint8_t retryDelay; /* 07 */ 441 uint16_t ip_options; /* 0C-0D */
512 uint16_t iSCSIOptions; /* 08-09 */ 442 uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
513 443 uint32_t res1; /* 10-13 */
514 uint16_t TCPOptions; /* 0A-0B */ 444 uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
515 445 uint16_t iscsi_first_burst_len; /* 16-17 */
516 uint16_t IPOptions; /* 0C-0D */ 446 uint16_t iscsi_def_time2wait; /* 18-19 */
517 447 uint16_t iscsi_def_time2retain; /* 1A-1B */
518 uint16_t maxPDUSize; /* 0E-0F */ 448 uint16_t iscsi_max_outsnd_r2t; /* 1C-1D */
519 uint16_t rcvMarkerInt; /* 10-11 */ 449 uint16_t ka_timeout; /* 1E-1F */
520 uint16_t sndMarkerInt; /* 12-13 */ 450 uint8_t isid[6]; /* 20-25 big-endian, must be converted
521 uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
522 uint16_t firstBurstSize; /* 16-17 */
523 uint16_t minTime2Wait; /* 18-19 : RA :default_time2wait */
524 uint16_t maxTime2Retain; /* 1A-1B */
525 uint16_t maxOutstndngR2T; /* 1C-1D */
526 uint16_t keepAliveTimeout; /* 1E-1F */
527 uint8_t ISID[6]; /* 20-25 big-endian, must be converted
528 * to little-endian */ 451 * to little-endian */
529 uint16_t TSID; /* 26-27 */ 452 uint16_t tsid; /* 26-27 */
530 uint16_t portNumber; /* 28-29 */ 453 uint16_t port; /* 28-29 */
531 uint16_t maxBurstSize; /* 2A-2B */ 454 uint16_t iscsi_max_burst_len; /* 2A-2B */
532 uint16_t taskMngmntTimeout; /* 2C-2D */ 455 uint16_t def_timeout; /* 2C-2D */
533 uint16_t reserved1; /* 2E-2F */ 456 uint16_t res2; /* 2E-2F */
534 uint8_t ipAddr[0x10]; /* 30-3F */ 457 uint8_t ip_addr[0x10]; /* 30-3F */
535 uint8_t iSCSIAlias[0x20]; /* 40-5F */ 458 uint8_t iscsi_alias[0x20]; /* 40-5F */
536 uint8_t targetAddr[0x20]; /* 60-7F */ 459 uint8_t tgt_addr[0x20]; /* 60-7F */
537 uint8_t userID[0x20]; /* 80-9F */ 460 uint16_t mss; /* 80-81 */
538 uint8_t password[0x20]; /* A0-BF */ 461 uint16_t res3; /* 82-83 */
539 uint8_t iscsiName[0x100]; /* C0-1BF : xxzzy Make this a 462 uint16_t lcl_port; /* 84-85 */
463 uint8_t ipv4_tos; /* 86 */
464 uint16_t ipv6_flow_lbl; /* 87-89 */
465 uint8_t res4[0x36]; /* 8A-BF */
466 uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
540 * pointer to a string so we 467 * pointer to a string so we
541 * don't have to reserve soooo 468 * don't have to reserve soooo
542 * much RAM */ 469 * much RAM */
543 uint16_t ddbLink; /* 1C0-1C1 */ 470 uint8_t ipv6_addr[0x10];/* 1A0-1AF */
544 uint16_t CHAPTableIndex; /* 1C2-1C3 */ 471 uint8_t res5[0x10]; /* 1B0-1BF */
545 uint16_t TargetPortalGroup; /* 1C4-1C5 */ 472 uint16_t ddb_link; /* 1C0-1C1 */
546 uint16_t reserved2[2]; /* 1C6-1C7 */ 473 uint16_t chap_tbl_idx; /* 1C2-1C3 */
547 uint32_t statSN; /* 1C8-1CB */ 474 uint16_t tgt_portal_grp; /* 1C4-1C5 */
548 uint32_t expStatSN; /* 1CC-1CF */ 475 uint8_t tcp_xmt_wsf; /* 1C6 */
549 uint16_t reserved3[0x2C]; /* 1D0-1FB */ 476 uint8_t tcp_rcv_wsf; /* 1C7 */
550 uint16_t ddbValidCookie; /* 1FC-1FD */ 477 uint32_t stat_sn; /* 1C8-1CB */
551 uint16_t ddbValidSize; /* 1FE-1FF */ 478 uint32_t exp_stat_sn; /* 1CC-1CF */
479 uint8_t res6[0x30]; /* 1D0-1FF */
552}; 480};
553 481
554/*************************************************************************/ 482/*************************************************************************/
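
The slimmed-down register and control-block structures above are firmware-visible, so every field's byte offset (the /* 0x.. */ comments) must hold exactly. The patch relies on careful layout rather than asserting it; a compile-time guard along these lines is a common safety net (a sketch, not part of this commit):

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */

/* Sketch only: fail the build if a firmware-shared layout drifts from
 * the offsets documented in the comments above. */
static inline void ql4_check_fw_layout(void)
{
	BUILD_BUG_ON(offsetof(struct dev_db_entry, isid) != 0x20);
	BUILD_BUG_ON(offsetof(struct dev_db_entry, ip_addr) != 0x30);
	BUILD_BUG_ON(offsetof(struct dev_db_entry, iscsi_name) != 0xC0);
	BUILD_BUG_ON(offsetof(struct dev_db_entry, ddb_link) != 0x1C0);
	BUILD_BUG_ON(sizeof(struct dev_db_entry) != 0x200);
}
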
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b00cb04e7c0..a3608e028bf6 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,9 @@
8#ifndef __QLA4x_GBL_H 8#ifndef __QLA4x_GBL_H
9#define __QLA4x_GBL_H 9#define __QLA4x_GBL_H
10 10
11struct iscsi_cls_conn;
12
13void qla4xxx_hw_reset(struct scsi_qla_host *ha);
11int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a); 14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
12int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port); 15int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
13int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb); 16int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
@@ -58,11 +61,13 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
58void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, 61void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
59 uint32_t intr_status); 62 uint32_t intr_status);
60int qla4xxx_init_rings(struct scsi_qla_host * ha); 63int qla4xxx_init_rings(struct scsi_qla_host * ha);
61struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index); 64struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
65 uint32_t index);
62void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb); 66void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
63int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); 67int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
64int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha, 68int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
65 uint32_t fw_ddb_index, uint32_t state); 69 uint32_t fw_ddb_index, uint32_t state);
70void qla4xxx_dump_buffer(void *b, uint32_t size);
66 71
67extern int ql4xextended_error_logging; 72extern int ql4xextended_error_logging;
68extern int ql4xdiscoverywait; 73extern int ql4xdiscoverywait;
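
The new `struct iscsi_cls_conn;` line at the top of this header is a forward declaration: the prototypes below can pass pointers to the type without pulling in scsi_transport_iscsi.h, keeping the prototype-only header light. In miniature (the function name here is hypothetical):

/* In a prototype-only header, an incomplete type suffices for pointers: */
struct iscsi_cls_conn;					/* forward declaration */
int ql4_example_conn_op(struct iscsi_cls_conn *conn);	/* hypothetical */

/* Only a .c file that dereferences the pointer needs the full definition,
 * via #include <scsi/scsi_transport_iscsi.h>. */
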
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 6365df268612..1e29f51d596b 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, 13static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
11 uint32_t fw_ddb_index); 14 uint32_t fw_ddb_index);
@@ -300,12 +303,12 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
300 if (!qla4xxx_fw_ready(ha)) 303 if (!qla4xxx_fw_ready(ha))
301 return status; 304 return status;
302 305
303 set_bit(AF_ONLINE, &ha->flags);
304 return qla4xxx_get_firmware_status(ha); 306 return qla4xxx_get_firmware_status(ha);
305} 307}
306 308
307static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha, 309static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
308 uint32_t fw_ddb_index) 310 uint32_t fw_ddb_index,
311 uint32_t *new_tgt)
309{ 312{
310 struct dev_db_entry *fw_ddb_entry = NULL; 313 struct dev_db_entry *fw_ddb_entry = NULL;
311 dma_addr_t fw_ddb_entry_dma; 314 dma_addr_t fw_ddb_entry_dma;
@@ -313,6 +316,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
313 int found = 0; 316 int found = 0;
314 uint32_t device_state; 317 uint32_t device_state;
315 318
319 *new_tgt = 0;
316 /* Make sure the dma buffer is valid */ 320 /* Make sure the dma buffer is valid */
317 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 321 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
318 sizeof(*fw_ddb_entry), 322 sizeof(*fw_ddb_entry),
@@ -337,7 +341,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
337 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no, 341 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
338 __func__, fw_ddb_index)); 342 __func__, fw_ddb_index));
339 list_for_each_entry(ddb_entry, &ha->ddb_list, list) { 343 list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
340 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName, 344 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
341 ISCSI_NAME_SIZE) == 0) { 345 ISCSI_NAME_SIZE) == 0) {
342 found++; 346 found++;
343 break; 347 break;
@@ -348,6 +352,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
348 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating " 352 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
349 "new ddb\n", ha->host_no, __func__, 353 "new ddb\n", ha->host_no, __func__,
350 fw_ddb_index)); 354 fw_ddb_index));
355 *new_tgt = 1;
351 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index); 356 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
352 } 357 }
353 358
@@ -409,26 +414,26 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
409 } 414 }
410 415
411 status = QLA_SUCCESS; 416 status = QLA_SUCCESS;
412 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID); 417 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
413 ddb_entry->task_mgmt_timeout = 418 ddb_entry->task_mgmt_timeout =
414 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout); 419 le16_to_cpu(fw_ddb_entry->def_timeout);
415 ddb_entry->CmdSn = 0; 420 ddb_entry->CmdSn = 0;
416 ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle); 421 ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
417 ddb_entry->default_relogin_timeout = 422 ddb_entry->default_relogin_timeout =
418 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout); 423 le16_to_cpu(fw_ddb_entry->def_timeout);
419 ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait); 424 ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
420 425
421 /* Update index in case it changed */ 426 /* Update index in case it changed */
422 ddb_entry->fw_ddb_index = fw_ddb_index; 427 ddb_entry->fw_ddb_index = fw_ddb_index;
423 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry; 428 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
424 429
425 ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber); 430 ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
426 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup); 431 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
427 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0], 432 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
428 min(sizeof(ddb_entry->iscsi_name), 433 min(sizeof(ddb_entry->iscsi_name),
429 sizeof(fw_ddb_entry->iscsiName))); 434 sizeof(fw_ddb_entry->iscsi_name)));
430 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0], 435 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
431 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr))); 436 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
432 437
433 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n", 438 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
434 ha->host_no, __func__, fw_ddb_index, 439 ha->host_no, __func__, fw_ddb_index,
@@ -495,6 +500,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
495 uint32_t ddb_state; 500 uint32_t ddb_state;
496 uint32_t conn_err, err_code; 501 uint32_t conn_err, err_code;
497 struct ddb_entry *ddb_entry; 502 struct ddb_entry *ddb_entry;
503 uint32_t new_tgt;
498 504
499 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n"); 505 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
500 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; 506 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
@@ -526,8 +532,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
526 "completed " 532 "completed "
527 "or access denied failure\n", 533 "or access denied failure\n",
528 ha->host_no, __func__)); 534 ha->host_no, __func__));
529 } else 535 } else {
530 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0); 536 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
537 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
538 NULL, 0, NULL, &next_fw_ddb_index,
539 &ddb_state, &conn_err, NULL, NULL)
540 == QLA_ERROR) {
541 DEBUG2(printk("scsi%ld: %s:"
542 "get_ddb_entry %d failed\n",
543 ha->host_no,
544 __func__, fw_ddb_index));
545 return QLA_ERROR;
546 }
547 }
531 } 548 }
532 549
533 if (ddb_state != DDB_DS_SESSION_ACTIVE) 550 if (ddb_state != DDB_DS_SESSION_ACTIVE)
@@ -540,7 +557,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
540 ha->host_no, __func__, fw_ddb_index)); 557 ha->host_no, __func__, fw_ddb_index));
541 558
542 /* Add DDB to our internal ddb list. */ 559 /* Add DDB to our internal ddb list. */
543 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index); 560 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
544 if (ddb_entry == NULL) { 561 if (ddb_entry == NULL) {
545 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory " 562 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
546 "for device at fw_ddb_index %d\n", 563 "for device at fw_ddb_index %d\n",
@@ -865,21 +882,20 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
865 882
866static void qla4x00_pci_config(struct scsi_qla_host *ha) 883static void qla4x00_pci_config(struct scsi_qla_host *ha)
867{ 884{
868 uint16_t w, mwi; 885 uint16_t w;
886 int status;
869 887
870 dev_info(&ha->pdev->dev, "Configuring PCI space...\n"); 888 dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
871 889
872 pci_set_master(ha->pdev); 890 pci_set_master(ha->pdev);
873 mwi = 0; 891 status = pci_set_mwi(ha->pdev);
874 if (pci_set_mwi(ha->pdev))
875 mwi = PCI_COMMAND_INVALIDATE;
876 /* 892 /*
877 * We want to respect framework's setting of PCI configuration space 893 * We want to respect framework's setting of PCI configuration space
878 * command register and also want to make sure that all bits of 894 * command register and also want to make sure that all bits of
879 * interest to us are properly set in command register. 895 * interest to us are properly set in command register.
880 */ 896 */
881 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 897 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
882 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 898 w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
883 w &= ~PCI_COMMAND_INTX_DISABLE; 899 w &= ~PCI_COMMAND_INTX_DISABLE;
884 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 900 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
885} 901}
@@ -911,6 +927,9 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
911 writel(set_rmask(NVR_WRITE_ENABLE), 927 writel(set_rmask(NVR_WRITE_ENABLE),
912 &ha->reg->u1.isp4022.nvram); 928 &ha->reg->u1.isp4022.nvram);
913 929
930 writel(2, &ha->reg->mailbox[6]);
931 readl(&ha->reg->mailbox[6]);
932
914 writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status); 933 writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
915 readl(&ha->reg->ctrl_status); 934 readl(&ha->reg->ctrl_status);
916 spin_unlock_irqrestore(&ha->hardware_lock, flags); 935 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -958,25 +977,25 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
958 return status; 977 return status;
959} 978}
960 979
961int ql4xxx_lock_drvr_wait(struct scsi_qla_host *ha) 980int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
962{ 981{
963#define QL4_LOCK_DRVR_WAIT 30 982#define QL4_LOCK_DRVR_WAIT 60
964#define QL4_LOCK_DRVR_SLEEP 1 983#define QL4_LOCK_DRVR_SLEEP 1
965 984
966 int drvr_wait = QL4_LOCK_DRVR_WAIT; 985 int drvr_wait = QL4_LOCK_DRVR_WAIT;
967 while (drvr_wait) { 986 while (drvr_wait) {
968 if (ql4xxx_lock_drvr(ha) == 0) { 987 if (ql4xxx_lock_drvr(a) == 0) {
969 ssleep(QL4_LOCK_DRVR_SLEEP); 988 ssleep(QL4_LOCK_DRVR_SLEEP);
970 if (drvr_wait) { 989 if (drvr_wait) {
971 DEBUG2(printk("scsi%ld: %s: Waiting for " 990 DEBUG2(printk("scsi%ld: %s: Waiting for "
972 "Global Init Semaphore(%d)...n", 991 "Global Init Semaphore(%d)...\n",
973 ha->host_no, 992 a->host_no,
974 __func__, drvr_wait)); 993 __func__, drvr_wait));
975 } 994 }
976 drvr_wait -= QL4_LOCK_DRVR_SLEEP; 995 drvr_wait -= QL4_LOCK_DRVR_SLEEP;
977 } else { 996 } else {
978 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " 997 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
979 "acquired.n", ha->host_no, __func__)); 998 "acquired\n", a->host_no, __func__));
980 return QLA_SUCCESS; 999 return QLA_SUCCESS;
981 } 1000 }
982 } 1001 }
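
The ql4xxx_lock_drvr_wait() hunk above doubles the wait budget from 30 to 60 seconds but keeps the same shape: try the hardware semaphore, sleep one second on failure, and give up once the budget runs out. Stripped of driver specifics, the pattern looks like this (a sketch; try_lock is a caller-supplied predicate returning nonzero on success):

#include <linux/delay.h>
#include <linux/errno.h>

/* Sketch: poll a hardware semaphore with a bounded, sleeping retry loop. */
static int acquire_with_timeout(int (*try_lock)(void *), void *arg,
				int budget_secs)
{
	while (budget_secs > 0) {
		if (try_lock(arg))
			return 0;	/* acquired */
		ssleep(1);		/* sleep between attempts */
		budget_secs--;
	}
	return -ETIMEDOUT;
}
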
@@ -1125,17 +1144,17 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1125 1144
1126 /* Initialize the Host adapter request/response queues and firmware */ 1145 /* Initialize the Host adapter request/response queues and firmware */
1127 if (qla4xxx_start_firmware(ha) == QLA_ERROR) 1146 if (qla4xxx_start_firmware(ha) == QLA_ERROR)
1128 return status; 1147 goto exit_init_hba;
1129 1148
1130 if (qla4xxx_validate_mac_address(ha) == QLA_ERROR) 1149 if (qla4xxx_validate_mac_address(ha) == QLA_ERROR)
1131 return status; 1150 goto exit_init_hba;
1132 1151
1133 if (qla4xxx_init_local_data(ha) == QLA_ERROR) 1152 if (qla4xxx_init_local_data(ha) == QLA_ERROR)
1134 return status; 1153 goto exit_init_hba;
1135 1154
1136 status = qla4xxx_init_firmware(ha); 1155 status = qla4xxx_init_firmware(ha);
1137 if (status == QLA_ERROR) 1156 if (status == QLA_ERROR)
1138 return status; 1157 goto exit_init_hba;
1139 1158
1140 /* 1159 /*
1141 * FW is waiting to get an IP address from DHCP server: Skip building 1160 * FW is waiting to get an IP address from DHCP server: Skip building
@@ -1143,12 +1162,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1143 * followed by 0x8014 aen" to trigger the tgt discovery process. 1162 * followed by 0x8014 aen" to trigger the tgt discovery process.
1144 */ 1163 */
1145 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) 1164 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS)
1146 return status; 1165 goto exit_init_online;
1147 1166
1148 /* Skip device discovery if ip and subnet is zero */ 1167 /* Skip device discovery if ip and subnet is zero */
1149 if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 || 1168 if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
1150 memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0) 1169 memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
1151 return status; 1170 goto exit_init_online;
1152 1171
1153 if (renew_ddb_list == PRESERVE_DDB_LIST) { 1172 if (renew_ddb_list == PRESERVE_DDB_LIST) {
1154 /* 1173 /*
@@ -1177,9 +1196,10 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1177 ha->host_no)); 1196 ha->host_no));
1178 } 1197 }
1179 1198
1180 exit_init_hba: 1199exit_init_online:
1200 set_bit(AF_ONLINE, &ha->flags);
1201exit_init_hba:
1181 return status; 1202 return status;
1182
1183} 1203}
1184 1204
1185/** 1205/**
@@ -1193,9 +1213,10 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1193 uint32_t fw_ddb_index) 1213 uint32_t fw_ddb_index)
1194{ 1214{
1195 struct ddb_entry * ddb_entry; 1215 struct ddb_entry * ddb_entry;
1216 uint32_t new_tgt;
1196 1217
1197 /* First allocate a device structure */ 1218 /* First allocate a device structure */
1198 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index); 1219 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
1199 if (ddb_entry == NULL) { 1220 if (ddb_entry == NULL) {
1200 DEBUG2(printk(KERN_WARNING 1221 DEBUG2(printk(KERN_WARNING
1201 "scsi%ld: Unable to allocate memory to add " 1222 "scsi%ld: Unable to allocate memory to add "
@@ -1203,6 +1224,18 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1203 return; 1224 return;
1204 } 1225 }
1205 1226
1227 if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
1228 /* Target has been bound to a new fw_ddb_index */
1229 qla4xxx_free_ddb(ha, ddb_entry);
1230 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
1231 if (ddb_entry == NULL) {
1232 DEBUG2(printk(KERN_WARNING
1233 "scsi%ld: Unable to allocate memory"
1234 " to add fw_ddb_index %d\n",
1235 ha->host_no, fw_ddb_index));
1236 return;
1237 }
1238 }
1206 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) == 1239 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
1207 QLA_ERROR) { 1240 QLA_ERROR) {
1208 ha->fw_ddb_index_map[fw_ddb_index] = 1241 ha->fw_ddb_index_map[fw_ddb_index] =
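
Two related changes run through qla4xxx_initialize_adapter() above: each early `return status` becomes a `goto`, and setting AF_ONLINE moves out of qla4xxx_init_firmware() to a label that is reached only after initialization has actually succeeded (or is merely waiting on DHCP). The resulting control flow is the kernel's usual single-exit ladder, roughly as in this sketch (the step functions are placeholders for the real start-firmware/init-firmware/discovery calls):

/* Sketch: placeholder steps stand in for the real init sequence. */
static int init_adapter_sketch(struct scsi_qla_host *ha)
{
	int status = QLA_ERROR;

	if (start_fw_sketch(ha) == QLA_ERROR)
		goto exit_init_hba;		/* stay offline on failure */

	status = init_fw_sketch(ha);
	if (status == QLA_ERROR)
		goto exit_init_hba;

	if (dhcp_pending_sketch(ha))
		goto exit_init_online;		/* online; discovery deferred */

	discovery_sketch(ha);

exit_init_online:
	set_bit(AF_ONLINE, &ha->flags);		/* only once init succeeded */
exit_init_hba:
	return status;
}
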
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index a216a1781afb..5006ecb3ef5e 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -6,6 +6,10 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
12
9 13
10#include <scsi/scsi_tcq.h> 14#include <scsi/scsi_tcq.h>
11 15
@@ -141,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
141 uint16_t avail_dsds; 145 uint16_t avail_dsds;
142 struct data_seg_a64 *cur_dsd; 146 struct data_seg_a64 *cur_dsd;
143 struct scsi_cmnd *cmd; 147 struct scsi_cmnd *cmd;
148 struct scatterlist *sg;
149 int i;
144 150
145 cmd = srb->cmd; 151 cmd = srb->cmd;
146 ha = srb->ha; 152 ha = srb->ha;
147 153
148 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) { 154 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
149 /* No data being transferred */ 155 /* No data being transferred */
150 cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0); 156 cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
151 return; 157 return;
@@ -154,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
154 avail_dsds = COMMAND_SEG; 160 avail_dsds = COMMAND_SEG;
155 cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]); 161 cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
156 162
157 /* Load data segments */ 163 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
158 if (cmd->use_sg) { 164 dma_addr_t sle_dma;
159 struct scatterlist *cur_seg; 165
160 struct scatterlist *end_seg; 166 /* Allocate additional continuation packets? */
161 167 if (avail_dsds == 0) {
162 cur_seg = (struct scatterlist *)cmd->request_buffer; 168 struct continuation_t1_entry *cont_entry;
163 end_seg = cur_seg + tot_dsds; 169
164 while (cur_seg < end_seg) { 170 cont_entry = qla4xxx_alloc_cont_entry(ha);
165 dma_addr_t sle_dma; 171 cur_dsd =
166 172 (struct data_seg_a64 *)
167 /* Allocate additional continuation packets? */ 173 &cont_entry->dataseg[0];
168 if (avail_dsds == 0) { 174 avail_dsds = CONTINUE_SEG;
169 struct continuation_t1_entry *cont_entry;
170
171 cont_entry = qla4xxx_alloc_cont_entry(ha);
172 cur_dsd =
173 (struct data_seg_a64 *)
174 &cont_entry->dataseg[0];
175 avail_dsds = CONTINUE_SEG;
176 }
177
178 sle_dma = sg_dma_address(cur_seg);
179 cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
180 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
181 cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
182 avail_dsds--;
183
184 cur_dsd++;
185 cur_seg++;
186 } 175 }
187 } else { 176
188 cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle)); 177 sle_dma = sg_dma_address(sg);
189 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle)); 178 cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
190 cur_dsd->count = cpu_to_le32(cmd->request_bufflen); 179 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
180 cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
181 avail_dsds--;
182
183 cur_dsd++;
191 } 184 }
192} 185}
193 186
@@ -204,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
204 struct scsi_cmnd *cmd = srb->cmd; 197 struct scsi_cmnd *cmd = srb->cmd;
205 struct ddb_entry *ddb_entry; 198 struct ddb_entry *ddb_entry;
206 struct command_t3_entry *cmd_entry; 199 struct command_t3_entry *cmd_entry;
207 struct scatterlist *sg = NULL;
208 200
201 int nseg;
209 uint16_t tot_dsds; 202 uint16_t tot_dsds;
210 uint16_t req_cnt; 203 uint16_t req_cnt;
211 204
@@ -233,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
233 index = (uint32_t)cmd->request->tag; 226 index = (uint32_t)cmd->request->tag;
234 227
235 /* Calculate the number of request entries needed. */ 228 /* Calculate the number of request entries needed. */
236 if (cmd->use_sg) { 229 nseg = scsi_dma_map(cmd);
237 sg = (struct scatterlist *)cmd->request_buffer; 230 if (nseg < 0)
238 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg, 231 goto queuing_error;
239 cmd->sc_data_direction); 232 tot_dsds = nseg;
240 if (tot_dsds == 0) 233
241 goto queuing_error;
242 } else if (cmd->request_bufflen) {
243 dma_addr_t req_dma;
244
245 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
246 cmd->request_bufflen,
247 cmd->sc_data_direction);
248 if (dma_mapping_error(req_dma))
249 goto queuing_error;
250
251 srb->dma_handle = req_dma;
252 tot_dsds = 1;
253 }
254 req_cnt = qla4xxx_calc_request_entries(tot_dsds); 234 req_cnt = qla4xxx_calc_request_entries(tot_dsds);
255 235
256 if (ha->req_q_count < (req_cnt + 2)) { 236 if (ha->req_q_count < (req_cnt + 2)) {
@@ -279,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
279 259
280 int_to_scsilun(cmd->device->lun, &cmd_entry->lun); 260 int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
281 cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn); 261 cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
282 cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen); 262 cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
283 memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len); 263 memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
284 cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds); 264 cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
285 cmd_entry->hdr.entryCount = req_cnt; 265 cmd_entry->hdr.entryCount = req_cnt;
@@ -289,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
289 * transferred, as the data direction bit is sometimes filled 269 * transferred, as the data direction bit is sometimes filled
290 * in when there is no data to be transferred */ 270 * in when there is no data to be transferred */
291 cmd_entry->control_flags = CF_NO_DATA; 271 cmd_entry->control_flags = CF_NO_DATA;
292 if (cmd->request_bufflen) { 272 if (scsi_bufflen(cmd)) {
293 if (cmd->sc_data_direction == DMA_TO_DEVICE) 273 if (cmd->sc_data_direction == DMA_TO_DEVICE)
294 cmd_entry->control_flags = CF_WRITE; 274 cmd_entry->control_flags = CF_WRITE;
295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 275 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
296 cmd_entry->control_flags = CF_READ; 276 cmd_entry->control_flags = CF_READ;
297 277
298 ha->bytes_xfered += cmd->request_bufflen; 278 ha->bytes_xfered += scsi_bufflen(cmd);
299 if (ha->bytes_xfered & ~0xFFFFF){ 279 if (ha->bytes_xfered & ~0xFFFFF){
300 ha->total_mbytes_xferred += ha->bytes_xfered >> 20; 280 ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
301 ha->bytes_xfered &= 0xFFFFF; 281 ha->bytes_xfered &= 0xFFFFF;
@@ -359,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
359 return QLA_SUCCESS; 339 return QLA_SUCCESS;
360 340
361queuing_error: 341queuing_error:
342 if (tot_dsds)
343 scsi_dma_unmap(cmd);
362 344
363 if (cmd->use_sg && tot_dsds) {
364 sg = (struct scatterlist *) cmd->request_buffer;
365 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
366 cmd->sc_data_direction);
367 } else if (tot_dsds)
368 pci_unmap_single(ha->pdev, srb->dma_handle,
369 cmd->request_bufflen, cmd->sc_data_direction);
370 spin_unlock_irqrestore(&ha->hardware_lock, flags); 345 spin_unlock_irqrestore(&ha->hardware_lock, flags);
371 346
372 return QLA_ERROR; 347 return QLA_ERROR;
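
This file is the clearest instance of the conversion named throughout the merge log: the old dual path (pci_map_sg() for scatter-gather commands, pci_map_single() for a flat buffer) collapses into the midlayer's data buffer accessors, which always present the command as a scatterlist. The queuecommand-side shape is roughly this sketch (build_dsd stands in for filling one hardware descriptor):

#include <scsi/scsi_cmnd.h>

extern void build_dsd(dma_addr_t addr, unsigned int len);	/* placeholder */

/* Sketch: map a command's data buffer and walk its DMA segments with the
 * 2.6.23-era accessors; no more use_sg/request_buffer special cases. */
static int map_and_build(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* 0 = no data, < 0 = mapping error */
	if (nseg < 0)
		return nseg;

	scsi_for_each_sg(cmd, sg, nseg, i)
		build_dsd(sg_dma_address(sg), sg_dma_len(sg));

	/* on a later queuing failure the caller must scsi_dma_unmap(cmd) */
	return 0;
}
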
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 35b9e36a0e8d..4a154beb0d39 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10/** 13/**
11 * qla2x00_process_completed_request() - Process a Fast Post response. 14 * qla2x00_process_completed_request() - Process a Fast Post response.
@@ -90,9 +93,29 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
90 break; 93 break;
91 } 94 }
92 95
93 if (sts_entry->iscsiFlags & 96 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
94 (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER)) 97 cmd->result = DID_ERROR << 16;
95 cmd->resid = residual; 98 break;
99 }
100
101 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
102 scsi_set_resid(cmd, residual);
103 if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
104 cmd->underflow)) {
105
106 cmd->result = DID_ERROR << 16;
107
108 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
109 "Mid-layer Data underrun0, "
110 "xferlen = 0x%x, "
111 "residual = 0x%x\n", ha->host_no,
112 cmd->device->channel,
113 cmd->device->id,
114 cmd->device->lun, __func__,
115 scsi_bufflen(cmd), residual));
116 break;
117 }
118 }
96 119
97 cmd->result = DID_OK << 16 | scsi_status; 120 cmd->result = DID_OK << 16 | scsi_status;
98 121
@@ -161,7 +184,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
161 184
162 case SCS_DATA_UNDERRUN: 185 case SCS_DATA_UNDERRUN:
163 case SCS_DATA_OVERRUN: 186 case SCS_DATA_OVERRUN:
164 if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) { 187 if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
188 (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
165 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, " 189 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
166 "residual = 0x%x\n", ha->host_no, 190 "residual = 0x%x\n", ha->host_no,
167 cmd->device->channel, cmd->device->id, 191 cmd->device->channel, cmd->device->id,
@@ -171,21 +195,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
171 break; 195 break;
172 } 196 }
173 197
174 if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) { 198 scsi_set_resid(cmd, residual);
175 /*
176 * Firmware detected a SCSI transport underrun
177 * condition
178 */
179 cmd->resid = residual;
180 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
181 "detected, xferlen = 0x%x, residual = "
182 "0x%x\n",
183 ha->host_no, cmd->device->channel,
184 cmd->device->id,
185 cmd->device->lun, __func__,
186 cmd->request_bufflen,
187 residual));
188 }
189 199
190 /* 200 /*
191 * If there is scsi_status, it takes precedence over 201 * If there is scsi_status, it takes precedence over
@@ -227,7 +237,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
227 if ((sts_entry->iscsiFlags & 237 if ((sts_entry->iscsiFlags &
228 ISCSI_FLAG_RESIDUAL_UNDER) == 0) { 238 ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
229 cmd->result = DID_BUS_BUSY << 16; 239 cmd->result = DID_BUS_BUSY << 16;
230 } else if ((cmd->request_bufflen - residual) < 240 } else if ((scsi_bufflen(cmd) - residual) <
231 cmd->underflow) { 241 cmd->underflow) {
232 /* 242 /*
233 * Handle mid-layer underflow??? 243 * Handle mid-layer underflow???
@@ -242,13 +252,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
242 * will return DID_ERROR. 252 * will return DID_ERROR.
243 */ 253 */
244 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " 254 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
245 "Mid-layer Data underrun, " 255 "Mid-layer Data underrun1, "
246 "xferlen = 0x%x, " 256 "xferlen = 0x%x, "
247 "residual = 0x%x\n", ha->host_no, 257 "residual = 0x%x\n", ha->host_no,
248 cmd->device->channel, 258 cmd->device->channel,
249 cmd->device->id, 259 cmd->device->id,
250 cmd->device->lun, __func__, 260 cmd->device->lun, __func__,
251 cmd->request_bufflen, residual)); 261 scsi_bufflen(cmd), residual));
252 262
253 cmd->result = DID_ERROR << 16; 263 cmd->result = DID_ERROR << 16;
254 } else { 264 } else {
@@ -417,6 +427,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
417 uint32_t mbox_status) 427 uint32_t mbox_status)
418{ 428{
419 int i; 429 int i;
430 uint32_t mbox_stat2, mbox_stat3;
420 431
421 if ((mbox_status == MBOX_STS_BUSY) || 432 if ((mbox_status == MBOX_STS_BUSY) ||
422 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || 433 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -437,6 +448,12 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
437 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 448 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
438 /* Immediately process the AENs that don't require much work. 449 /* Immediately process the AENs that don't require much work.
439 * Only queue the database_changed AENs */ 450 * Only queue the database_changed AENs */
451 if (ha->aen_log.count < MAX_AEN_ENTRIES) {
452 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
453 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
454 readl(&ha->reg->mailbox[i]);
455 ha->aen_log.count++;
456 }
440 switch (mbox_status) { 457 switch (mbox_status) {
441 case MBOX_ASTS_SYSTEM_ERROR: 458 case MBOX_ASTS_SYSTEM_ERROR:
442 /* Log Mailbox registers */ 459 /* Log Mailbox registers */
@@ -493,6 +510,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
493 mbox_status)); 510 mbox_status));
494 break; 511 break;
495 512
513 case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
514 mbox_stat2 = readl(&ha->reg->mailbox[2]);
515 mbox_stat3 = readl(&ha->reg->mailbox[3]);
516
517 if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
518 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
519 else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
520 set_bit(DPC_RESET_HA, &ha->dpc_flags);
521 break;
522
496 case MBOX_ASTS_MAC_ADDRESS_CHANGED: 523 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
497 case MBOX_ASTS_DNS: 524 case MBOX_ASTS_DNS:
498 /* No action */ 525 /* No action */
@@ -518,11 +545,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
518 /* Queue AEN information and process it in the DPC 545 /* Queue AEN information and process it in the DPC
519 * routine */ 546 * routine */
520 if (ha->aen_q_count > 0) { 547 if (ha->aen_q_count > 0) {
521 /* advance pointer */
522 if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
523 ha->aen_in = 0;
524 else
525 ha->aen_in++;
526 548
527 /* decrement available counter */ 549 /* decrement available counter */
528 ha->aen_q_count--; 550 ha->aen_q_count--;
@@ -542,6 +564,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
542 ha->aen_q[ha->aen_in].mbox_sts[2], 564 ha->aen_q[ha->aen_in].mbox_sts[2],
543 ha->aen_q[ha->aen_in].mbox_sts[3], 565 ha->aen_q[ha->aen_in].mbox_sts[3],
544 ha->aen_q[ha->aen_in]. mbox_sts[4])); 566 ha->aen_q[ha->aen_in]. mbox_sts[4]));
567 /* advance pointer */
568 ha->aen_in++;
569 if (ha->aen_in == MAX_AEN_ENTRIES)
570 ha->aen_in = 0;
545 571
546 /* The DPC routine will process the aen */ 572 /* The DPC routine will process the aen */
547 set_bit(DPC_AEN, &ha->dpc_flags); 573 set_bit(DPC_AEN, &ha->dpc_flags);
@@ -724,25 +750,24 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
724 750
725 spin_lock_irqsave(&ha->hardware_lock, flags); 751 spin_lock_irqsave(&ha->hardware_lock, flags);
726 while (ha->aen_out != ha->aen_in) { 752 while (ha->aen_out != ha->aen_in) {
727 /* Advance pointers for next entry */
728 if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
729 ha->aen_out = 0;
730 else
731 ha->aen_out++;
732
733 ha->aen_q_count++;
734 aen = &ha->aen_q[ha->aen_out]; 753 aen = &ha->aen_q[ha->aen_out];
735
736 /* copy aen information to local structure */ 754 /* copy aen information to local structure */
737 for (i = 0; i < MBOX_AEN_REG_COUNT; i++) 755 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
738 mbox_sts[i] = aen->mbox_sts[i]; 756 mbox_sts[i] = aen->mbox_sts[i];
739 757
758 ha->aen_q_count++;
759 ha->aen_out++;
760
761 if (ha->aen_out == MAX_AEN_ENTRIES)
762 ha->aen_out = 0;
763
740 spin_unlock_irqrestore(&ha->hardware_lock, flags); 764 spin_unlock_irqrestore(&ha->hardware_lock, flags);
741 765
742 DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x " 766 DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
743 "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out, 767 " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
744 mbox_sts[0], mbox_sts[2], mbox_sts[3], 768 (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
745 mbox_sts[1], mbox_sts[4])); 769 mbox_sts[0], mbox_sts[1], mbox_sts[2],
770 mbox_sts[3], mbox_sts[4]));
746 771
747 switch (mbox_sts[0]) { 772 switch (mbox_sts[0]) {
748 case MBOX_ASTS_DATABASE_CHANGED: 773 case MBOX_ASTS_DATABASE_CHANGED:
@@ -792,6 +817,5 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
792 spin_lock_irqsave(&ha->hardware_lock, flags); 817 spin_lock_irqsave(&ha->hardware_lock, flags);
793 } 818 }
794 spin_unlock_irqrestore(&ha->hardware_lock, flags); 819 spin_unlock_irqrestore(&ha->hardware_lock, flags);
795
796} 820}
797 821
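The aen_in/aen_out hunks above replace the old pre-advance wraparound (bump the index first, then touch the entry) with a consume-then-advance idiom, so ha->aen_q[ha->aen_in] and ha->aen_q[ha->aen_out] always name the slot currently being filled or drained. A minimal sketch of the consumer side as it now reads, using the driver's own fields (the helper name and the aen entry type name are assumptions):

    /* Drain one AEN: use the entry first, then free the slot and advance. */
    static void aen_dequeue_one(struct scsi_qla_host *ha,
                                uint32_t mbox_sts[MBOX_AEN_REG_COUNT])
    {
        struct aen_entry *aen = &ha->aen_q[ha->aen_out];  /* type name assumed */
        int i;

        for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
            mbox_sts[i] = aen->mbox_sts[i];   /* copy out before advancing */

        ha->aen_q_count++;                    /* slot is free again */
        ha->aen_out++;                        /* advance ... */
        if (ha->aen_out == MAX_AEN_ENTRIES)
            ha->aen_out = 0;                  /* ... and wrap */
    }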
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index f116ff917237..35cd73c72a68 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10 13
11/** 14/**
@@ -169,84 +172,6 @@ mbox_exit:
169 return status; 172 return status;
170} 173}
171 174
172
173#if 0
174
175/**
176 * qla4xxx_issue_iocb - issue mailbox iocb command
177 * @ha: adapter state pointer.
178 * @buffer: buffer pointer.
179 * @phys_addr: physical address of buffer.
180 * @size: size of buffer.
181 *
182 * Issues iocbs via mailbox commands.
183 * TARGET_QUEUE_LOCK must be released.
184 * ADAPTER_STATE_LOCK must be released.
185 **/
186int
187qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
188 dma_addr_t phys_addr, size_t size)
189{
190 uint32_t mbox_cmd[MBOX_REG_COUNT];
191 uint32_t mbox_sts[MBOX_REG_COUNT];
192 int status;
193
194 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
195 memset(&mbox_sts, 0, sizeof(mbox_sts));
196 mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
197 mbox_cmd[1] = 0;
198 mbox_cmd[2] = LSDW(phys_addr);
199 mbox_cmd[3] = MSDW(phys_addr);
200 status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
201 return status;
202}
203
204int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
205 uint16_t fw_ddb_index,
206 uint16_t connection_id,
207 uint16_t option)
208{
209 uint32_t mbox_cmd[MBOX_REG_COUNT];
210 uint32_t mbox_sts[MBOX_REG_COUNT];
211
212 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
213 memset(&mbox_sts, 0, sizeof(mbox_sts));
214 mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
215 mbox_cmd[1] = fw_ddb_index;
216 mbox_cmd[2] = connection_id;
217 mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
218 if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
219 QLA_SUCCESS) {
220 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
221 "option %04x failed sts %04X %04X",
222 ha->host_no, __func__,
223 option, mbox_sts[0], mbox_sts[1]));
224 if (mbox_sts[0] == 0x4005)
225 DEBUG2(printk("%s reason %04X\n", __func__,
226 mbox_sts[1]));
227 }
228 return QLA_SUCCESS;
229}
230
231int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
232 uint16_t fw_ddb_index)
233{
234 uint32_t mbox_cmd[MBOX_REG_COUNT];
235 uint32_t mbox_sts[MBOX_REG_COUNT];
236
237 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
238 memset(&mbox_sts, 0, sizeof(mbox_sts));
239 mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
240 mbox_cmd[1] = fw_ddb_index;
241 if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
242 QLA_SUCCESS)
243 return QLA_ERROR;
244
245 return QLA_SUCCESS;
246}
247
248#endif /* 0 */
249
250/** 175/**
251 * qla4xxx_initialize_fw_cb - initializes firmware control block. 176 * qla4xxx_initialize_fw_cb - initializes firmware control block.
252 * @ha: Pointer to host adapter structure. 177 * @ha: Pointer to host adapter structure.
@@ -272,10 +197,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
272 /* Get Initialize Firmware Control Block. */ 197 /* Get Initialize Firmware Control Block. */
273 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 198 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
274 memset(&mbox_sts, 0, sizeof(mbox_sts)); 199 memset(&mbox_sts, 0, sizeof(mbox_sts));
200
275 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; 201 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
276 mbox_cmd[2] = LSDW(init_fw_cb_dma); 202 mbox_cmd[2] = LSDW(init_fw_cb_dma);
277 mbox_cmd[3] = MSDW(init_fw_cb_dma); 203 mbox_cmd[3] = MSDW(init_fw_cb_dma);
278 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) != 204 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
205
206 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
279 QLA_SUCCESS) { 207 QLA_SUCCESS) {
280 dma_free_coherent(&ha->pdev->dev, 208 dma_free_coherent(&ha->pdev->dev,
281 sizeof(struct init_fw_ctrl_blk), 209 sizeof(struct init_fw_ctrl_blk),
@@ -287,51 +215,56 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
287 qla4xxx_init_rings(ha); 215 qla4xxx_init_rings(ha);
288 216
289 /* Fill in the request and response queue information. */ 217 /* Fill in the request and response queue information. */
290 init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out); 218 init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
291 init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in); 219 init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
292 init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH); 220 init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
293 init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH); 221 init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
294 init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma)); 222 init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
295 init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma)); 223 init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
296 init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma)); 224 init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
297 init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma)); 225 init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
298 init_fw_cb->ShadowRegBufAddrLo = 226 init_fw_cb->pri.shdwreg_addr_lo =
299 cpu_to_le32(LSDW(ha->shadow_regs_dma)); 227 cpu_to_le32(LSDW(ha->shadow_regs_dma));
300 init_fw_cb->ShadowRegBufAddrHi = 228 init_fw_cb->pri.shdwreg_addr_hi =
301 cpu_to_le32(MSDW(ha->shadow_regs_dma)); 229 cpu_to_le32(MSDW(ha->shadow_regs_dma));
302 230
303 /* Set up required options. */ 231 /* Set up required options. */
304 init_fw_cb->FwOptions |= 232 init_fw_cb->pri.fw_options |=
305 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 233 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
306 FWOPT_INITIATOR_MODE); 234 FWOPT_INITIATOR_MODE);
307 init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); 235 init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
308 236
309 /* Save some info in adapter structure. */ 237 /* Save some info in adapter structure. */
310 ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions); 238 ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
311 ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions); 239 ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
312 ha->heartbeat_interval = init_fw_cb->HeartbeatInterval; 240 ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
313 memcpy(ha->ip_address, init_fw_cb->IPAddr, 241 memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
314 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr))); 242 min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
315 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask, 243 memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
316 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask))); 244 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
317 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr, 245 memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
318 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr))); 246 min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
319 memcpy(ha->name_string, init_fw_cb->iSCSINameString, 247 memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
320 min(sizeof(ha->name_string), 248 min(sizeof(ha->name_string),
321 sizeof(init_fw_cb->iSCSINameString))); 249 sizeof(init_fw_cb->pri.iscsi_name)));
322 memcpy(ha->alias, init_fw_cb->Alias, 250 /*memcpy(ha->alias, init_fw_cb->Alias,
323 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias))); 251 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
324 252
325 /* Save Command Line Parameter info */ 253 /* Save Command Line Parameter info */
326 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout); 254 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
327 ha->discovery_wait = ql4xdiscoverywait; 255 ha->discovery_wait = ql4xdiscoverywait;
328 256
329 /* Send Initialize Firmware Control Block. */ 257 /* Send Initialize Firmware Control Block. */
258 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
259 memset(&mbox_sts, 0, sizeof(mbox_sts));
260
330 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 261 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
331 mbox_cmd[1] = 0; 262 mbox_cmd[1] = 0;
332 mbox_cmd[2] = LSDW(init_fw_cb_dma); 263 mbox_cmd[2] = LSDW(init_fw_cb_dma);
333 mbox_cmd[3] = MSDW(init_fw_cb_dma); 264 mbox_cmd[3] = MSDW(init_fw_cb_dma);
334 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) == 265 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
266
267 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
335 QLA_SUCCESS) 268 QLA_SUCCESS)
336 status = QLA_SUCCESS; 269 status = QLA_SUCCESS;
337 else { 270 else {
@@ -368,12 +301,14 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
368 /* Get Initialize Firmware Control Block. */ 301 /* Get Initialize Firmware Control Block. */
369 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 302 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
370 memset(&mbox_sts, 0, sizeof(mbox_sts)); 303 memset(&mbox_sts, 0, sizeof(mbox_sts));
304
371 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk)); 305 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
372 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; 306 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
373 mbox_cmd[2] = LSDW(init_fw_cb_dma); 307 mbox_cmd[2] = LSDW(init_fw_cb_dma);
374 mbox_cmd[3] = MSDW(init_fw_cb_dma); 308 mbox_cmd[3] = MSDW(init_fw_cb_dma);
309 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
375 310
376 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) != 311 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
377 QLA_SUCCESS) { 312 QLA_SUCCESS) {
378 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", 313 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
379 ha->host_no, __func__)); 314 ha->host_no, __func__));
@@ -384,12 +319,12 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
384 } 319 }
385 320
386 /* Save IP Address. */ 321 /* Save IP Address. */
387 memcpy(ha->ip_address, init_fw_cb->IPAddr, 322 memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
388 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr))); 323 min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
389 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask, 324 memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
390 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask))); 325 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
391 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr, 326 memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
392 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr))); 327 min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
393 328
394 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk), 329 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
395 init_fw_cb, init_fw_cb_dma); 330 init_fw_cb, init_fw_cb_dma);
@@ -409,8 +344,10 @@ int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
409 /* Get firmware version */ 344 /* Get firmware version */
410 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 345 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
411 memset(&mbox_sts, 0, sizeof(mbox_sts)); 346 memset(&mbox_sts, 0, sizeof(mbox_sts));
347
412 mbox_cmd[0] = MBOX_CMD_GET_FW_STATE; 348 mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
413 if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) != 349
350 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
414 QLA_SUCCESS) { 351 QLA_SUCCESS) {
415 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ " 352 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
416 "status %04X\n", ha->host_no, __func__, 353 "status %04X\n", ha->host_no, __func__,
@@ -438,8 +375,10 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
438 /* Get firmware version */ 375 /* Get firmware version */
439 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 376 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
440 memset(&mbox_sts, 0, sizeof(mbox_sts)); 377 memset(&mbox_sts, 0, sizeof(mbox_sts));
378
441 mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS; 379 mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
442 if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) != 380
381 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
443 QLA_SUCCESS) { 382 QLA_SUCCESS) {
444 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ " 383 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
445 "status %04X\n", ha->host_no, __func__, 384 "status %04X\n", ha->host_no, __func__,
@@ -491,11 +430,14 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
491 } 430 }
492 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 431 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
493 memset(&mbox_sts, 0, sizeof(mbox_sts)); 432 memset(&mbox_sts, 0, sizeof(mbox_sts));
433
494 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY; 434 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
495 mbox_cmd[1] = (uint32_t) fw_ddb_index; 435 mbox_cmd[1] = (uint32_t) fw_ddb_index;
496 mbox_cmd[2] = LSDW(fw_ddb_entry_dma); 436 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
497 mbox_cmd[3] = MSDW(fw_ddb_entry_dma); 437 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
498 if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) == 438 mbox_cmd[4] = sizeof(struct dev_db_entry);
439
440 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
499 QLA_ERROR) { 441 QLA_ERROR) {
500 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed" 442 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
501 " with status 0x%04X\n", ha->host_no, __func__, 443 " with status 0x%04X\n", ha->host_no, __func__,
@@ -512,11 +454,11 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
512 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d " 454 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
513 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n", 455 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
514 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], 456 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
515 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0], 457 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
516 fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2], 458 fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
517 fw_ddb_entry->ipAddr[3], 459 fw_ddb_entry->ip_addr[3],
518 le16_to_cpu(fw_ddb_entry->portNumber), 460 le16_to_cpu(fw_ddb_entry->port),
519 fw_ddb_entry->iscsiName); 461 fw_ddb_entry->iscsi_name);
520 } 462 }
521 if (num_valid_ddb_entries) 463 if (num_valid_ddb_entries)
522 *num_valid_ddb_entries = mbox_sts[2]; 464 *num_valid_ddb_entries = mbox_sts[2];
@@ -571,35 +513,10 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
571 mbox_cmd[1] = (uint32_t) fw_ddb_index; 513 mbox_cmd[1] = (uint32_t) fw_ddb_index;
572 mbox_cmd[2] = LSDW(fw_ddb_entry_dma); 514 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
573 mbox_cmd[3] = MSDW(fw_ddb_entry_dma); 515 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
574 return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]); 516 mbox_cmd[4] = sizeof(struct dev_db_entry);
575}
576 517
577#if 0 518 return qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
578int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
579 uint16_t fw_ddb_index)
580{
581 int status = QLA_ERROR;
582 uint32_t mbox_cmd[MBOX_REG_COUNT];
583 uint32_t mbox_sts[MBOX_REG_COUNT];
584
585 /* Do not wait for completion. The firmware will send us an
586 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
587 */
588 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
589 memset(&mbox_sts, 0, sizeof(mbox_sts));
590 mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
591 mbox_cmd[1] = (uint32_t) fw_ddb_index;
592 mbox_cmd[2] = 0;
593 mbox_cmd[3] = 0;
594 mbox_cmd[4] = 0;
595 status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
596 DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
597 __func__, fw_ddb_index, status, mbox_sts[0],
598 mbox_sts[1]);)
599
600 return status;
601} 519}
602#endif /* 0 */
603 520
604/** 521/**
605 * qla4xxx_get_crash_record - retrieves crash record. 522 * qla4xxx_get_crash_record - retrieves crash record.
@@ -614,12 +531,14 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
614 struct crash_record *crash_record = NULL; 531 struct crash_record *crash_record = NULL;
615 dma_addr_t crash_record_dma = 0; 532 dma_addr_t crash_record_dma = 0;
616 uint32_t crash_record_size = 0; 533 uint32_t crash_record_size = 0;
534
617 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 535 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
618 memset(&mbox_sts, 0, sizeof(mbox_sts)); 536 memset(&mbox_sts, 0, sizeof(mbox_sts));
619 537
620 /* Get size of crash record. */ 538 /* Get size of crash record. */
621 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; 539 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
622 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) != 540
541 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
623 QLA_SUCCESS) { 542 QLA_SUCCESS) {
624 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n", 543 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
625 ha->host_no, __func__)); 544 ha->host_no, __func__));
@@ -639,11 +558,15 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
639 goto exit_get_crash_record; 558 goto exit_get_crash_record;
640 559
641 /* Get Crash Record. */ 560 /* Get Crash Record. */
561 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
562 memset(&mbox_sts, 0, sizeof(mbox_sts));
563
642 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; 564 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
643 mbox_cmd[2] = LSDW(crash_record_dma); 565 mbox_cmd[2] = LSDW(crash_record_dma);
644 mbox_cmd[3] = MSDW(crash_record_dma); 566 mbox_cmd[3] = MSDW(crash_record_dma);
645 mbox_cmd[4] = crash_record_size; 567 mbox_cmd[4] = crash_record_size;
646 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) != 568
569 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
647 QLA_SUCCESS) 570 QLA_SUCCESS)
648 goto exit_get_crash_record; 571 goto exit_get_crash_record;
649 572
@@ -655,7 +578,6 @@ exit_get_crash_record:
655 crash_record, crash_record_dma); 578 crash_record, crash_record_dma);
656} 579}
657 580
658#if 0
659/** 581/**
660 * qla4xxx_get_conn_event_log - retrieves connection event log 582 * qla4xxx_get_conn_event_log - retrieves connection event log
661 * @ha: Pointer to host adapter structure. 583 * @ha: Pointer to host adapter structure.
@@ -678,7 +600,8 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
678 600
679 /* Get size of crash record. */ 601 /* Get size of crash record. */
680 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; 602 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
681 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) != 603
604 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
682 QLA_SUCCESS) 605 QLA_SUCCESS)
683 goto exit_get_event_log; 606 goto exit_get_event_log;
684 607
@@ -693,10 +616,14 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
693 goto exit_get_event_log; 616 goto exit_get_event_log;
694 617
695 /* Get Crash Record. */ 618 /* Get Crash Record. */
619 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
620 memset(&mbox_sts, 0, sizeof(mbox_sts));
621
696 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; 622 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
697 mbox_cmd[2] = LSDW(event_log_dma); 623 mbox_cmd[2] = LSDW(event_log_dma);
698 mbox_cmd[3] = MSDW(event_log_dma); 624 mbox_cmd[3] = MSDW(event_log_dma);
699 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) != 625
626 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
700 QLA_SUCCESS) { 627 QLA_SUCCESS) {
701 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event " 628 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
702 "log!\n", ha->host_no, __func__)); 629 "log!\n", ha->host_no, __func__));
@@ -745,7 +672,6 @@ exit_get_event_log:
745 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log, 672 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
746 event_log_dma); 673 event_log_dma);
747} 674}
748#endif /* 0 */
749 675
750/** 676/**
751 * qla4xxx_reset_lun - issues LUN Reset 677 * qla4xxx_reset_lun - issues LUN Reset
@@ -773,11 +699,13 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
773 */ 699 */
774 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 700 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
775 memset(&mbox_sts, 0, sizeof(mbox_sts)); 701 memset(&mbox_sts, 0, sizeof(mbox_sts));
702
776 mbox_cmd[0] = MBOX_CMD_LUN_RESET; 703 mbox_cmd[0] = MBOX_CMD_LUN_RESET;
777 mbox_cmd[1] = ddb_entry->fw_ddb_index; 704 mbox_cmd[1] = ddb_entry->fw_ddb_index;
778 mbox_cmd[2] = lun << 8; 705 mbox_cmd[2] = lun << 8;
779 mbox_cmd[5] = 0x01; /* Immediate Command Enable */ 706 mbox_cmd[5] = 0x01; /* Immediate Command Enable */
780 qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]); 707
708 qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
781 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && 709 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
782 mbox_sts[0] != MBOX_STS_COMMAND_ERROR) 710 mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
783 status = QLA_ERROR; 711 status = QLA_ERROR;
@@ -794,12 +722,14 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
794 722
795 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 723 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
796 memset(&mbox_sts, 0, sizeof(mbox_sts)); 724 memset(&mbox_sts, 0, sizeof(mbox_sts));
725
797 mbox_cmd[0] = MBOX_CMD_READ_FLASH; 726 mbox_cmd[0] = MBOX_CMD_READ_FLASH;
798 mbox_cmd[1] = LSDW(dma_addr); 727 mbox_cmd[1] = LSDW(dma_addr);
799 mbox_cmd[2] = MSDW(dma_addr); 728 mbox_cmd[2] = MSDW(dma_addr);
800 mbox_cmd[3] = offset; 729 mbox_cmd[3] = offset;
801 mbox_cmd[4] = len; 730 mbox_cmd[4] = len;
802 if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) != 731
732 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
803 QLA_SUCCESS) { 733 QLA_SUCCESS) {
804 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ " 734 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
805 "status %04X %04X, offset %08x, len %08x\n", ha->host_no, 735 "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
@@ -825,8 +755,10 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
825 /* Get firmware version. */ 755 /* Get firmware version. */
826 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 756 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
827 memset(&mbox_sts, 0, sizeof(mbox_sts)); 757 memset(&mbox_sts, 0, sizeof(mbox_sts));
758
828 mbox_cmd[0] = MBOX_CMD_ABOUT_FW; 759 mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
829 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) != 760
761 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
830 QLA_SUCCESS) { 762 QLA_SUCCESS) {
831 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ " 763 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
832 "status %04X\n", ha->host_no, __func__, mbox_sts[0])); 764 "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
@@ -855,7 +787,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
855 mbox_cmd[2] = LSDW(dma_addr); 787 mbox_cmd[2] = LSDW(dma_addr);
856 mbox_cmd[3] = MSDW(dma_addr); 788 mbox_cmd[3] = MSDW(dma_addr);
857 789
858 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) != 790 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
859 QLA_SUCCESS) { 791 QLA_SUCCESS) {
860 DEBUG2(printk("scsi%ld: %s: failed status %04X\n", 792 DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
861 ha->host_no, __func__, mbox_sts[0])); 793 ha->host_no, __func__, mbox_sts[0]));
@@ -875,7 +807,7 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
875 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY; 807 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
876 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES; 808 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
877 809
878 if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) != 810 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
879 QLA_SUCCESS) { 811 QLA_SUCCESS) {
880 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) { 812 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
881 *ddb_index = mbox_sts[2]; 813 *ddb_index = mbox_sts[2];
@@ -918,23 +850,23 @@ int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
918 if (ret_val != QLA_SUCCESS) 850 if (ret_val != QLA_SUCCESS)
919 goto qla4xxx_send_tgts_exit; 851 goto qla4xxx_send_tgts_exit;
920 852
921 memset((void *)fw_ddb_entry->iSCSIAlias, 0, 853 memset(fw_ddb_entry->iscsi_alias, 0,
922 sizeof(fw_ddb_entry->iSCSIAlias)); 854 sizeof(fw_ddb_entry->iscsi_alias));
923 855
924 memset((void *)fw_ddb_entry->iscsiName, 0, 856 memset(fw_ddb_entry->iscsi_name, 0,
925 sizeof(fw_ddb_entry->iscsiName)); 857 sizeof(fw_ddb_entry->iscsi_name));
926 858
927 memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr)); 859 memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
928 memset((void *)fw_ddb_entry->targetAddr, 0, 860 memset(fw_ddb_entry->tgt_addr, 0,
929 sizeof(fw_ddb_entry->targetAddr)); 861 sizeof(fw_ddb_entry->tgt_addr));
930 862
931 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET); 863 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
932 fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port)); 864 fw_ddb_entry->port = cpu_to_le16(ntohs(port));
933 865
934 fw_ddb_entry->ipAddr[0] = *ip; 866 fw_ddb_entry->ip_addr[0] = *ip;
935 fw_ddb_entry->ipAddr[1] = *(ip + 1); 867 fw_ddb_entry->ip_addr[1] = *(ip + 1);
936 fw_ddb_entry->ipAddr[2] = *(ip + 2); 868 fw_ddb_entry->ip_addr[2] = *(ip + 2);
937 fw_ddb_entry->ipAddr[3] = *(ip + 3); 869 fw_ddb_entry->ip_addr[3] = *(ip + 3);
938 870
939 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma); 871 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
940 872
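Every mailbox call converted in ql4_mbx.c above now follows one convention: zero both register arrays, put the opcode in mbox_cmd[0], split the DMA address across mbox_cmd[2]/mbox_cmd[3] with LSDW()/MSDW(), pass the buffer length to the firmware in mbox_cmd[4], and always hand qla4xxx_mailbox_command() the full MBOX_REG_COUNT input registers instead of a hand-counted subset. Condensed into one sketch, using the names from the hunks above (the early-return shape is illustrative):

    uint32_t mbox_cmd[MBOX_REG_COUNT];
    uint32_t mbox_sts[MBOX_REG_COUNT];

    memset(&mbox_cmd, 0, sizeof(mbox_cmd));
    memset(&mbox_sts, 0, sizeof(mbox_sts));

    mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;   /* opcode */
    mbox_cmd[2] = LSDW(init_fw_cb_dma);              /* DMA address, low 32 bits */
    mbox_cmd[3] = MSDW(init_fw_cb_dma);              /* DMA address, high 32 bits */
    mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);   /* buffer length */

    if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
                                &mbox_cmd[0], &mbox_sts[0]) != QLA_SUCCESS)
        return QLA_ERROR;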
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 58afd135aa1d..7fe0482ecf03 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha) 13static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
11{ 14{
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index da21f5fbbf87..e69160a7bc60 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -10,6 +10,10 @@
10#include <scsi/scsicam.h> 10#include <scsi/scsicam.h>
11 11
12#include "ql4_def.h" 12#include "ql4_def.h"
13#include "ql4_version.h"
14#include "ql4_glbl.h"
15#include "ql4_dbg.h"
16#include "ql4_inline.h"
13 17
14/* 18/*
15 * Driver version 19 * Driver version
@@ -50,12 +54,15 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
50/* 54/*
51 * iSCSI template entry points 55 * iSCSI template entry points
52 */ 56 */
53static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no, 57static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
54 uint32_t enable, struct sockaddr *dst_addr); 58 enum iscsi_tgt_dscvr type, uint32_t enable,
59 struct sockaddr *dst_addr);
55static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 60static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
56 enum iscsi_param param, char *buf); 61 enum iscsi_param param, char *buf);
57static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, 62static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
58 enum iscsi_param param, char *buf); 63 enum iscsi_param param, char *buf);
64static int qla4xxx_host_get_param(struct Scsi_Host *shost,
65 enum iscsi_host_param param, char *buf);
59static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag); 66static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
60static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); 67static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
61static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 68static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
@@ -95,16 +102,20 @@ static struct scsi_host_template qla4xxx_driver_template = {
95static struct iscsi_transport qla4xxx_iscsi_transport = { 102static struct iscsi_transport qla4xxx_iscsi_transport = {
96 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
97 .name = DRIVER_NAME, 104 .name = DRIVER_NAME,
98 .param_mask = ISCSI_CONN_PORT | 105 .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD |
99 ISCSI_CONN_ADDRESS | 106 CAP_DATA_PATH_OFFLOAD,
100 ISCSI_TARGET_NAME | 107 .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
101 ISCSI_TPGT, 108 ISCSI_TARGET_NAME | ISCSI_TPGT,
109 .host_param_mask = ISCSI_HOST_HWADDRESS |
110 ISCSI_HOST_IPADDRESS |
111 ISCSI_HOST_INITIATOR_NAME,
102 .sessiondata_size = sizeof(struct ddb_entry), 112 .sessiondata_size = sizeof(struct ddb_entry),
103 .host_template = &qla4xxx_driver_template, 113 .host_template = &qla4xxx_driver_template,
104 114
105 .tgt_dscvr = qla4xxx_tgt_dscvr, 115 .tgt_dscvr = qla4xxx_tgt_dscvr,
106 .get_conn_param = qla4xxx_conn_get_param, 116 .get_conn_param = qla4xxx_conn_get_param,
107 .get_session_param = qla4xxx_sess_get_param, 117 .get_session_param = qla4xxx_sess_get_param,
118 .get_host_param = qla4xxx_host_get_param,
108 .start_conn = qla4xxx_conn_start, 119 .start_conn = qla4xxx_conn_start,
109 .stop_conn = qla4xxx_conn_stop, 120 .stop_conn = qla4xxx_conn_stop,
110 .session_recovery_timedout = qla4xxx_recovery_timedout, 121 .session_recovery_timedout = qla4xxx_recovery_timedout,
@@ -161,6 +172,43 @@ static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
161 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag); 172 printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
162} 173}
163 174
175static ssize_t format_addr(char *buf, const unsigned char *addr, int len)
176{
177 int i;
178 char *cp = buf;
179
180 for (i = 0; i < len; i++)
181 cp += sprintf(cp, "%02x%c", addr[i],
182 i == (len - 1) ? '\n' : ':');
183 return cp - buf;
184}
185
186
187static int qla4xxx_host_get_param(struct Scsi_Host *shost,
188 enum iscsi_host_param param, char *buf)
189{
190 struct scsi_qla_host *ha = to_qla_host(shost);
191 int len;
192
193 switch (param) {
194 case ISCSI_HOST_PARAM_HWADDRESS:
195 len = format_addr(buf, ha->my_mac, MAC_ADDR_LEN);
196 break;
197 case ISCSI_HOST_PARAM_IPADDRESS:
198 len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0],
199 ha->ip_address[1], ha->ip_address[2],
200 ha->ip_address[3]);
201 break;
202 case ISCSI_HOST_PARAM_INITIATOR_NAME:
203 len = sprintf(buf, "%s\n", ha->name_string);
204 break;
205 default:
206 return -ENOSYS;
207 }
208
209 return len;
210}
211
164static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, 212static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
165 enum iscsi_param param, char *buf) 213 enum iscsi_param param, char *buf)
166{ 214{
@@ -208,21 +256,15 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
208 return len; 256 return len;
209} 257}
210 258
211static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no, 259static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
212 uint32_t enable, struct sockaddr *dst_addr) 260 enum iscsi_tgt_dscvr type, uint32_t enable,
261 struct sockaddr *dst_addr)
213{ 262{
214 struct scsi_qla_host *ha; 263 struct scsi_qla_host *ha;
215 struct Scsi_Host *shost;
216 struct sockaddr_in *addr; 264 struct sockaddr_in *addr;
217 struct sockaddr_in6 *addr6; 265 struct sockaddr_in6 *addr6;
218 int ret = 0; 266 int ret = 0;
219 267
220 shost = scsi_host_lookup(host_no);
221 if (IS_ERR(shost)) {
222 printk(KERN_ERR "Could not find host no %u\n", host_no);
223 return -ENODEV;
224 }
225
226 ha = (struct scsi_qla_host *) shost->hostdata; 268 ha = (struct scsi_qla_host *) shost->hostdata;
227 269
228 switch (type) { 270 switch (type) {
@@ -246,8 +288,6 @@ static int qla4xxx_tgt_dscvr(enum iscsi_tgt_dscvr type, uint32_t host_no,
246 default: 288 default:
247 ret = -ENOSYS; 289 ret = -ENOSYS;
248 } 290 }
249
250 scsi_host_put(shost);
251 return ret; 291 return ret;
252} 292}
253 293
@@ -369,14 +409,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
369 struct scsi_cmnd *cmd = srb->cmd; 409 struct scsi_cmnd *cmd = srb->cmd;
370 410
371 if (srb->flags & SRB_DMA_VALID) { 411 if (srb->flags & SRB_DMA_VALID) {
372 if (cmd->use_sg) { 412 scsi_dma_unmap(cmd);
373 pci_unmap_sg(ha->pdev, cmd->request_buffer,
374 cmd->use_sg, cmd->sc_data_direction);
375 } else if (cmd->request_bufflen) {
376 pci_unmap_single(ha->pdev, srb->dma_handle,
377 cmd->request_bufflen,
378 cmd->sc_data_direction);
379 }
380 srb->flags &= ~SRB_DMA_VALID; 413 srb->flags &= ~SRB_DMA_VALID;
381 } 414 }
382 cmd->SCp.ptr = NULL; 415 cmd->SCp.ptr = NULL;
@@ -711,7 +744,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
711 return stat; 744 return stat;
712} 745}
713 746
714static void qla4xxx_hw_reset(struct scsi_qla_host *ha) 747void qla4xxx_hw_reset(struct scsi_qla_host *ha)
715{ 748{
716 uint32_t ctrl_status; 749 uint32_t ctrl_status;
717 unsigned long flags = 0; 750 unsigned long flags = 0;
@@ -1081,13 +1114,13 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1081 if (ha->timer_active) 1114 if (ha->timer_active)
1082 qla4xxx_stop_timer(ha); 1115 qla4xxx_stop_timer(ha);
1083 1116
1084 /* free extra memory */
1085 qla4xxx_mem_free(ha);
1086
1087 /* Detach interrupts */ 1117 /* Detach interrupts */
1088 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) 1118 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1089 free_irq(ha->pdev->irq, ha); 1119 free_irq(ha->pdev->irq, ha);
1090 1120
1121 /* free extra memory */
1122 qla4xxx_mem_free(ha);
1123
1091 pci_disable_device(ha->pdev); 1124 pci_disable_device(ha->pdev);
1092 1125
1093} 1126}
@@ -1332,6 +1365,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1332 1365
1333 ha = pci_get_drvdata(pdev); 1366 ha = pci_get_drvdata(pdev);
1334 1367
1368 qla4xxx_disable_intrs(ha);
1369
1370 while (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
1371 ssleep(1);
1372
1335 /* remove devs from iscsi_sessions to scsi_devices */ 1373 /* remove devs from iscsi_sessions to scsi_devices */
1336 qla4xxx_free_ddb_list(ha); 1374 qla4xxx_free_ddb_list(ha);
1337 1375
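format_addr() above emits each byte as two hex digits followed by a colon, except the last byte, which is followed by a newline; qla4xxx_host_get_param() uses it for the hardware address and plain sprintf() for the other two attributes, returning -ENOSYS for anything it does not support. A worked example, assuming MAC_ADDR_LEN is 6 and an arbitrary made-up address:

    unsigned char mac[6] = { 0x00, 0x0e, 0x1e, 0x04, 0x93, 0x20 };
    char buf[32];

    ssize_t n = format_addr(buf, mac, 6);
    /* buf now holds "00:0e:1e:04:93:20\n"; n == 18 (three chars per byte) */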
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index e5183a697d1f..ab984cb89cea 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,5 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.00.07-k1" 8#define QLA4XXX_DRIVER_VERSION "5.01.00-k8"
9
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 2e7db18f5aef..2bfbf26c00ed 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -265,8 +265,6 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
265 unsigned int message; /* scsi returned message */ 265 unsigned int message; /* scsi returned message */
266 unsigned int phase; /* recorded scsi phase */ 266 unsigned int phase; /* recorded scsi phase */
267 unsigned int reqlen; /* total length of transfer */ 267 unsigned int reqlen; /* total length of transfer */
268 struct scatterlist *sglist; /* scatter-gather list pointer */
269 unsigned int sgcount; /* sg counter */
270 char *buf; 268 char *buf;
271 struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); 269 struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
272 int qbase = priv->qbase; 270 int qbase = priv->qbase;
@@ -301,9 +299,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
301 if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */ 299 if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
302 outb(1, qbase + 3); /* clear fifo */ 300 outb(1, qbase + 3); /* clear fifo */
303 /* note that request_bufflen is the total xfer size when sg is used */ 301 /* note that request_bufflen is the total xfer size when sg is used */
304 reqlen = cmd->request_bufflen; 302 reqlen = scsi_bufflen(cmd);
305 /* note that it won't work if transfers > 16M are requested */ 303 /* note that it won't work if transfers > 16M are requested */
306 if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */ 304 if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
305 struct scatterlist *sg;
307 rtrc(2) 306 rtrc(2)
308 outb(reqlen, qbase); /* low-mid xfer cnt */ 307 outb(reqlen, qbase); /* low-mid xfer cnt */
309 outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */ 308 outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
@@ -311,23 +310,16 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
311 outb(0x90, qbase + 3); /* command do xfer */ 310 outb(0x90, qbase + 3); /* command do xfer */
312 /* PIO pseudo DMA to buffer or sglist */ 311 /* PIO pseudo DMA to buffer or sglist */
313 REG1; 312 REG1;
314 if (!cmd->use_sg) 313
315 ql_pdma(priv, phase, cmd->request_buffer, 314 scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
316 cmd->request_bufflen); 315 if (priv->qabort) {
317 else { 316 REG0;
318 sgcount = cmd->use_sg; 317 return ((priv->qabort == 1 ?
319 sglist = cmd->request_buffer; 318 DID_ABORT : DID_RESET) << 16);
320 while (sgcount--) {
321 if (priv->qabort) {
322 REG0;
323 return ((priv->qabort == 1 ?
324 DID_ABORT : DID_RESET) << 16);
325 }
326 buf = page_address(sglist->page) + sglist->offset;
327 if (ql_pdma(priv, phase, buf, sglist->length))
328 break;
329 sglist++;
330 } 319 }
320 buf = page_address(sg->page) + sg->offset;
321 if (ql_pdma(priv, phase, buf, sg->length))
322 break;
331 } 323 }
332 REG0; 324 REG0;
333 rtrc(2) 325 rtrc(2)
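The ql_pcmd() rewrite above is the canonical data-buffer-accessor conversion this series applies everywhere: scsi_bufflen() replaces request_bufflen, and a single scsi_for_each_sg() walk over scsi_sg_count() entries replaces the manual use_sg/request_buffer bookkeeping, eliminating the single-buffer special case. The skeleton of the idiom, with the per-segment transfer left as a placeholder:

    struct scatterlist *sg;
    int i;

    scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
        char *buf = page_address(sg->page) + sg->offset;

        /* ... move sg->length bytes to or from buf ... */
    }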
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4c1e31334765..a691dda40d2c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -368,7 +368,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
368 if (level > 3) { 368 if (level > 3) {
369 printk(KERN_INFO "buffer = 0x%p, bufflen = %d," 369 printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
370 " done = 0x%p, queuecommand 0x%p\n", 370 " done = 0x%p, queuecommand 0x%p\n",
371 cmd->request_buffer, cmd->request_bufflen, 371 scsi_sglist(cmd), scsi_bufflen(cmd),
372 cmd->done, 372 cmd->done,
373 cmd->device->host->hostt->queuecommand); 373 cmd->device->host->hostt->queuecommand);
374 374
@@ -1016,52 +1016,6 @@ struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1016} 1016}
1017EXPORT_SYMBOL(scsi_device_lookup); 1017EXPORT_SYMBOL(scsi_device_lookup);
1018 1018
1019/**
1020 * scsi_device_cancel - cancel outstanding IO to this device
1021 * @sdev: Pointer to struct scsi_device
1022 * @recovery: Boolean instructing function to recover device or not.
1023 *
1024 **/
1025int scsi_device_cancel(struct scsi_device *sdev, int recovery)
1026{
1027 struct scsi_cmnd *scmd;
1028 LIST_HEAD(active_list);
1029 struct list_head *lh, *lh_sf;
1030 unsigned long flags;
1031
1032 scsi_device_set_state(sdev, SDEV_CANCEL);
1033
1034 spin_lock_irqsave(&sdev->list_lock, flags);
1035 list_for_each_entry(scmd, &sdev->cmd_list, list) {
1036 if (scmd->request) {
1037 /*
1038 * If we are unable to remove the timer, it means
1039 * that the command has already timed out or
1040 * finished.
1041 */
1042 if (!scsi_delete_timer(scmd))
1043 continue;
1044 list_add_tail(&scmd->eh_entry, &active_list);
1045 }
1046 }
1047 spin_unlock_irqrestore(&sdev->list_lock, flags);
1048
1049 if (!list_empty(&active_list)) {
1050 list_for_each_safe(lh, lh_sf, &active_list) {
1051 scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
1052 list_del_init(lh);
1053 if (recovery &&
1054 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
1055 scmd->result = (DID_ABORT << 16);
1056 scsi_finish_command(scmd);
1057 }
1058 }
1059 }
1060
1061 return 0;
1062}
1063EXPORT_SYMBOL(scsi_device_cancel);
1064
1065MODULE_DESCRIPTION("SCSI core"); 1019MODULE_DESCRIPTION("SCSI core");
1066MODULE_LICENSE("GPL"); 1020MODULE_LICENSE("GPL");
1067 1021
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 06229f225ee9..4cd9c58efef1 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2405,7 +2405,7 @@ MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2405MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); 2405MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2406MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); 2406MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2407MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); 2407MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2408MODULE_PARM_DESC(every_nth, "timeout every nth command(def=100)"); 2408MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2409MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); 2409MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2410MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); 2410MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2411MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); 2411MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 18dd5cc4d7c6..19c44f0781fd 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -128,6 +128,7 @@ static struct {
128 {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, 128 {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
129 {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, 129 {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
130 {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, 130 {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
131 {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN},
131 {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN}, 132 {"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
132 {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN}, 133 {"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
133 {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36}, 134 {"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e8350c562d24..9adb64ac054c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -18,12 +18,12 @@
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/timer.h> 19#include <linux/timer.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/kernel.h> 21#include <linux/kernel.h>
23#include <linux/kthread.h> 22#include <linux/kthread.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
25#include <linux/blkdev.h> 24#include <linux/blkdev.h>
26#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/scatterlist.h>
27 27
28#include <scsi/scsi.h> 28#include <scsi/scsi.h>
29#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -640,16 +640,8 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
640 memcpy(scmd->cmnd, cmnd, cmnd_size); 640 memcpy(scmd->cmnd, cmnd, cmnd_size);
641 641
642 if (copy_sense) { 642 if (copy_sense) {
643 gfp_t gfp_mask = GFP_ATOMIC; 643 sg_init_one(&sgl, scmd->sense_buffer,
644 644 sizeof(scmd->sense_buffer));
645 if (shost->hostt->unchecked_isa_dma)
646 gfp_mask |= __GFP_DMA;
647
648 sgl.page = alloc_page(gfp_mask);
649 if (!sgl.page)
650 return FAILED;
651 sgl.offset = 0;
652 sgl.length = 252;
653 645
654 scmd->sc_data_direction = DMA_FROM_DEVICE; 646 scmd->sc_data_direction = DMA_FROM_DEVICE;
655 scmd->request_bufflen = sgl.length; 647 scmd->request_bufflen = sgl.length;
@@ -720,18 +712,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
720 712
721 713
722 /* 714 /*
723 * Last chance to have valid sense data.
724 */
725 if (copy_sense) {
726 if (!SCSI_SENSE_VALID(scmd)) {
727 memcpy(scmd->sense_buffer, page_address(sgl.page),
728 sizeof(scmd->sense_buffer));
729 }
730 __free_page(sgl.page);
731 }
732
733
734 /*
735 * Restore original data 715 * Restore original data
736 */ 716 */
737 scmd->request_buffer = old_buffer; 717 scmd->request_buffer = old_buffer;
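With sg_init_one(), scsi_send_eh_cmnd() no longer allocates a throwaway (possibly __GFP_DMA) page for the REQUEST SENSE data and copies it back afterwards; the single-entry scatterlist points straight at scmd->sense_buffer, so the device DMAs the sense bytes into their final home and the "last chance" copy-back block above becomes dead code. The helper's general shape, for any existing kernel buffer:

    struct scatterlist sgl;

    /* Build a one-entry sg list describing an existing buffer. */
    sg_init_one(&sgl, scmd->sense_buffer, sizeof(scmd->sense_buffer));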
diff --git a/drivers/scsi/scsi_lib_dma.c b/drivers/scsi/scsi_lib_dma.c
new file mode 100644
index 000000000000..ac6855cd2657
--- /dev/null
+++ b/drivers/scsi/scsi_lib_dma.c
@@ -0,0 +1,50 @@
1/*
2 * SCSI library functions depending on DMA
3 */
4
5#include <linux/blkdev.h>
6#include <linux/device.h>
7#include <linux/kernel.h>
8
9#include <scsi/scsi.h>
10#include <scsi/scsi_cmnd.h>
11#include <scsi/scsi_device.h>
12#include <scsi/scsi_host.h>
13
14/**
15 * scsi_dma_map - perform DMA mapping against command's sg lists
16 * @cmd: scsi command
17 *
18 * Returns the number of sg entries actually mapped, zero if the sg list
19 * is NULL, or -ENOMEM if the mapping failed.
20 */
21int scsi_dma_map(struct scsi_cmnd *cmd)
22{
23 int nseg = 0;
24
25 if (scsi_sg_count(cmd)) {
26 struct device *dev = cmd->device->host->shost_gendev.parent;
27
28 nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
29 cmd->sc_data_direction);
30 if (unlikely(!nseg))
31 return -ENOMEM;
32 }
33 return nseg;
34}
35EXPORT_SYMBOL(scsi_dma_map);
36
37/**
38 * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
39 * @cmd: scsi command
40 */
41void scsi_dma_unmap(struct scsi_cmnd *cmd)
42{
43 if (scsi_sg_count(cmd)) {
44 struct device *dev = cmd->device->host->shost_gendev.parent;
45
46 dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
47 cmd->sc_data_direction);
48 }
49}
50EXPORT_SYMBOL(scsi_dma_unmap);
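These two helpers are the mapping half of the accessor API the rest of this series converts drivers to. A typical low-level driver calls scsi_dma_map() from its queuecommand path and scsi_dma_unmap() on completion, roughly like this (the descriptor programming is hardware-specific and left as a placeholder):

    struct scatterlist *sg;
    int i, nseg;

    nseg = scsi_dma_map(cmd);
    if (nseg < 0)
        return SCSI_MLQUEUE_HOST_BUSY;   /* mapping failed; retry later */

    scsi_for_each_sg(cmd, sg, nseg, i) {
        /* program one hardware S/G element from
         * sg_dma_address(sg) and sg_dma_len(sg) */
    }

    /* ... and in the completion path: */
    scsi_dma_unmap(cmd);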
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 662577fbe7a8..a86e62f4b3ba 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -703,16 +703,14 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
703 703
704/** 704/**
705 * scsi_add_lun - allocate and fully initialize a scsi_device 705 * scsi_add_lun - allocate and fully initialize a scsi_device
706 * @sdevscan: holds information to be stored in the new scsi_device 706 * @sdev: holds information to be stored in the new scsi_device
707 * @sdevnew: store the address of the newly allocated scsi_device
708 * @inq_result: holds the result of a previous INQUIRY to the LUN 707 * @inq_result: holds the result of a previous INQUIRY to the LUN
709 * @bflags: black/white list flag 708 * @bflags: black/white list flag
709 * @async: 1 if this device is being scanned asynchronously
710 * 710 *
711 * Description: 711 * Description:
712 * Allocate and initialize a scsi_device matching sdevscan. Optionally 712 * Initialize the scsi_device @sdev. Optionally set fields based
713 * set fields based on values in *@bflags. If @sdevnew is not 713 * on values in *@bflags.
714 * NULL, store the address of the new scsi_device in *@sdevnew (needed
715 * when scanning a particular LUN).
716 * 714 *
717 * Return: 715 * Return:
718 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 716 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
@@ -752,25 +750,15 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
752 sdev->rev = (char *) (sdev->inquiry + 32); 750 sdev->rev = (char *) (sdev->inquiry + 32);
753 751
754 if (*bflags & BLIST_ISROM) { 752 if (*bflags & BLIST_ISROM) {
755 /* 753 sdev->type = TYPE_ROM;
756 * It would be better to modify sdev->type, and set 754 sdev->removable = 1;
757 * sdev->removable; this can now be done since 755 } else {
758 * print_inquiry has gone away. 756 sdev->type = (inq_result[0] & 0x1f);
759 */ 757 sdev->removable = (inq_result[1] & 0x80) >> 7;
760 inq_result[0] = TYPE_ROM; 758 }
761 inq_result[1] |= 0x80; /* removable */
762 } else if (*bflags & BLIST_NO_ULD_ATTACH)
763 sdev->no_uld_attach = 1;
764 759
765 switch (sdev->type = (inq_result[0] & 0x1f)) { 760 switch (sdev->type) {
766 case TYPE_RBC: 761 case TYPE_RBC:
767 /* RBC devices can return SCSI-3 compliance and yet
768 * still not support REPORT LUNS, so make them act as
769 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
770 * specifically set */
771 if ((*bflags & BLIST_REPORTLUN2) == 0)
772 *bflags |= BLIST_NOREPORTLUN;
773 /* fall through */
774 case TYPE_TAPE: 762 case TYPE_TAPE:
775 case TYPE_DISK: 763 case TYPE_DISK:
776 case TYPE_PRINTER: 764 case TYPE_PRINTER:
@@ -784,13 +772,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
784 sdev->writeable = 1; 772 sdev->writeable = 1;
785 break; 773 break;
786 case TYPE_ROM: 774 case TYPE_ROM:
787 /* MMC devices can return SCSI-3 compliance and yet
788 * still not support REPORT LUNS, so make them act as
789 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
790 * specifically set */
791 if ((*bflags & BLIST_REPORTLUN2) == 0)
792 *bflags |= BLIST_NOREPORTLUN;
793 /* fall through */
794 case TYPE_WORM: 775 case TYPE_WORM:
795 sdev->writeable = 0; 776 sdev->writeable = 0;
796 break; 777 break;
@@ -798,6 +779,15 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
798 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type); 779 printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
799 } 780 }
800 781
782 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
783 /* RBC and MMC devices can return SCSI-3 compliance and yet
784 * still not support REPORT LUNS, so make them act as
785 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
786 * specifically set */
787 if ((*bflags & BLIST_REPORTLUN2) == 0)
788 *bflags |= BLIST_NOREPORTLUN;
789 }
790
801 /* 791 /*
802 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 792 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
803 * spec says: The device server is capable of supporting the 793 * spec says: The device server is capable of supporting the
@@ -815,12 +805,11 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
815 */ 805 */
816 806
817 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; 807 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
818 sdev->removable = (0x80 & inq_result[1]) >> 7;
819 sdev->lockable = sdev->removable; 808 sdev->lockable = sdev->removable;
820 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); 809 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
821 810
822 if (sdev->scsi_level >= SCSI_3 || (sdev->inquiry_len > 56 && 811 if (sdev->scsi_level >= SCSI_3 ||
823 inq_result[56] & 0x04)) 812 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
824 sdev->ppr = 1; 813 sdev->ppr = 1;
825 if (inq_result[7] & 0x60) 814 if (inq_result[7] & 0x60)
826 sdev->wdtr = 1; 815 sdev->wdtr = 1;
@@ -833,13 +822,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
833 sdev->inq_periph_qual, inq_result[2] & 0x07, 822 sdev->inq_periph_qual, inq_result[2] & 0x07,
834 (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); 823 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
835 824
836 /*
837 * End sysfs code.
838 */
839
840 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && 825 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
841 !(*bflags & BLIST_NOTQ)) 826 !(*bflags & BLIST_NOTQ))
842 sdev->tagged_supported = 1; 827 sdev->tagged_supported = 1;
828
843 /* 829 /*
844 * Some devices (Texel CD ROM drives) have handshaking problems 830 * Some devices (Texel CD ROM drives) have handshaking problems
845 * when used with the Seagate controllers. borken is initialized 831 * when used with the Seagate controllers. borken is initialized
@@ -848,6 +834,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
848 if ((*bflags & BLIST_BORKEN) == 0) 834 if ((*bflags & BLIST_BORKEN) == 0)
849 sdev->borken = 0; 835 sdev->borken = 0;
850 836
837 if (*bflags & BLIST_NO_ULD_ATTACH)
838 sdev->no_uld_attach = 1;
839
851 /* 840 /*
852 * Apparently some really broken devices (contrary to the SCSI 841 * Apparently some really broken devices (contrary to the SCSI
853 * standards) need to be selected without asserting ATN 842 * standards) need to be selected without asserting ATN
@@ -872,7 +861,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
872 if (*bflags & BLIST_SINGLELUN) 861 if (*bflags & BLIST_SINGLELUN)
873 sdev->single_lun = 1; 862 sdev->single_lun = 1;
874 863
875
876 sdev->use_10_for_rw = 1; 864 sdev->use_10_for_rw = 1;
877 865
878 if (*bflags & BLIST_MS_SKIP_PAGE_08) 866 if (*bflags & BLIST_MS_SKIP_PAGE_08)
@@ -1213,7 +1201,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
1213 * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns 1201 * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns
1214 * the integer: 0x0b030a04 1202 * the integer: 0x0b030a04
1215 **/ 1203 **/
1216static int scsilun_to_int(struct scsi_lun *scsilun) 1204int scsilun_to_int(struct scsi_lun *scsilun)
1217{ 1205{
1218 int i; 1206 int i;
1219 unsigned int lun; 1207 unsigned int lun;
@@ -1224,6 +1212,7 @@ static int scsilun_to_int(struct scsi_lun *scsilun)
1224 scsilun->scsi_lun[i + 1]) << (i * 8)); 1212 scsilun->scsi_lun[i + 1]) << (i * 8));
1225 return lun; 1213 return lun;
1226} 1214}
1215EXPORT_SYMBOL(scsilun_to_int);
1227 1216
1228/** 1217/**
1229 * int_to_scsilun: reverts an int into a scsi_lun 1218 * int_to_scsilun: reverts an int into a scsi_lun
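Exporting scsilun_to_int() lets transport classes fold an 8-byte REPORT LUNS entry the same way the scanner does. The worked example from its kernel-doc, written out as code:

    struct scsi_lun lun = {
        .scsi_lun = { 0x0a, 0x04, 0x0b, 0x03, 0x00, 0x00, 0x00, 0x00 }
    };

    int l = scsilun_to_int(&lun);   /* l == 0x0b030a04 */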
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67a38a1409ba..ed720863ab97 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -293,30 +293,18 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
293{ 293{
294 struct device_driver *drv = dev->driver; 294 struct device_driver *drv = dev->driver;
295 struct scsi_device *sdev = to_scsi_device(dev); 295 struct scsi_device *sdev = to_scsi_device(dev);
296 struct scsi_host_template *sht = sdev->host->hostt;
297 int err; 296 int err;
298 297
299 err = scsi_device_quiesce(sdev); 298 err = scsi_device_quiesce(sdev);
300 if (err) 299 if (err)
301 return err; 300 return err;
302 301
303 /* call HLD suspend first */
304 if (drv && drv->suspend) { 302 if (drv && drv->suspend) {
305 err = drv->suspend(dev, state); 303 err = drv->suspend(dev, state);
306 if (err) 304 if (err)
307 return err; 305 return err;
308 } 306 }
309 307
310 /* then, call host suspend */
311 if (sht->suspend) {
312 err = sht->suspend(sdev, state);
313 if (err) {
314 if (drv && drv->resume)
315 drv->resume(dev);
316 return err;
317 }
318 }
319
320 return 0; 308 return 0;
321} 309}
322 310
@@ -324,21 +312,14 @@ static int scsi_bus_resume(struct device * dev)
324{ 312{
325 struct device_driver *drv = dev->driver; 313 struct device_driver *drv = dev->driver;
326 struct scsi_device *sdev = to_scsi_device(dev); 314 struct scsi_device *sdev = to_scsi_device(dev);
327 struct scsi_host_template *sht = sdev->host->hostt; 315 int err = 0;
328 int err = 0, err2 = 0;
329
330 /* call host resume first */
331 if (sht->resume)
332 err = sht->resume(sdev);
333 316
334 /* then, call HLD resume */
335 if (drv && drv->resume) 317 if (drv && drv->resume)
336 err2 = drv->resume(dev); 318 err = drv->resume(dev);
337 319
338 scsi_device_resume(sdev); 320 scsi_device_resume(sdev);
339 321
340 /* favor LLD failure */ 322 return err;
341 return err ? err : err2;;
342} 323}
343 324
344struct bus_type scsi_bus_type = { 325struct bus_type scsi_bus_type = {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b4d1ece46f78..e8825709797e 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * FiberChannel transport specific attributes exported to sysfs. 2 * FiberChannel transport specific attributes exported to sysfs.
3 * 3 *
4 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 4 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
@@ -19,9 +19,10 @@
19 * 19 *
20 * ======== 20 * ========
21 * 21 *
22 * Copyright (C) 2004-2005 James Smart, Emulex Corporation 22 * Copyright (C) 2004-2007 James Smart, Emulex Corporation
23 * Rewrite for host, target, device, and remote port attributes, 23 * Rewrite for host, target, device, and remote port attributes,
24 * statistics, and service functions... 24 * statistics, and service functions...
25 * Add vports, etc
25 * 26 *
26 */ 27 */
27#include <linux/module.h> 28#include <linux/module.h>
@@ -37,6 +38,34 @@
37#include "scsi_priv.h" 38#include "scsi_priv.h"
38 39
39static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 40static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
41static void fc_vport_sched_delete(struct work_struct *work);
42
43/*
44 * This is a temporary carrier for creating a vport. It will eventually
45 * be replaced by a real message definition for sgio or netlink.
46 *
47 * fc_vport_identifiers: This set of data contains all elements
48 * to uniquely identify and instantiate a FC virtual port.
49 *
50 * Notes:
51 * symbolic_name: The driver is to append the symbolic_name string data
52 * to the symbolic_node_name data that it generates by default.
 53 * to the symbolic_node_name data that it generates by default.
54 * It is expected that things like Xen may stuff a VM title into
55 * this field.
56 */
57struct fc_vport_identifiers {
58 u64 node_name;
59 u64 port_name;
60 u32 roles;
61 bool disable;
62 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
63 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
64};
65
66static int fc_vport_create(struct Scsi_Host *shost, int channel,
67 struct device *pdev, struct fc_vport_identifiers *ids,
68 struct fc_vport **vport);
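(For context, a hedged sketch of how a caller inside this file might fill the identifier block before invoking fc_vport_create() -- it mirrors what store_fc_host_vport_create() does further down in this patch; the WWN values are invented:)

	/* Sketch only; example WWNs, not real hardware identifiers. */
	static int example_create_npiv_vport(struct Scsi_Host *shost)
	{
		struct fc_vport_identifiers vid;
		struct fc_vport *vport;

		memset(&vid, 0, sizeof(vid));
		vid.port_name = 0x2100001b3281b0e5ULL;	/* WWPN, example value */
		vid.node_name = 0x2001001b3281b0e5ULL;	/* WWNN, example value */
		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vid.vport_type = FC_PORTTYPE_NPIV;	/* only type allowed */
		vid.disable = false;			/* bring it up immediately */

		/* channel 0 is the only channel the sysfs shortcut supports */
		return fc_vport_create(shost, 0, &shost->shost_gendev,
				       &vid, &vport);
	}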
40 69
41/* 70/*
42 * Redefine so that we can have same named attributes in the 71 * Redefine so that we can have same named attributes in the
@@ -90,10 +119,14 @@ static struct {
90 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, 119 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
91 { FC_PORTTYPE_LPORT, "LPort (private loop)" }, 120 { FC_PORTTYPE_LPORT, "LPort (private loop)" },
92 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection" }, 121 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection" },
122 { FC_PORTTYPE_NPIV, "NPIV VPORT" },
93}; 123};
94fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) 124fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
95#define FC_PORTTYPE_MAX_NAMELEN 50 125#define FC_PORTTYPE_MAX_NAMELEN 50
96 126
127/* Reuse fc_port_type enum function for vport_type */
128#define get_fc_vport_type_name get_fc_port_type_name
129
97 130
98/* Convert fc_host_event_code values to ascii string name */ 131/* Convert fc_host_event_code values to ascii string name */
99static const struct { 132static const struct {
@@ -139,6 +172,29 @@ fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
139#define FC_PORTSTATE_MAX_NAMELEN 20 172#define FC_PORTSTATE_MAX_NAMELEN 20
140 173
141 174
175/* Convert fc_vport_state values to ascii string name */
176static struct {
177 enum fc_vport_state value;
178 char *name;
179} fc_vport_state_names[] = {
180 { FC_VPORT_UNKNOWN, "Unknown" },
181 { FC_VPORT_ACTIVE, "Active" },
182 { FC_VPORT_DISABLED, "Disabled" },
183 { FC_VPORT_LINKDOWN, "Linkdown" },
184 { FC_VPORT_INITIALIZING, "Initializing" },
185 { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
186 { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
187 { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
188 { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
189 { FC_VPORT_FAILED, "VPort Failed" },
190};
191fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
192#define FC_VPORTSTATE_MAX_NAMELEN 24
193
194/* Reuse fc_vport_state enum function for vport_last_state */
195#define get_fc_vport_last_state_name get_fc_vport_state_name
196
197
142/* Convert fc_tgtid_binding_type values to ascii string name */ 198/* Convert fc_tgtid_binding_type values to ascii string name */
143static const struct { 199static const struct {
144 enum fc_tgtid_binding_type value; 200 enum fc_tgtid_binding_type value;
@@ -219,16 +275,16 @@ show_fc_fc4s (char *buf, u8 *fc4_list)
219} 275}
220 276
221 277
222/* Convert FC_RPORT_ROLE bit values to ascii string name */ 278/* Convert FC_PORT_ROLE bit values to ascii string name */
223static const struct { 279static const struct {
224 u32 value; 280 u32 value;
225 char *name; 281 char *name;
226} fc_remote_port_role_names[] = { 282} fc_port_role_names[] = {
227 { FC_RPORT_ROLE_FCP_TARGET, "FCP Target" }, 283 { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
228 { FC_RPORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, 284 { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
229 { FC_RPORT_ROLE_IP_PORT, "IP Port" }, 285 { FC_PORT_ROLE_IP_PORT, "IP Port" },
230}; 286};
231fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) 287fc_bitfield_name_search(port_roles, fc_port_role_names)
232 288
233/* 289/*
234 * Define roles that are specific to port_id. Values are relative to ROLE_MASK. 290 * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
@@ -252,7 +308,8 @@ static void fc_scsi_scan_rport(struct work_struct *work);
252 */ 308 */
253#define FC_STARGET_NUM_ATTRS 3 309#define FC_STARGET_NUM_ATTRS 3
254#define FC_RPORT_NUM_ATTRS 10 310#define FC_RPORT_NUM_ATTRS 10
255#define FC_HOST_NUM_ATTRS 17 311#define FC_VPORT_NUM_ATTRS 9
312#define FC_HOST_NUM_ATTRS 21
256 313
257struct fc_internal { 314struct fc_internal {
258 struct scsi_transport_template t; 315 struct scsi_transport_template t;
@@ -278,6 +335,10 @@ struct fc_internal {
278 struct transport_container rport_attr_cont; 335 struct transport_container rport_attr_cont;
279 struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS]; 336 struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
280 struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1]; 337 struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
338
339 struct transport_container vport_attr_cont;
340 struct class_device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
341 struct class_device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
281}; 342};
282 343
283#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) 344#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
@@ -318,7 +379,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
318 struct Scsi_Host *shost = dev_to_shost(dev); 379 struct Scsi_Host *shost = dev_to_shost(dev);
319 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 380 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
320 381
321 /* 382 /*
322 * Set default values easily detected by the midlayer as 383 * Set default values easily detected by the midlayer as
323 * failure cases. The scsi lldd is responsible for initializing 384 * failure cases. The scsi lldd is responsible for initializing
324 * all transport attributes to valid values per host. 385 * all transport attributes to valid values per host.
@@ -331,6 +392,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
331 sizeof(fc_host->supported_fc4s)); 392 sizeof(fc_host->supported_fc4s));
332 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; 393 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
333 fc_host->maxframe_size = -1; 394 fc_host->maxframe_size = -1;
395 fc_host->max_npiv_vports = 0;
334 memset(fc_host->serial_number, 0, 396 memset(fc_host->serial_number, 0,
335 sizeof(fc_host->serial_number)); 397 sizeof(fc_host->serial_number));
336 398
@@ -348,8 +410,11 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
348 410
349 INIT_LIST_HEAD(&fc_host->rports); 411 INIT_LIST_HEAD(&fc_host->rports);
350 INIT_LIST_HEAD(&fc_host->rport_bindings); 412 INIT_LIST_HEAD(&fc_host->rport_bindings);
413 INIT_LIST_HEAD(&fc_host->vports);
351 fc_host->next_rport_number = 0; 414 fc_host->next_rport_number = 0;
352 fc_host->next_target_id = 0; 415 fc_host->next_target_id = 0;
416 fc_host->next_vport_number = 0;
417 fc_host->npiv_vports_inuse = 0;
353 418
354 snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d", 419 snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d",
355 shost->host_no); 420 shost->host_no);
@@ -388,6 +453,16 @@ static DECLARE_TRANSPORT_CLASS(fc_rport_class,
388 NULL); 453 NULL);
389 454
390/* 455/*
456 * Setup and Remove actions for virtual ports are handled
457 * in the service functions below.
458 */
459static DECLARE_TRANSPORT_CLASS(fc_vport_class,
460 "fc_vports",
461 NULL,
462 NULL,
463 NULL);
464
465/*
391 * Module Parameters 466 * Module Parameters
392 */ 467 */
393 468
@@ -585,6 +660,9 @@ static __init int fc_transport_init(void)
585 error = transport_class_register(&fc_host_class); 660 error = transport_class_register(&fc_host_class);
586 if (error) 661 if (error)
587 return error; 662 return error;
663 error = transport_class_register(&fc_vport_class);
664 if (error)
665 return error;
588 error = transport_class_register(&fc_rport_class); 666 error = transport_class_register(&fc_rport_class);
589 if (error) 667 if (error)
590 return error; 668 return error;
@@ -596,6 +674,7 @@ static void __exit fc_transport_exit(void)
596 transport_class_unregister(&fc_transport_class); 674 transport_class_unregister(&fc_transport_class);
597 transport_class_unregister(&fc_rport_class); 675 transport_class_unregister(&fc_rport_class);
598 transport_class_unregister(&fc_host_class); 676 transport_class_unregister(&fc_host_class);
677 transport_class_unregister(&fc_vport_class);
599} 678}
600 679
601/* 680/*
@@ -800,9 +879,9 @@ show_fc_rport_roles (struct class_device *cdev, char *buf)
800 return snprintf(buf, 30, "Unknown Fabric Entity\n"); 879 return snprintf(buf, 30, "Unknown Fabric Entity\n");
801 } 880 }
802 } else { 881 } else {
803 if (rport->roles == FC_RPORT_ROLE_UNKNOWN) 882 if (rport->roles == FC_PORT_ROLE_UNKNOWN)
804 return snprintf(buf, 20, "unknown\n"); 883 return snprintf(buf, 20, "unknown\n");
805 return get_fc_remote_port_roles_names(rport->roles, buf); 884 return get_fc_port_roles_names(rport->roles, buf);
806 } 885 }
807} 886}
808static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO, 887static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
@@ -857,7 +936,7 @@ static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
857 936
858/* 937/*
859 * Note: in the target show function we recognize when the remote 938 * Note: in the target show function we recognize when the remote
 860 * port is in the hierarchy and do not allow the driver to get 939 * port is in the hierarchy and do not allow the driver to get
861 * involved in sysfs functions. The driver only gets involved if 940 * involved in sysfs functions. The driver only gets involved if
862 * it's the "old" style that doesn't use rports. 941 * it's the "old" style that doesn't use rports.
863 */ 942 */
@@ -912,6 +991,257 @@ fc_starget_rd_attr(port_id, "0x%06x\n", 20);
912 991
913 992
914/* 993/*
994 * FC Virtual Port Attribute Management
995 */
996
997#define fc_vport_show_function(field, format_string, sz, cast) \
998static ssize_t \
999show_fc_vport_##field (struct class_device *cdev, char *buf) \
1000{ \
1001 struct fc_vport *vport = transport_class_to_vport(cdev); \
1002 struct Scsi_Host *shost = vport_to_shost(vport); \
1003 struct fc_internal *i = to_fc_internal(shost->transportt); \
1004 if ((i->f->get_vport_##field) && \
1005 !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
1006 i->f->get_vport_##field(vport); \
1007 return snprintf(buf, sz, format_string, cast vport->field); \
1008}
1009
1010#define fc_vport_store_function(field) \
1011static ssize_t \
1012store_fc_vport_##field(struct class_device *cdev, const char *buf, \
1013 size_t count) \
1014{ \
1015 int val; \
1016 struct fc_vport *vport = transport_class_to_vport(cdev); \
1017 struct Scsi_Host *shost = vport_to_shost(vport); \
1018 struct fc_internal *i = to_fc_internal(shost->transportt); \
1019 char *cp; \
1020 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1021 return -EBUSY; \
1022 val = simple_strtoul(buf, &cp, 0); \
1023 if (*cp && (*cp != '\n')) \
1024 return -EINVAL; \
1025 i->f->set_vport_##field(vport, val); \
1026 return count; \
1027}
1028
1029#define fc_vport_store_str_function(field, slen) \
1030static ssize_t \
1031store_fc_vport_##field(struct class_device *cdev, const char *buf, \
1032 size_t count) \
1033{ \
1034 struct fc_vport *vport = transport_class_to_vport(cdev); \
1035 struct Scsi_Host *shost = vport_to_shost(vport); \
1036 struct fc_internal *i = to_fc_internal(shost->transportt); \
1037 unsigned int cnt=count; \
1038 \
1039 /* count may include a LF at end of string */ \
1040 if (buf[cnt-1] == '\n') \
1041 cnt--; \
1042 if (cnt > ((slen) - 1)) \
1043 return -EINVAL; \
1044 memcpy(vport->field, buf, cnt); \
1045 i->f->set_vport_##field(vport); \
1046 return count; \
1047}
1048
1049#define fc_vport_rd_attr(field, format_string, sz) \
1050 fc_vport_show_function(field, format_string, sz, ) \
1051static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1052 show_fc_vport_##field, NULL)
1053
1054#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
1055 fc_vport_show_function(field, format_string, sz, (cast)) \
1056static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1057 show_fc_vport_##field, NULL)
1058
1059#define fc_vport_rw_attr(field, format_string, sz) \
1060 fc_vport_show_function(field, format_string, sz, ) \
1061 fc_vport_store_function(field) \
1062static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1063 show_fc_vport_##field, \
1064 store_fc_vport_##field)
1065
1066#define fc_private_vport_show_function(field, format_string, sz, cast) \
1067static ssize_t \
1068show_fc_vport_##field (struct class_device *cdev, char *buf) \
1069{ \
1070 struct fc_vport *vport = transport_class_to_vport(cdev); \
1071 return snprintf(buf, sz, format_string, cast vport->field); \
1072}
1073
1074#define fc_private_vport_store_u32_function(field) \
1075static ssize_t \
1076store_fc_vport_##field(struct class_device *cdev, const char *buf, \
1077 size_t count) \
1078{ \
1079 u32 val; \
1080 struct fc_vport *vport = transport_class_to_vport(cdev); \
1081 char *cp; \
1082 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1083 return -EBUSY; \
1084 val = simple_strtoul(buf, &cp, 0); \
1085 if (*cp && (*cp != '\n')) \
1086 return -EINVAL; \
1087 vport->field = val; \
1088 return count; \
1089}
1090
1091
1092#define fc_private_vport_rd_attr(field, format_string, sz) \
1093 fc_private_vport_show_function(field, format_string, sz, ) \
1094static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1095 show_fc_vport_##field, NULL)
1096
1097#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
1098 fc_private_vport_show_function(field, format_string, sz, (cast)) \
1099static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1100 show_fc_vport_##field, NULL)
1101
1102#define fc_private_vport_rw_u32_attr(field, format_string, sz) \
1103 fc_private_vport_show_function(field, format_string, sz, ) \
1104 fc_private_vport_store_u32_function(field) \
1105static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1106 show_fc_vport_##field, \
1107 store_fc_vport_##field)
1108
1109
1110#define fc_private_vport_rd_enum_attr(title, maxlen) \
1111static ssize_t \
1112show_fc_vport_##title (struct class_device *cdev, char *buf) \
1113{ \
1114 struct fc_vport *vport = transport_class_to_vport(cdev); \
1115 const char *name; \
1116 name = get_fc_##title##_name(vport->title); \
1117 if (!name) \
1118 return -EINVAL; \
1119 return snprintf(buf, maxlen, "%s\n", name); \
1120} \
1121static FC_CLASS_DEVICE_ATTR(vport, title, S_IRUGO, \
1122 show_fc_vport_##title, NULL)
1123
1124
1125#define SETUP_VPORT_ATTRIBUTE_RD(field) \
1126 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1127 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1128 i->private_vport_attrs[count].store = NULL; \
1129 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1130 if (i->f->get_##field) \
1131 count++
1132 /* NOTE: Above MACRO differs: checks function not show bit */
1133
1134#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
1135 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1136 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1137 i->private_vport_attrs[count].store = NULL; \
1138 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1139 count++
1140
1141#define SETUP_VPORT_ATTRIBUTE_WR(field) \
1142 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1143 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1144 if (i->f->field) \
1145 count++
1146 /* NOTE: Above MACRO differs: checks function */
1147
1148#define SETUP_VPORT_ATTRIBUTE_RW(field) \
1149 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1150 if (!i->f->set_vport_##field) { \
1151 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1152 i->private_vport_attrs[count].store = NULL; \
1153 } \
1154 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1155 count++
1156 /* NOTE: Above MACRO differs: does not check show bit */
1157
1158#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
1159{ \
1160 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1161 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1162 count++; \
1163}
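(To make the macro layering concrete, here is approximately what fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long), used further below, expands to -- hand-expanded for illustration only:)

	static ssize_t
	show_fc_vport_node_name(struct class_device *cdev, char *buf)
	{
		struct fc_vport *vport = transport_class_to_vport(cdev);

		return snprintf(buf, 20, "0x%llx\n",
				(unsigned long long)vport->node_name);
	}
	static FC_CLASS_DEVICE_ATTR(vport, node_name, S_IRUGO,
				    show_fc_vport_node_name, NULL);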
1164
1165
1166/* The FC Transport Virtual Port Attributes: */
1167
1168/* Fixed Virtual Port Attributes */
1169
1170/* Dynamic Virtual Port Attributes */
1171
1172/* Private Virtual Port Attributes */
1173
1174fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1175fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1176fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1177fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1178
1179static ssize_t
1180show_fc_vport_roles (struct class_device *cdev, char *buf)
1181{
1182 struct fc_vport *vport = transport_class_to_vport(cdev);
1183
1184 if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1185 return snprintf(buf, 20, "unknown\n");
1186 return get_fc_port_roles_names(vport->roles, buf);
1187}
1188static FC_CLASS_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1189
1190fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1191
1192fc_private_vport_show_function(symbolic_name, "%s\n",
1193 FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1194fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1195static FC_CLASS_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1196 show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1197
1198static ssize_t
1199store_fc_vport_delete(struct class_device *cdev, const char *buf,
1200 size_t count)
1201{
1202 struct fc_vport *vport = transport_class_to_vport(cdev);
1203 struct Scsi_Host *shost = vport_to_shost(vport);
1204
1205 fc_queue_work(shost, &vport->vport_delete_work);
1206 return count;
1207}
1208static FC_CLASS_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1209 NULL, store_fc_vport_delete);
1210
1211
1212/*
1213 * Enable/Disable vport
1214 * Write "1" to disable, write "0" to enable
1215 */
1216static ssize_t
1217store_fc_vport_disable(struct class_device *cdev, const char *buf,
1218 size_t count)
1219{
1220 struct fc_vport *vport = transport_class_to_vport(cdev);
1221 struct Scsi_Host *shost = vport_to_shost(vport);
1222 struct fc_internal *i = to_fc_internal(shost->transportt);
1223 int stat;
1224
1225 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1226 return -EBUSY;
1227
1228 if (*buf == '0') {
1229 if (vport->vport_state != FC_VPORT_DISABLED)
1230 return -EALREADY;
1231 } else if (*buf == '1') {
1232 if (vport->vport_state == FC_VPORT_DISABLED)
1233 return -EALREADY;
1234 } else
1235 return -EINVAL;
1236
1237 stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1238 return stat ? stat : count;
1239}
1240static FC_CLASS_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1241 NULL, store_fc_vport_disable);
1242
1243
1244/*
915 * Host Attribute Management 1245 * Host Attribute Management
916 */ 1246 */
917 1247
@@ -1003,6 +1333,13 @@ static FC_CLASS_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1003 if (i->f->show_host_##field) \ 1333 if (i->f->show_host_##field) \
1004 count++ 1334 count++
1005 1335
1336#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
1337 i->private_host_attrs[count] = class_device_attr_host_##field; \
1338 i->private_host_attrs[count].attr.mode = S_IRUGO; \
1339 i->private_host_attrs[count].store = NULL; \
1340 i->host_attrs[count] = &i->private_host_attrs[count]; \
1341 count++
1342
1006#define SETUP_HOST_ATTRIBUTE_RW(field) \ 1343#define SETUP_HOST_ATTRIBUTE_RW(field) \
1007 i->private_host_attrs[count] = class_device_attr_host_##field; \ 1344 i->private_host_attrs[count] = class_device_attr_host_##field; \
1008 if (!i->f->set_host_##field) { \ 1345 if (!i->f->set_host_##field) { \
@@ -1090,6 +1427,7 @@ fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1090fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, 1427fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1091 unsigned long long); 1428 unsigned long long);
1092fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); 1429fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1430fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1093fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); 1431fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1094 1432
1095 1433
@@ -1210,6 +1548,9 @@ store_fc_private_host_issue_lip(struct class_device *cdev,
1210static FC_CLASS_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, 1548static FC_CLASS_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1211 store_fc_private_host_issue_lip); 1549 store_fc_private_host_issue_lip);
1212 1550
1551fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1552
1553
1213/* 1554/*
1214 * Host Statistics Management 1555 * Host Statistics Management
1215 */ 1556 */
@@ -1285,7 +1626,6 @@ fc_reset_statistics(struct class_device *cdev, const char *buf,
1285static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, 1626static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
1286 fc_reset_statistics); 1627 fc_reset_statistics);
1287 1628
1288
1289static struct attribute *fc_statistics_attrs[] = { 1629static struct attribute *fc_statistics_attrs[] = {
1290 &class_device_attr_host_seconds_since_last_reset.attr, 1630 &class_device_attr_host_seconds_since_last_reset.attr,
1291 &class_device_attr_host_tx_frames.attr, 1631 &class_device_attr_host_tx_frames.attr,
@@ -1316,6 +1656,142 @@ static struct attribute_group fc_statistics_group = {
1316 .attrs = fc_statistics_attrs, 1656 .attrs = fc_statistics_attrs,
1317}; 1657};
1318 1658
1659
1660/* Host Vport Attributes */
1661
1662static int
1663fc_parse_wwn(const char *ns, u64 *nm)
1664{
1665 unsigned int i, j;
1666 u8 wwn[8];
1667
1668 memset(wwn, 0, sizeof(wwn));
1669
1670 /* Validate and store the new name */
1671 for (i=0, j=0; i < 16; i++) {
1672 if ((*ns >= 'a') && (*ns <= 'f'))
1673 j = ((j << 4) | ((*ns++ -'a') + 10));
1674 else if ((*ns >= 'A') && (*ns <= 'F'))
1675 j = ((j << 4) | ((*ns++ -'A') + 10));
1676 else if ((*ns >= '0') && (*ns <= '9'))
1677 j = ((j << 4) | (*ns++ -'0'));
1678 else
1679 return -EINVAL;
1680 if (i % 2) {
1681 wwn[i/2] = j & 0xff;
1682 j = 0;
1683 }
1684 }
1685
1686 *nm = wwn_to_u64(wwn);
1687
1688 return 0;
1689}
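(fc_parse_wwn() accepts exactly 16 hex digits with no 0x prefix, packing two digits per byte and converting the resulting big-endian byte array with wwn_to_u64(). A usage sketch with an invented value:)

	static void example_parse_wwn(void)
	{
		u64 wwpn;

		/* "2100001b3281b0e5" is an arbitrary example WWPN */
		if (fc_parse_wwn("2100001b3281b0e5", &wwpn) == 0)
			printk(KERN_DEBUG "wwpn 0x%llx\n",
			       (unsigned long long)wwpn); /* 0x2100001b3281b0e5 */
	}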
1690
1691
1692/*
1693 * "Short-cut" sysfs variable to create a new vport on a FC Host.
1694 * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
 1695 * will default to an NPIV-based FCP_Initiator; the WWNs are specified
1696 * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
1697 */
1698static ssize_t
1699store_fc_host_vport_create(struct class_device *cdev, const char *buf,
1700 size_t count)
1701{
1702 struct Scsi_Host *shost = transport_class_to_shost(cdev);
1703 struct fc_vport_identifiers vid;
1704 struct fc_vport *vport;
1705 unsigned int cnt=count;
1706 int stat;
1707
1708 memset(&vid, 0, sizeof(vid));
1709
1710 /* count may include a LF at end of string */
1711 if (buf[cnt-1] == '\n')
1712 cnt--;
1713
1714 /* validate we have enough characters for WWPN */
1715 if ((cnt != (16+1+16)) || (buf[16] != ':'))
1716 return -EINVAL;
1717
1718 stat = fc_parse_wwn(&buf[0], &vid.port_name);
1719 if (stat)
1720 return stat;
1721
1722 stat = fc_parse_wwn(&buf[17], &vid.node_name);
1723 if (stat)
1724 return stat;
1725
1726 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1727 vid.vport_type = FC_PORTTYPE_NPIV;
 1728 /* vid.symbolic_name is already zeroed */
1729 vid.disable = false; /* always enabled */
1730
1731 /* we only allow support on Channel 0 !!! */
1732 stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
1733 return stat ? stat : count;
1734}
1735static FC_CLASS_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
1736 store_fc_host_vport_create);
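(From userspace, this attribute takes the 33-character "<WWPN>:<WWNN>" string, with an optional trailing newline. A hedged usage sketch -- the sysfs path and WWNs are illustrative; the vport_delete attribute defined next accepts the same format:)

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical path; substitute the real fc_host instance */
		const char *attr = "/sys/class/fc_host/host0/vport_create";
		const char *ids = "2100001b3281b0e5:2001001b3281b0e5\n";
		int fd = open(attr, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, ids, strlen(ids)) < 0)
			perror("write");
		close(fd);
		return 0;
	}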
1737
1738
1739/*
1740 * "Short-cut" sysfs variable to delete a vport on a FC Host.
1741 * Vport is identified by a string containing "<WWPN>:<WWNN>".
1742 * The WWNs are specified as hex characters, and may *not* contain
1743 * any prefixes (e.g. 0x, x, etc)
1744 */
1745static ssize_t
1746store_fc_host_vport_delete(struct class_device *cdev, const char *buf,
1747 size_t count)
1748{
1749 struct Scsi_Host *shost = transport_class_to_shost(cdev);
1750 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1751 struct fc_vport *vport;
1752 u64 wwpn, wwnn;
1753 unsigned long flags;
1754 unsigned int cnt=count;
1755 int stat, match;
1756
1757 /* count may include a LF at end of string */
1758 if (buf[cnt-1] == '\n')
1759 cnt--;
1760
1761 /* validate we have enough characters for WWPN */
1762 if ((cnt != (16+1+16)) || (buf[16] != ':'))
1763 return -EINVAL;
1764
1765 stat = fc_parse_wwn(&buf[0], &wwpn);
1766 if (stat)
1767 return stat;
1768
1769 stat = fc_parse_wwn(&buf[17], &wwnn);
1770 if (stat)
1771 return stat;
1772
1773 spin_lock_irqsave(shost->host_lock, flags);
1774 match = 0;
1775 /* we only allow support on Channel 0 !!! */
1776 list_for_each_entry(vport, &fc_host->vports, peers) {
1777 if ((vport->channel == 0) &&
1778 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1779 match = 1;
1780 break;
1781 }
1782 }
1783 spin_unlock_irqrestore(shost->host_lock, flags);
1784
1785 if (!match)
1786 return -ENODEV;
1787
1788 stat = fc_vport_terminate(vport);
1789 return stat ? stat : count;
1790}
1791static FC_CLASS_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
1792 store_fc_host_vport_delete);
1793
1794
1319static int fc_host_match(struct attribute_container *cont, 1795static int fc_host_match(struct attribute_container *cont,
1320 struct device *dev) 1796 struct device *dev)
1321{ 1797{
@@ -1387,6 +1863,40 @@ static int fc_rport_match(struct attribute_container *cont,
1387} 1863}
1388 1864
1389 1865
1866static void fc_vport_dev_release(struct device *dev)
1867{
1868 struct fc_vport *vport = dev_to_vport(dev);
1869 put_device(dev->parent); /* release kobj parent */
1870 kfree(vport);
1871}
1872
1873int scsi_is_fc_vport(const struct device *dev)
1874{
1875 return dev->release == fc_vport_dev_release;
1876}
1877EXPORT_SYMBOL(scsi_is_fc_vport);
1878
1879static int fc_vport_match(struct attribute_container *cont,
1880 struct device *dev)
1881{
1882 struct fc_vport *vport;
1883 struct Scsi_Host *shost;
1884 struct fc_internal *i;
1885
1886 if (!scsi_is_fc_vport(dev))
1887 return 0;
1888 vport = dev_to_vport(dev);
1889
1890 shost = vport_to_shost(vport);
1891 if (!shost->transportt || shost->transportt->host_attrs.ac.class
1892 != &fc_host_class.class)
1893 return 0;
1894
1895 i = to_fc_internal(shost->transportt);
1896 return &i->vport_attr_cont.ac == cont;
1897}
1898
1899
1390/** 1900/**
1391 * fc_timed_out - FC Transport I/O timeout intercept handler 1901 * fc_timed_out - FC Transport I/O timeout intercept handler
1392 * 1902 *
@@ -1433,6 +1943,9 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
1433 if (rport->scsi_target_id == -1) 1943 if (rport->scsi_target_id == -1)
1434 continue; 1944 continue;
1435 1945
1946 if (rport->port_state != FC_PORTSTATE_ONLINE)
1947 continue;
1948
1436 if ((channel == SCAN_WILD_CARD || channel == rport->channel) && 1949 if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
1437 (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) { 1950 (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
1438 scsi_scan_target(&rport->dev, rport->channel, 1951 scsi_scan_target(&rport->dev, rport->channel,
@@ -1472,6 +1985,11 @@ fc_attach_transport(struct fc_function_template *ft)
1472 i->rport_attr_cont.ac.match = fc_rport_match; 1985 i->rport_attr_cont.ac.match = fc_rport_match;
1473 transport_container_register(&i->rport_attr_cont); 1986 transport_container_register(&i->rport_attr_cont);
1474 1987
1988 i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
1989 i->vport_attr_cont.ac.class = &fc_vport_class.class;
1990 i->vport_attr_cont.ac.match = fc_vport_match;
1991 transport_container_register(&i->vport_attr_cont);
1992
1475 i->f = ft; 1993 i->f = ft;
1476 1994
1477 /* Transport uses the shost workq for scsi scanning */ 1995 /* Transport uses the shost workq for scsi scanning */
@@ -1480,7 +1998,7 @@ fc_attach_transport(struct fc_function_template *ft)
1480 i->t.eh_timed_out = fc_timed_out; 1998 i->t.eh_timed_out = fc_timed_out;
1481 1999
1482 i->t.user_scan = fc_user_scan; 2000 i->t.user_scan = fc_user_scan;
1483 2001
1484 /* 2002 /*
1485 * Setup SCSI Target Attributes. 2003 * Setup SCSI Target Attributes.
1486 */ 2004 */
@@ -1505,6 +2023,10 @@ fc_attach_transport(struct fc_function_template *ft)
1505 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); 2023 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
1506 SETUP_HOST_ATTRIBUTE_RD(supported_speeds); 2024 SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
1507 SETUP_HOST_ATTRIBUTE_RD(maxframe_size); 2025 SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2026 if (ft->vport_create) {
2027 SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2028 SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2029 }
1508 SETUP_HOST_ATTRIBUTE_RD(serial_number); 2030 SETUP_HOST_ATTRIBUTE_RD(serial_number);
1509 2031
1510 SETUP_HOST_ATTRIBUTE_RD(port_id); 2032 SETUP_HOST_ATTRIBUTE_RD(port_id);
@@ -1520,6 +2042,10 @@ fc_attach_transport(struct fc_function_template *ft)
1520 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 2042 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
1521 if (ft->issue_fc_host_lip) 2043 if (ft->issue_fc_host_lip)
1522 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); 2044 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2045 if (ft->vport_create)
2046 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2047 if (ft->vport_delete)
2048 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
1523 2049
1524 BUG_ON(count > FC_HOST_NUM_ATTRS); 2050 BUG_ON(count > FC_HOST_NUM_ATTRS);
1525 2051
@@ -1545,6 +2071,24 @@ fc_attach_transport(struct fc_function_template *ft)
1545 2071
1546 i->rport_attrs[count] = NULL; 2072 i->rport_attrs[count] = NULL;
1547 2073
2074 /*
2075 * Setup Virtual Port Attributes.
2076 */
2077 count=0;
2078 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2079 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2080 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2081 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2082 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2083 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2084 SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2085 SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2086 SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2087
2088 BUG_ON(count > FC_VPORT_NUM_ATTRS);
2089
2090 i->vport_attrs[count] = NULL;
2091
1548 return &i->t; 2092 return &i->t;
1549} 2093}
1550EXPORT_SYMBOL(fc_attach_transport); 2094EXPORT_SYMBOL(fc_attach_transport);
@@ -1556,6 +2100,7 @@ void fc_release_transport(struct scsi_transport_template *t)
1556 transport_container_unregister(&i->t.target_attrs); 2100 transport_container_unregister(&i->t.target_attrs);
1557 transport_container_unregister(&i->t.host_attrs); 2101 transport_container_unregister(&i->t.host_attrs);
1558 transport_container_unregister(&i->rport_attr_cont); 2102 transport_container_unregister(&i->rport_attr_cont);
2103 transport_container_unregister(&i->vport_attr_cont);
1559 2104
1560 kfree(i); 2105 kfree(i);
1561} 2106}
@@ -1667,9 +2212,17 @@ fc_flush_devloss(struct Scsi_Host *shost)
1667void 2212void
1668fc_remove_host(struct Scsi_Host *shost) 2213fc_remove_host(struct Scsi_Host *shost)
1669{ 2214{
1670 struct fc_rport *rport, *next_rport; 2215 struct fc_vport *vport = NULL, *next_vport = NULL;
2216 struct fc_rport *rport = NULL, *next_rport = NULL;
1671 struct workqueue_struct *work_q; 2217 struct workqueue_struct *work_q;
1672 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2218 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2219 unsigned long flags;
2220
2221 spin_lock_irqsave(shost->host_lock, flags);
2222
2223 /* Remove any vports */
2224 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
2225 fc_queue_work(shost, &vport->vport_delete_work);
1673 2226
1674 /* Remove any remote ports */ 2227 /* Remove any remote ports */
1675 list_for_each_entry_safe(rport, next_rport, 2228 list_for_each_entry_safe(rport, next_rport,
@@ -1686,6 +2239,8 @@ fc_remove_host(struct Scsi_Host *shost)
1686 fc_queue_work(shost, &rport->rport_delete_work); 2239 fc_queue_work(shost, &rport->rport_delete_work);
1687 } 2240 }
1688 2241
2242 spin_unlock_irqrestore(shost->host_lock, flags);
2243
1689 /* flush all scan work items */ 2244 /* flush all scan work items */
1690 scsi_flush_work(shost); 2245 scsi_flush_work(shost);
1691 2246
@@ -1744,7 +2299,7 @@ fc_rport_final_delete(struct work_struct *work)
1744 unsigned long flags; 2299 unsigned long flags;
1745 2300
1746 /* 2301 /*
 1747 * if a scan is pending, flush the SCSI Host work_q so that 2302 * if a scan is pending, flush the SCSI Host work_q so that
 1748 * we can reclaim the rport scan work element. 2303 * we can reclaim the rport scan work element.
1749 */ 2304 */
1750 if (rport->flags & FC_RPORT_SCAN_PENDING) 2305 if (rport->flags & FC_RPORT_SCAN_PENDING)
@@ -1844,7 +2399,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1844 spin_lock_irqsave(shost->host_lock, flags); 2399 spin_lock_irqsave(shost->host_lock, flags);
1845 2400
1846 rport->number = fc_host->next_rport_number++; 2401 rport->number = fc_host->next_rport_number++;
1847 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 2402 if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
1848 rport->scsi_target_id = fc_host->next_target_id++; 2403 rport->scsi_target_id = fc_host->next_target_id++;
1849 else 2404 else
1850 rport->scsi_target_id = -1; 2405 rport->scsi_target_id = -1;
@@ -1869,7 +2424,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1869 transport_add_device(dev); 2424 transport_add_device(dev);
1870 transport_configure_device(dev); 2425 transport_configure_device(dev);
1871 2426
1872 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) { 2427 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
1873 /* initiate a scan of the target */ 2428 /* initiate a scan of the target */
1874 rport->flags |= FC_RPORT_SCAN_PENDING; 2429 rport->flags |= FC_RPORT_SCAN_PENDING;
1875 scsi_queue_work(shost, &rport->scan_work); 2430 scsi_queue_work(shost, &rport->scan_work);
@@ -2003,7 +2558,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2003 2558
2004 /* was a target, not in roles */ 2559 /* was a target, not in roles */
2005 if ((rport->scsi_target_id != -1) && 2560 if ((rport->scsi_target_id != -1) &&
2006 (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET))) 2561 (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
2007 return rport; 2562 return rport;
2008 2563
2009 /* 2564 /*
@@ -2086,7 +2641,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2086 memset(rport->dd_data, 0, 2641 memset(rport->dd_data, 0,
2087 fci->f->dd_fcrport_size); 2642 fci->f->dd_fcrport_size);
2088 2643
2089 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) { 2644 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2090 /* initiate a scan of the target */ 2645 /* initiate a scan of the target */
2091 rport->flags |= FC_RPORT_SCAN_PENDING; 2646 rport->flags |= FC_RPORT_SCAN_PENDING;
2092 scsi_queue_work(shost, &rport->scan_work); 2647 scsi_queue_work(shost, &rport->scan_work);
@@ -2243,11 +2798,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
2243 int create = 0; 2798 int create = 0;
2244 2799
2245 spin_lock_irqsave(shost->host_lock, flags); 2800 spin_lock_irqsave(shost->host_lock, flags);
2246 if (roles & FC_RPORT_ROLE_FCP_TARGET) { 2801 if (roles & FC_PORT_ROLE_FCP_TARGET) {
2247 if (rport->scsi_target_id == -1) { 2802 if (rport->scsi_target_id == -1) {
2248 rport->scsi_target_id = fc_host->next_target_id++; 2803 rport->scsi_target_id = fc_host->next_target_id++;
2249 create = 1; 2804 create = 1;
2250 } else if (!(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) 2805 } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
2251 create = 1; 2806 create = 1;
2252 } 2807 }
2253 2808
@@ -2294,7 +2849,7 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
2294 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port, 2849 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
2295 * which we blocked, and has now failed to return 2850 * which we blocked, and has now failed to return
2296 * in the allotted time. 2851 * in the allotted time.
2297 * 2852 *
2298 * @work: rport target that failed to reappear in the allotted time. 2853 * @work: rport target that failed to reappear in the allotted time.
2299 **/ 2854 **/
2300static void 2855static void
@@ -2317,7 +2872,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2317 */ 2872 */
2318 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2873 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2319 (rport->scsi_target_id != -1) && 2874 (rport->scsi_target_id != -1) &&
2320 !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2875 !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
2321 dev_printk(KERN_ERR, &rport->dev, 2876 dev_printk(KERN_ERR, &rport->dev,
2322 "blocked FC remote port time out: no longer" 2877 "blocked FC remote port time out: no longer"
2323 " a FCP target, removing starget\n"); 2878 " a FCP target, removing starget\n");
@@ -2367,7 +2922,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2367 */ 2922 */
2368 rport->maxframe_size = -1; 2923 rport->maxframe_size = -1;
2369 rport->supported_classes = FC_COS_UNSPECIFIED; 2924 rport->supported_classes = FC_COS_UNSPECIFIED;
2370 rport->roles = FC_RPORT_ROLE_UNKNOWN; 2925 rport->roles = FC_PORT_ROLE_UNKNOWN;
2371 rport->port_state = FC_PORTSTATE_NOTPRESENT; 2926 rport->port_state = FC_PORTSTATE_NOTPRESENT;
2372 2927
 2373 /* remove the identifiers that aren't used in the consistent binding */ 2928 /* remove the identifiers that aren't used in the consistent binding */
@@ -2436,7 +2991,7 @@ fc_scsi_scan_rport(struct work_struct *work)
2436 unsigned long flags; 2991 unsigned long flags;
2437 2992
2438 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2993 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2439 (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2994 (rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
2440 scsi_scan_target(&rport->dev, rport->channel, 2995 scsi_scan_target(&rport->dev, rport->channel,
2441 rport->scsi_target_id, SCAN_WILD_CARD, 1); 2996 rport->scsi_target_id, SCAN_WILD_CARD, 1);
2442 } 2997 }
@@ -2447,7 +3002,227 @@ fc_scsi_scan_rport(struct work_struct *work)
2447} 3002}
2448 3003
2449 3004
2450MODULE_AUTHOR("Martin Hicks"); 3005/**
3006 * fc_vport_create - allocates and creates a FC virtual port.
3007 * @shost: scsi host the virtual port is connected to.
3008 * @channel: Channel on shost port connected to.
3009 * @pdev: parent device for vport
3010 * @ids: The world wide names, FC4 port roles, etc for
3011 * the virtual port.
3012 * @ret_vport: The pointer to the created vport.
3013 *
3014 * Allocates and creates the vport structure, calls the parent host
 3015 * to instantiate the vport, then completes w/ class and sysfs creation.
3016 *
3017 * Notes:
3018 * This routine assumes no locks are held on entry.
3019 **/
3020static int
3021fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3022 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3023{
3024 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3025 struct fc_internal *fci = to_fc_internal(shost->transportt);
3026 struct fc_vport *vport;
3027 struct device *dev;
3028 unsigned long flags;
3029 size_t size;
3030 int error;
3031
3032 *ret_vport = NULL;
3033
3034 if ( ! fci->f->vport_create)
3035 return -ENOENT;
3036
3037 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3038 vport = kzalloc(size, GFP_KERNEL);
3039 if (unlikely(!vport)) {
3040 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
3041 return -ENOMEM;
3042 }
3043
3044 vport->vport_state = FC_VPORT_UNKNOWN;
3045 vport->vport_last_state = FC_VPORT_UNKNOWN;
3046 vport->node_name = ids->node_name;
3047 vport->port_name = ids->port_name;
3048 vport->roles = ids->roles;
3049 vport->vport_type = ids->vport_type;
3050 if (fci->f->dd_fcvport_size)
3051 vport->dd_data = &vport[1];
3052 vport->shost = shost;
3053 vport->channel = channel;
3054 vport->flags = FC_VPORT_CREATING;
3055 INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3056
3057 spin_lock_irqsave(shost->host_lock, flags);
3058
3059 if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3060 spin_unlock_irqrestore(shost->host_lock, flags);
3061 kfree(vport);
3062 return -ENOSPC;
3063 }
3064 fc_host->npiv_vports_inuse++;
3065 vport->number = fc_host->next_vport_number++;
3066 list_add_tail(&vport->peers, &fc_host->vports);
3067 get_device(&shost->shost_gendev); /* for fc_host->vport list */
3068
3069 spin_unlock_irqrestore(shost->host_lock, flags);
3070
3071 dev = &vport->dev;
3072 device_initialize(dev); /* takes self reference */
3073 dev->parent = get_device(pdev); /* takes parent reference */
3074 dev->release = fc_vport_dev_release;
3075 sprintf(dev->bus_id, "vport-%d:%d-%d",
3076 shost->host_no, channel, vport->number);
3077 transport_setup_device(dev);
3078
3079 error = device_add(dev);
3080 if (error) {
3081 printk(KERN_ERR "FC Virtual Port device_add failed\n");
3082 goto delete_vport;
3083 }
3084 transport_add_device(dev);
3085 transport_configure_device(dev);
3086
3087 error = fci->f->vport_create(vport, ids->disable);
3088 if (error) {
3089 printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3090 goto delete_vport_all;
3091 }
3092
3093 /*
3094 * if the parent isn't the physical adapter's Scsi_Host, ensure
 3095 * the Scsi_Host at least contains a symlink to the vport.
3096 */
3097 if (pdev != &shost->shost_gendev) {
3098 error = sysfs_create_link(&shost->shost_gendev.kobj,
3099 &dev->kobj, dev->bus_id);
3100 if (error)
3101 printk(KERN_ERR
3102 "%s: Cannot create vport symlinks for "
3103 "%s, err=%d\n",
3104 __FUNCTION__, dev->bus_id, error);
3105 }
3106 spin_lock_irqsave(shost->host_lock, flags);
3107 vport->flags &= ~FC_VPORT_CREATING;
3108 spin_unlock_irqrestore(shost->host_lock, flags);
3109
3110 dev_printk(KERN_NOTICE, pdev,
3111 "%s created via shost%d channel %d\n", dev->bus_id,
3112 shost->host_no, channel);
3113
3114 *ret_vport = vport;
3115
3116 return 0;
3117
3118delete_vport_all:
3119 transport_remove_device(dev);
3120 device_del(dev);
3121delete_vport:
3122 transport_destroy_device(dev);
3123 spin_lock_irqsave(shost->host_lock, flags);
3124 list_del(&vport->peers);
3125 put_device(&shost->shost_gendev); /* for fc_host->vport list */
3126 fc_host->npiv_vports_inuse--;
3127 spin_unlock_irqrestore(shost->host_lock, flags);
3128 put_device(dev->parent);
3129 kfree(vport);
3130
3131 return error;
3132}
3133
3134
3135/**
3136 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
3137 * @vport: fc_vport to be terminated
3138 *
3139 * Calls the LLDD vport_delete() function, then deallocates and removes
3140 * the vport from the shost and object tree.
3141 *
3142 * Notes:
3143 * This routine assumes no locks are held on entry.
3144 **/
3145int
3146fc_vport_terminate(struct fc_vport *vport)
3147{
3148 struct Scsi_Host *shost = vport_to_shost(vport);
3149 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3150 struct fc_internal *i = to_fc_internal(shost->transportt);
3151 struct device *dev = &vport->dev;
3152 unsigned long flags;
3153 int stat;
3154
3155 spin_lock_irqsave(shost->host_lock, flags);
3156 if (vport->flags & FC_VPORT_CREATING) {
3157 spin_unlock_irqrestore(shost->host_lock, flags);
3158 return -EBUSY;
3159 }
3160 if (vport->flags & (FC_VPORT_DEL)) {
3161 spin_unlock_irqrestore(shost->host_lock, flags);
3162 return -EALREADY;
3163 }
3164 vport->flags |= FC_VPORT_DELETING;
3165 spin_unlock_irqrestore(shost->host_lock, flags);
3166
3167 if (i->f->vport_delete)
3168 stat = i->f->vport_delete(vport);
3169 else
3170 stat = -ENOENT;
3171
3172 spin_lock_irqsave(shost->host_lock, flags);
3173 vport->flags &= ~FC_VPORT_DELETING;
3174 if (!stat) {
3175 vport->flags |= FC_VPORT_DELETED;
3176 list_del(&vport->peers);
3177 fc_host->npiv_vports_inuse--;
3178 put_device(&shost->shost_gendev); /* for fc_host->vport list */
3179 }
3180 spin_unlock_irqrestore(shost->host_lock, flags);
3181
3182 if (stat)
3183 return stat;
3184
3185 if (dev->parent != &shost->shost_gendev)
3186 sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
3187 transport_remove_device(dev);
3188 device_del(dev);
3189 transport_destroy_device(dev);
3190
3191 /*
3192 * Removing our self-reference should mean our
3193 * release function gets called, which will drop the remaining
3194 * parent reference and free the data structure.
3195 */
3196 put_device(dev); /* for self-reference */
3197
3198 return 0; /* SUCCESS */
3199}
3200EXPORT_SYMBOL(fc_vport_terminate);
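(For orientation, a hypothetical LLDD-side skeleton showing the fc_function_template hooks this code calls; the mydrv_* names are invented, and only the listed fields are the ones consumed here:)

	static int mydrv_vport_create(struct fc_vport *vport, bool disable)
	{
		/* log the fabric in via NPIV FDISC; state goes in vport->dd_data */
		return 0;	/* non-zero makes fc_vport_create() unwind */
	}

	static int mydrv_vport_disable(struct fc_vport *vport, bool disable)
	{
		/* quiesce or resume the virtual link */
		return 0;
	}

	static int mydrv_vport_delete(struct fc_vport *vport)
	{
		/* log out and free per-vport state; fc_vport_terminate()
		 * handles the sysfs/object teardown afterwards */
		return 0;
	}

	static struct fc_function_template mydrv_transport_functions = {
		/* ... existing rport/host methods ... */
		.vport_create	 = mydrv_vport_create,
		.vport_disable	 = mydrv_vport_disable,
		.vport_delete	 = mydrv_vport_delete,
		.dd_fcvport_size = 0,	/* bytes of per-vport driver data */
	};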
3201
3202/**
3203 * fc_vport_sched_delete - workq-based delete request for a vport
3204 *
3205 * @work: vport to be deleted.
3206 **/
3207static void
3208fc_vport_sched_delete(struct work_struct *work)
3209{
3210 struct fc_vport *vport =
3211 container_of(work, struct fc_vport, vport_delete_work);
3212 int stat;
3213
3214 stat = fc_vport_terminate(vport);
3215 if (stat)
3216 dev_printk(KERN_ERR, vport->dev.parent,
 3217 "%s: %s (created via shost%d channel %d) could "
 3218 "not be deleted - error %d\n", __FUNCTION__,
3219 vport->dev.bus_id, vport->shost->host_no,
3220 vport->channel, stat);
3221}
3222
3223
3224/* Original Author: Martin Hicks */
3225MODULE_AUTHOR("James Smart");
2451MODULE_DESCRIPTION("FC Transport Attributes"); 3226MODULE_DESCRIPTION("FC Transport Attributes");
2452MODULE_LICENSE("GPL"); 3227MODULE_LICENSE("GPL");
2453 3228
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index caf1836bbeca..34c1860a259d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,9 +30,9 @@
30#include <scsi/scsi_transport_iscsi.h> 30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/iscsi_if.h> 31#include <scsi/iscsi_if.h>
32 32
33#define ISCSI_SESSION_ATTRS 11 33#define ISCSI_SESSION_ATTRS 15
34#define ISCSI_CONN_ATTRS 11 34#define ISCSI_CONN_ATTRS 11
35#define ISCSI_HOST_ATTRS 0 35#define ISCSI_HOST_ATTRS 4
36#define ISCSI_TRANSPORT_VERSION "2.0-724" 36#define ISCSI_TRANSPORT_VERSION "2.0-724"
37 37
38struct iscsi_internal { 38struct iscsi_internal {
@@ -609,12 +609,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
609 int t = done ? NLMSG_DONE : type; 609 int t = done ? NLMSG_DONE : type;
610 610
611 skb = alloc_skb(len, GFP_ATOMIC); 611 skb = alloc_skb(len, GFP_ATOMIC);
612 /* 612 if (!skb) {
613 * FIXME: 613 printk(KERN_ERR "Could not allocate skb to send reply.\n");
614 * user is supposed to react on iferror == -ENOMEM; 614 return -ENOMEM;
615 * see iscsi_if_rx(). 615 }
616 */
617 BUG_ON(!skb);
618 616
619 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); 617 nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
620 nlh->nlmsg_flags = flags; 618 nlh->nlmsg_flags = flags;
@@ -816,6 +814,8 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_uevent *ev)
816 uint32_t hostno; 814 uint32_t hostno;
817 815
818 session = transport->create_session(transport, &priv->t, 816 session = transport->create_session(transport, &priv->t,
817 ev->u.c_session.cmds_max,
818 ev->u.c_session.queue_depth,
819 ev->u.c_session.initial_cmdsn, 819 ev->u.c_session.initial_cmdsn,
820 &hostno); 820 &hostno);
821 if (!session) 821 if (!session)
@@ -947,15 +947,50 @@ static int
947iscsi_tgt_dscvr(struct iscsi_transport *transport, 947iscsi_tgt_dscvr(struct iscsi_transport *transport,
948 struct iscsi_uevent *ev) 948 struct iscsi_uevent *ev)
949{ 949{
950 struct Scsi_Host *shost;
950 struct sockaddr *dst_addr; 951 struct sockaddr *dst_addr;
952 int err;
951 953
952 if (!transport->tgt_dscvr) 954 if (!transport->tgt_dscvr)
953 return -EINVAL; 955 return -EINVAL;
954 956
957 shost = scsi_host_lookup(ev->u.tgt_dscvr.host_no);
958 if (IS_ERR(shost)) {
959 printk(KERN_ERR "target discovery could not find host no %u\n",
960 ev->u.tgt_dscvr.host_no);
961 return -ENODEV;
962 }
963
964
955 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); 965 dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
956 return transport->tgt_dscvr(ev->u.tgt_dscvr.type, 966 err = transport->tgt_dscvr(shost, ev->u.tgt_dscvr.type,
957 ev->u.tgt_dscvr.host_no, 967 ev->u.tgt_dscvr.enable, dst_addr);
958 ev->u.tgt_dscvr.enable, dst_addr); 968 scsi_host_put(shost);
969 return err;
970}
971
972static int
973iscsi_set_host_param(struct iscsi_transport *transport,
974 struct iscsi_uevent *ev)
975{
976 char *data = (char*)ev + sizeof(*ev);
977 struct Scsi_Host *shost;
978 int err;
979
980 if (!transport->set_host_param)
981 return -ENOSYS;
982
983 shost = scsi_host_lookup(ev->u.set_host_param.host_no);
984 if (IS_ERR(shost)) {
985 printk(KERN_ERR "set_host_param could not find host no %u\n",
986 ev->u.set_host_param.host_no);
987 return -ENODEV;
988 }
989
990 err = transport->set_host_param(shost, ev->u.set_host_param.param,
991 data, ev->u.set_host_param.len);
992 scsi_host_put(shost);
993 return err;
959} 994}
960 995
961static int 996static int
@@ -1049,8 +1084,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1049 case ISCSI_UEVENT_TGT_DSCVR: 1084 case ISCSI_UEVENT_TGT_DSCVR:
1050 err = iscsi_tgt_dscvr(transport, ev); 1085 err = iscsi_tgt_dscvr(transport, ev);
1051 break; 1086 break;
1087 case ISCSI_UEVENT_SET_HOST_PARAM:
1088 err = iscsi_set_host_param(transport, ev);
1089 break;
1052 default: 1090 default:
1053 err = -EINVAL; 1091 err = -ENOSYS;
1054 break; 1092 break;
1055 } 1093 }
1056 1094
@@ -1160,30 +1198,37 @@ iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
1160/* 1198/*
1161 * iSCSI session attrs 1199 * iSCSI session attrs
1162 */ 1200 */
1163#define iscsi_session_attr_show(param) \ 1201#define iscsi_session_attr_show(param, perm) \
1164static ssize_t \ 1202static ssize_t \
1165show_session_param_##param(struct class_device *cdev, char *buf) \ 1203show_session_param_##param(struct class_device *cdev, char *buf) \
1166{ \ 1204{ \
1167 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \ 1205 struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev); \
1168 struct iscsi_transport *t = session->transport; \ 1206 struct iscsi_transport *t = session->transport; \
1207 \
1208 if (perm && !capable(CAP_SYS_ADMIN)) \
1209 return -EACCES; \
1169 return t->get_session_param(session, param, buf); \ 1210 return t->get_session_param(session, param, buf); \
1170} 1211}
1171 1212
1172#define iscsi_session_attr(field, param) \ 1213#define iscsi_session_attr(field, param, perm) \
1173 iscsi_session_attr_show(param) \ 1214 iscsi_session_attr_show(param, perm) \
1174static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ 1215static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
1175 NULL); 1216 NULL);
1176 1217
1177iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME); 1218iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
1178iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN); 1219iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
1179iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T); 1220iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
1180iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN); 1221iscsi_session_attr(immediate_data, ISCSI_PARAM_IMM_DATA_EN, 0);
1181iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST); 1222iscsi_session_attr(first_burst_len, ISCSI_PARAM_FIRST_BURST, 0);
1182iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST); 1223iscsi_session_attr(max_burst_len, ISCSI_PARAM_MAX_BURST, 0);
1183iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN); 1224iscsi_session_attr(data_pdu_in_order, ISCSI_PARAM_PDU_INORDER_EN, 0);
1184iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN); 1225iscsi_session_attr(data_seq_in_order, ISCSI_PARAM_DATASEQ_INORDER_EN, 0);
1185iscsi_session_attr(erl, ISCSI_PARAM_ERL); 1226iscsi_session_attr(erl, ISCSI_PARAM_ERL, 0);
1186iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT); 1227iscsi_session_attr(tpgt, ISCSI_PARAM_TPGT, 0);
1228iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
1229iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
1230iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
1231iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
1187 1232
1188#define iscsi_priv_session_attr_show(field, format) \ 1233#define iscsi_priv_session_attr_show(field, format) \
1189static ssize_t \ 1234static ssize_t \
@@ -1199,6 +1244,28 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO, show_priv_session_##field, \
1199 NULL) 1244 NULL)
1200iscsi_priv_session_attr(recovery_tmo, "%d"); 1245iscsi_priv_session_attr(recovery_tmo, "%d");
1201 1246
1247/*
1248 * iSCSI host attrs
1249 */
1250#define iscsi_host_attr_show(param) \
1251static ssize_t \
1252show_host_param_##param(struct class_device *cdev, char *buf) \
1253{ \
1254 struct Scsi_Host *shost = transport_class_to_shost(cdev); \
1255 struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); \
1256 return priv->iscsi_transport->get_host_param(shost, param, buf); \
1257}
1258
1259#define iscsi_host_attr(field, param) \
1260 iscsi_host_attr_show(param) \
1261static ISCSI_CLASS_ATTR(host, field, S_IRUGO, show_host_param_##param, \
1262 NULL);
1263
1264iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
1265iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
1266iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
1267iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
1268
1202#define SETUP_PRIV_SESSION_RD_ATTR(field) \ 1269#define SETUP_PRIV_SESSION_RD_ATTR(field) \
1203do { \ 1270do { \
1204 priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \ 1271 priv->session_attrs[count] = &class_device_attr_priv_sess_##field; \
@@ -1222,6 +1289,14 @@ do { \
1222 } \ 1289 } \
1223} while (0) 1290} while (0)
1224 1291
1292#define SETUP_HOST_RD_ATTR(field, param_flag) \
1293do { \
1294 if (tt->host_param_mask & param_flag) { \
1295 priv->host_attrs[count] = &class_device_attr_host_##field; \
1296 count++; \
1297 } \
1298} while (0)
1299
1225static int iscsi_session_match(struct attribute_container *cont, 1300static int iscsi_session_match(struct attribute_container *cont,
1226 struct device *dev) 1301 struct device *dev)
1227{ 1302{
@@ -1323,9 +1398,16 @@ iscsi_register_transport(struct iscsi_transport *tt)
1323 priv->t.host_attrs.ac.class = &iscsi_host_class.class; 1398 priv->t.host_attrs.ac.class = &iscsi_host_class.class;
1324 priv->t.host_attrs.ac.match = iscsi_host_match; 1399 priv->t.host_attrs.ac.match = iscsi_host_match;
1325 priv->t.host_size = sizeof(struct iscsi_host); 1400 priv->t.host_size = sizeof(struct iscsi_host);
1326 priv->host_attrs[0] = NULL;
1327 transport_container_register(&priv->t.host_attrs); 1401 transport_container_register(&priv->t.host_attrs);
1328 1402
1403 SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
1404 SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
1405 SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
1406 SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
1407 BUG_ON(count > ISCSI_HOST_ATTRS);
1408 priv->host_attrs[count] = NULL;
1409 count = 0;
1410
1329 /* connection parameters */ 1411 /* connection parameters */
1330 priv->conn_cont.ac.attrs = &priv->conn_attrs[0]; 1412 priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
1331 priv->conn_cont.ac.class = &iscsi_connection_class.class; 1413 priv->conn_cont.ac.class = &iscsi_connection_class.class;
@@ -1364,6 +1446,10 @@ iscsi_register_transport(struct iscsi_transport *tt)
1364 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL); 1446 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
1365 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME); 1447 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
1366 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT); 1448 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
 1449 SETUP_SESSION_RD_ATTR(password, ISCSI_PASSWORD);
 1450 SETUP_SESSION_RD_ATTR(password_in, ISCSI_PASSWORD_IN);
 1451 SETUP_SESSION_RD_ATTR(username, ISCSI_USERNAME);
 1452 SETUP_SESSION_RD_ATTR(username_in, ISCSI_USERNAME_IN);
1367 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo); 1453 SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
1368 1454
1369 BUG_ON(count > ISCSI_SESSION_ATTRS); 1455 BUG_ON(count > ISCSI_SESSION_ATTRS);
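
/*
 * For context: a transport advertises which of the new host attributes it
 * supports through host_param_mask and answers reads via get_host_param;
 * the new username/password session attributes are additionally gated by
 * the perm flag, whose show routine returns -EACCES without CAP_SYS_ADMIN.
 * A minimal sketch of the LLD side follows -- struct example_hba and its
 * fields are hypothetical, not part of any in-tree driver.
 */
#include <scsi/scsi_host.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

struct example_hba {				/* hypothetical private data */
	char netdev_name[16];
	char initiator_name[224];
};

static int example_get_host_param(struct Scsi_Host *shost,
				  enum iscsi_host_param param, char *buf)
{
	struct example_hba *hba = shost_priv(shost);

	switch (param) {
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return sprintf(buf, "%s\n", hba->netdev_name);
	case ISCSI_HOST_PARAM_INITIATOR_NAME:
		return sprintf(buf, "%s\n", hba->initiator_name);
	default:
		return -ENOSYS;
	}
}

static struct iscsi_transport example_transport = {
	/* ... caps, param_mask, session/connection callbacks ... */
	.host_param_mask = ISCSI_HOST_NETDEV_NAME | ISCSI_HOST_INITIATOR_NAME,
	.get_host_param  = example_get_host_param,
};
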
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d8c9cb24f91..448d316f12d7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1515,7 +1515,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1515 if (!scsi_device_online(sdp)) 1515 if (!scsi_device_online(sdp))
1516 goto out; 1516 goto out;
1517 1517
1518 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA); 1518 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
1519 if (!buffer) { 1519 if (!buffer) {
1520 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 1520 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
1521 "allocation failure.\n"); 1521 "allocation failure.\n");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0c691a60a756..85d38940a6c9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1842,7 +1842,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1842 int blk_size = buff_size; 1842 int blk_size = buff_size;
1843 struct page *p = NULL; 1843 struct page *p = NULL;
1844 1844
1845 if ((blk_size < 0) || (!sfp)) 1845 if (blk_size < 0)
1846 return -EFAULT; 1846 return -EFAULT;
1847 if (0 == blk_size) 1847 if (0 == blk_size)
1848 ++blk_size; /* don't know why */ 1848 ++blk_size; /* don't know why */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index a7dfb65fb842..0a6b45b1b003 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -84,7 +84,7 @@ static int __init snirm710_probe(struct platform_device *dev)
84 84
85 hostdata->dev = &dev->dev; 85 hostdata->dev = &dev->dev;
86 dma_set_mask(&dev->dev, DMA_32BIT_MASK); 86 dma_set_mask(&dev->dev, DMA_32BIT_MASK);
87 hostdata->base = ioremap_nocache(CPHYSADDR(base), 0x100); 87 hostdata->base = ioremap_nocache(base, 0x100);
88 hostdata->differential = 0; 88 hostdata->differential = 0;
89 89
90 hostdata->clock = SNIRM710_CLOCK; 90 hostdata->clock = SNIRM710_CLOCK;
@@ -141,13 +141,7 @@ static struct platform_driver snirm710_driver = {
141 141
142static int __init snirm710_init(void) 142static int __init snirm710_init(void)
143{ 143{
144 int err; 144 return platform_driver_register(&snirm710_driver);
145
146 if ((err = platform_driver_register(&snirm710_driver))) {
147 printk(KERN_ERR "Driver registration failed\n");
148 return err;
149 }
150 return 0;
151} 145}
152 146
153static void __exit snirm710_exit(void) 147static void __exit snirm710_exit(void)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index f9a52af7f5b4..5143c8990845 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -885,7 +885,11 @@ static int __init init_sr(void)
885 rc = register_blkdev(SCSI_CDROM_MAJOR, "sr"); 885 rc = register_blkdev(SCSI_CDROM_MAJOR, "sr");
886 if (rc) 886 if (rc)
887 return rc; 887 return rc;
888 return scsi_register_driver(&sr_template.gendrv); 888 rc = scsi_register_driver(&sr_template.gendrv);
889 if (rc)
890 unregister_blkdev(SCSI_CDROM_MAJOR, "sr");
891
892 return rc;
889} 893}
890 894
891static void __exit exit_sr(void) 895static void __exit exit_sr(void)
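
/*
 * The init_sr fix above restores the usual unwind rule: every resource
 * registered so far must be released when a later step fails.  In outline
 * (both step functions below are hypothetical placeholders):
 */
static int __init example_init(void)
{
	int rc;

	rc = register_step_one();	/* e.g. register_blkdev() */
	if (rc)
		return rc;

	rc = register_step_two();	/* e.g. scsi_register_driver() */
	if (rc)
		unregister_step_one();	/* undo step one, nothing leaks */

	return rc;
}
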
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9ac83abc4028..72f6d8015358 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -395,53 +395,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
395static int stex_map_sg(struct st_hba *hba, 395static int stex_map_sg(struct st_hba *hba,
396 struct req_msg *req, struct st_ccb *ccb) 396 struct req_msg *req, struct st_ccb *ccb)
397{ 397{
398 struct pci_dev *pdev = hba->pdev;
399 struct scsi_cmnd *cmd; 398 struct scsi_cmnd *cmd;
400 dma_addr_t dma_handle; 399 struct scatterlist *sg;
401 struct scatterlist *src;
402 struct st_sgtable *dst; 400 struct st_sgtable *dst;
403 int i; 401 int i, nseg;
404 402
405 cmd = ccb->cmd; 403 cmd = ccb->cmd;
406 dst = (struct st_sgtable *)req->variable; 404 dst = (struct st_sgtable *)req->variable;
407 dst->max_sg_count = cpu_to_le16(ST_MAX_SG); 405 dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
408 dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen); 406 dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
409
410 if (cmd->use_sg) {
411 int n_elem;
412 407
413 src = (struct scatterlist *) cmd->request_buffer; 408 nseg = scsi_dma_map(cmd);
414 n_elem = pci_map_sg(pdev, src, 409 if (nseg < 0)
415 cmd->use_sg, cmd->sc_data_direction); 410 return -EIO;
416 if (n_elem <= 0) 411 if (nseg) {
417 return -EIO; 412 ccb->sg_count = nseg;
413 dst->sg_count = cpu_to_le16((u16)nseg);
418 414
419 ccb->sg_count = n_elem; 415 scsi_for_each_sg(cmd, sg, nseg, i) {
420 dst->sg_count = cpu_to_le16((u16)n_elem); 416 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
421
422 for (i = 0; i < n_elem; i++, src++) {
423 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
424 dst->table[i].addr = 417 dst->table[i].addr =
425 cpu_to_le32(sg_dma_address(src) & 0xffffffff); 418 cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
426 dst->table[i].addr_hi = 419 dst->table[i].addr_hi =
427 cpu_to_le32((sg_dma_address(src) >> 16) >> 16); 420 cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
428 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST; 421 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
429 } 422 }
430 dst->table[--i].ctrl |= SG_CF_EOT; 423 dst->table[--i].ctrl |= SG_CF_EOT;
431 return 0;
432 } 424 }
433 425
434 dma_handle = pci_map_single(pdev, cmd->request_buffer,
435 cmd->request_bufflen, cmd->sc_data_direction);
436 cmd->SCp.dma_handle = dma_handle;
437
438 ccb->sg_count = 1;
439 dst->sg_count = cpu_to_le16(1);
440 dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
441 dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
442 dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
443 dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
444
445 return 0; 426 return 0;
446} 427}
447 428
@@ -451,24 +432,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
451 size_t lcount; 432 size_t lcount;
452 size_t len; 433 size_t len;
453 void *s, *d, *base = NULL; 434 void *s, *d, *base = NULL;
454 if (*count > cmd->request_bufflen) 435 size_t offset;
455 *count = cmd->request_bufflen; 436
437 if (*count > scsi_bufflen(cmd))
438 *count = scsi_bufflen(cmd);
456 lcount = *count; 439 lcount = *count;
457 while (lcount) { 440 while (lcount) {
458 len = lcount; 441 len = lcount;
459 s = (void *)src; 442 s = (void *)src;
460 if (cmd->use_sg) { 443
461 size_t offset = *count - lcount; 444 offset = *count - lcount;
462 s += offset; 445 s += offset;
463 base = scsi_kmap_atomic_sg(cmd->request_buffer, 446 base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
464 sg_count, &offset, &len); 447 sg_count, &offset, &len);
465 if (base == NULL) { 448 if (!base) {
466 *count -= lcount; 449 *count -= lcount;
467 return; 450 return;
468 } 451 }
469 d = base + offset; 452 d = base + offset;
470 } else
471 d = cmd->request_buffer;
472 453
473 if (direction == ST_TO_CMD) 454 if (direction == ST_TO_CMD)
474 memcpy(d, s, len); 455 memcpy(d, s, len);
@@ -476,30 +457,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
476 memcpy(s, d, len); 457 memcpy(s, d, len);
477 458
478 lcount -= len; 459 lcount -= len;
479 if (cmd->use_sg) 460 scsi_kunmap_atomic_sg(base);
480 scsi_kunmap_atomic_sg(base);
481 } 461 }
482} 462}
483 463
484static int stex_direct_copy(struct scsi_cmnd *cmd, 464static int stex_direct_copy(struct scsi_cmnd *cmd,
485 const void *src, size_t count) 465 const void *src, size_t count)
486{ 466{
487 struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
488 size_t cp_len = count; 467 size_t cp_len = count;
489 int n_elem = 0; 468 int n_elem = 0;
490 469
491 if (cmd->use_sg) { 470 n_elem = scsi_dma_map(cmd);
492 n_elem = pci_map_sg(hba->pdev, cmd->request_buffer, 471 if (n_elem < 0)
493 cmd->use_sg, cmd->sc_data_direction); 472 return 0;
494 if (n_elem <= 0)
495 return 0;
496 }
497 473
498 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD); 474 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
499 475
500 if (cmd->use_sg) 476 scsi_dma_unmap(cmd);
501 pci_unmap_sg(hba->pdev, cmd->request_buffer, 477
502 cmd->use_sg, cmd->sc_data_direction);
503 return cp_len == count; 478 return cp_len == count;
504} 479}
505 480
@@ -678,18 +653,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
678 return 0; 653 return 0;
679} 654}
680 655
681static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
682{
683 if (cmd->sc_data_direction != DMA_NONE) {
684 if (cmd->use_sg)
685 pci_unmap_sg(hba->pdev, cmd->request_buffer,
686 cmd->use_sg, cmd->sc_data_direction);
687 else
688 pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
689 cmd->request_bufflen, cmd->sc_data_direction);
690 }
691}
692
693static void stex_scsi_done(struct st_ccb *ccb) 656static void stex_scsi_done(struct st_ccb *ccb)
694{ 657{
695 struct scsi_cmnd *cmd = ccb->cmd; 658 struct scsi_cmnd *cmd = ccb->cmd;
@@ -756,8 +719,8 @@ static void stex_ys_commands(struct st_hba *hba,
756 719
757 if (ccb->cmd->cmnd[0] == MGT_CMD && 720 if (ccb->cmd->cmnd[0] == MGT_CMD &&
758 resp->scsi_status != SAM_STAT_CHECK_CONDITION) { 721 resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
759 ccb->cmd->request_bufflen = 722 scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
760 le32_to_cpu(*(__le32 *)&resp->variable[0]); 723 le32_to_cpu(*(__le32 *)&resp->variable[0]));
761 return; 724 return;
762 } 725 }
763 726
@@ -855,7 +818,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
855 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) 818 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
856 stex_controller_info(hba, ccb); 819 stex_controller_info(hba, ccb);
857 820
858 stex_unmap_sg(hba, ccb->cmd); 821 scsi_dma_unmap(ccb->cmd);
859 stex_scsi_done(ccb); 822 stex_scsi_done(ccb);
860 hba->out_req_cnt--; 823 hba->out_req_cnt--;
861 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) { 824 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
@@ -1028,7 +991,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
1028 } 991 }
1029 992
1030fail_out: 993fail_out:
1031 stex_unmap_sg(hba, cmd); 994 scsi_dma_unmap(cmd);
1032 hba->wait_ccb->req = NULL; /* nullify the req's future return */ 995 hba->wait_ccb->req = NULL; /* nullify the req's future return */
1033 hba->wait_ccb = NULL; 996 hba->wait_ccb = NULL;
1034 result = FAILED; 997 result = FAILED;
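
/*
 * The stex changes above, like most conversions in this merge, collapse
 * the old use_sg/request_buffer branching into the data buffer accessors,
 * which always present the command's data as a scatterlist.  The skeleton
 * shared by these drivers looks roughly like this; setup_sge() stands in
 * for programming one hardware S/G element and is hypothetical:
 */
#include <scsi/scsi_cmnd.h>

extern void setup_sge(int idx, dma_addr_t addr, unsigned int len);

static int example_map_data(struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* 0: no data, < 0: mapping failure */
	if (nseg < 0)
		return -EIO;

	scsi_for_each_sg(cmd, sg, nseg, i)
		setup_sge(i, sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}

static void example_complete(struct scsi_cmnd *cmd, unsigned int done_len)
{
	scsi_dma_unmap(cmd);		/* balances a successful scsi_dma_map() */
	scsi_set_resid(cmd, scsi_bufflen(cmd) - done_len);
}
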
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index bbeb2451d32f..2c87db98cdfb 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -493,7 +493,7 @@ static int __devinit esp_sbus_probe_one(struct device *dev,
493 goto fail; 493 goto fail;
494 494
495 host->max_id = (hme ? 16 : 8); 495 host->max_id = (hme ? 16 : 8);
496 esp = host_to_esp(host); 496 esp = shost_priv(host);
497 497
498 esp->host = host; 498 esp->host = host;
499 esp->dev = esp_dev; 499 esp->dev = esp_dev;
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 2ca950582bc3..92bfaeafe30d 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -332,8 +332,7 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
332 int i; 332 int i;
333 unsigned long flags = 0; 333 unsigned long flags = 0;
334 unsigned char status_reg, pio_int_reg, int_reg; 334 unsigned char status_reg, pio_int_reg, int_reg;
335 struct scatterlist *sglist; 335 struct scatterlist *sg;
336 unsigned int sgcount;
337 unsigned int tot_trans = 0; 336 unsigned int tot_trans = 0;
338 337
 339 /* We search the base address of the host adapter which caused the interrupt */ 338 /* We search the base address of the host adapter which caused the interrupt */
@@ -429,19 +428,15 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
429 { 428 {
430 current_command->SCp.phase = data_out; 429 current_command->SCp.phase = data_out;
431 outb(FLUSH_FIFO, base + COMMAND_REG); 430 outb(FLUSH_FIFO, base + COMMAND_REG);
432 sym53c416_set_transfer_counter(base, current_command->request_bufflen); 431 sym53c416_set_transfer_counter(base,
432 scsi_bufflen(current_command));
433 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG); 433 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
434 if(!current_command->use_sg) 434
435 tot_trans = sym53c416_write(base, current_command->request_buffer, current_command->request_bufflen); 435 scsi_for_each_sg(current_command,
436 else 436 sg, scsi_sg_count(current_command), i) {
437 { 437 tot_trans += sym53c416_write(base,
438 sgcount = current_command->use_sg; 438 SG_ADDRESS(sg),
439 sglist = current_command->request_buffer; 439 sg->length);
440 while(sgcount--)
441 {
442 tot_trans += sym53c416_write(base, SG_ADDRESS(sglist), sglist->length);
443 sglist++;
444 }
445 } 440 }
446 if(tot_trans < current_command->underflow) 441 if(tot_trans < current_command->underflow)
447 printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow); 442 printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
@@ -455,19 +450,16 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
455 { 450 {
456 current_command->SCp.phase = data_in; 451 current_command->SCp.phase = data_in;
457 outb(FLUSH_FIFO, base + COMMAND_REG); 452 outb(FLUSH_FIFO, base + COMMAND_REG);
458 sym53c416_set_transfer_counter(base, current_command->request_bufflen); 453 sym53c416_set_transfer_counter(base,
454 scsi_bufflen(current_command));
455
459 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG); 456 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
460 if(!current_command->use_sg) 457
461 tot_trans = sym53c416_read(base, current_command->request_buffer, current_command->request_bufflen); 458 scsi_for_each_sg(current_command,
462 else 459 sg, scsi_sg_count(current_command), i) {
463 { 460 tot_trans += sym53c416_read(base,
464 sgcount = current_command->use_sg; 461 SG_ADDRESS(sg),
465 sglist = current_command->request_buffer; 462 sg->length);
466 while(sgcount--)
467 {
468 tot_trans += sym53c416_read(base, SG_ADDRESS(sglist), sglist->length);
469 sglist++;
470 }
471 } 463 }
472 if(tot_trans < current_command->underflow) 464 if(tot_trans < current_command->underflow)
473 printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow); 465 printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 4d78c7e87cca..15a51459c81f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -146,41 +146,17 @@ struct sym_ucmd { /* Override the SCSI pointer structure */
146 146
147static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 147static void __unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
148{ 148{
149 int dma_dir = cmd->sc_data_direction; 149 if (SYM_UCMD_PTR(cmd)->data_mapped)
150 scsi_dma_unmap(cmd);
150 151
151 switch(SYM_UCMD_PTR(cmd)->data_mapped) {
152 case 2:
153 pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir);
154 break;
155 case 1:
156 pci_unmap_single(pdev, SYM_UCMD_PTR(cmd)->data_mapping,
157 cmd->request_bufflen, dma_dir);
158 break;
159 }
160 SYM_UCMD_PTR(cmd)->data_mapped = 0; 152 SYM_UCMD_PTR(cmd)->data_mapped = 0;
161} 153}
162 154
163static dma_addr_t __map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
164{
165 dma_addr_t mapping;
166 int dma_dir = cmd->sc_data_direction;
167
168 mapping = pci_map_single(pdev, cmd->request_buffer,
169 cmd->request_bufflen, dma_dir);
170 if (mapping) {
171 SYM_UCMD_PTR(cmd)->data_mapped = 1;
172 SYM_UCMD_PTR(cmd)->data_mapping = mapping;
173 }
174
175 return mapping;
176}
177
178static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 155static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
179{ 156{
180 int use_sg; 157 int use_sg;
181 int dma_dir = cmd->sc_data_direction;
182 158
183 use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, dma_dir); 159 use_sg = scsi_dma_map(cmd);
184 if (use_sg > 0) { 160 if (use_sg > 0) {
185 SYM_UCMD_PTR(cmd)->data_mapped = 2; 161 SYM_UCMD_PTR(cmd)->data_mapped = 2;
186 SYM_UCMD_PTR(cmd)->data_mapping = use_sg; 162 SYM_UCMD_PTR(cmd)->data_mapping = use_sg;
@@ -191,8 +167,6 @@ static int __map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
191 167
192#define unmap_scsi_data(np, cmd) \ 168#define unmap_scsi_data(np, cmd) \
193 __unmap_scsi_data(np->s.device, cmd) 169 __unmap_scsi_data(np->s.device, cmd)
194#define map_scsi_single_data(np, cmd) \
195 __map_scsi_single_data(np->s.device, cmd)
196#define map_scsi_sg_data(np, cmd) \ 170#define map_scsi_sg_data(np, cmd) \
197 __map_scsi_sg_data(np->s.device, cmd) 171 __map_scsi_sg_data(np->s.device, cmd)
198/* 172/*
@@ -322,55 +296,20 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid)
322 */ 296 */
323 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); 297 cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status);
324 } 298 }
325 cmd->resid = resid; 299 scsi_set_resid(cmd, resid);
326 cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status; 300 cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status;
327} 301}
328 302
329
330/*
331 * Build the scatter/gather array for an I/O.
332 */
333
334static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
335{
336 struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1];
337 int segment;
338 unsigned int len = cmd->request_bufflen;
339
340 if (len) {
341 dma_addr_t baddr = map_scsi_single_data(np, cmd);
342 if (baddr) {
343 if (len & 1) {
344 struct sym_tcb *tp = &np->target[cp->target];
345 if (tp->head.wval & EWS) {
346 len++;
347 cp->odd_byte_adjustment++;
348 }
349 }
350 cp->data_len = len;
351 sym_build_sge(np, data, baddr, len);
352 segment = 1;
353 } else {
354 segment = -2;
355 }
356 } else {
357 segment = 0;
358 }
359
360 return segment;
361}
362
363static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) 303static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd)
364{ 304{
365 int segment; 305 int segment;
366 int use_sg = (int) cmd->use_sg; 306 int use_sg;
367 307
368 cp->data_len = 0; 308 cp->data_len = 0;
369 309
370 if (!use_sg) 310 use_sg = map_scsi_sg_data(np, cmd);
371 segment = sym_scatter_no_sglist(np, cp, cmd); 311 if (use_sg > 0) {
372 else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) { 312 struct scatterlist *sg;
373 struct scatterlist *scatter = (struct scatterlist *)cmd->request_buffer;
374 struct sym_tcb *tp = &np->target[cp->target]; 313 struct sym_tcb *tp = &np->target[cp->target];
375 struct sym_tblmove *data; 314 struct sym_tblmove *data;
376 315
@@ -381,9 +320,9 @@ static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd
381 320
382 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg]; 321 data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
383 322
384 for (segment = 0; segment < use_sg; segment++) { 323 scsi_for_each_sg(cmd, sg, use_sg, segment) {
385 dma_addr_t baddr = sg_dma_address(&scatter[segment]); 324 dma_addr_t baddr = sg_dma_address(sg);
386 unsigned int len = sg_dma_len(&scatter[segment]); 325 unsigned int len = sg_dma_len(sg);
387 326
388 if ((len & 1) && (tp->head.wval & EWS)) { 327 if ((len & 1) && (tp->head.wval & EWS)) {
389 len++; 328 len++;
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index e022d3c71b59..0f097ba4f712 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -255,7 +255,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd)
255 */ 255 */
256static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid) 256static __inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid)
257{ 257{
258 cmd->resid = resid; 258 scsi_set_resid(cmd, resid);
259 cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f)); 259 cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f));
260} 260}
261void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid); 261void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid);
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index e7b85e832eb5..14cba1ca38b3 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -457,28 +457,21 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
457 error = 1; 457 error = 1;
458 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle)); 458 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
459 /* Map SG list */ 459 /* Map SG list */
460 } else if (pcmd->use_sg) { 460 } else if (scsi_sg_count(pcmd)) {
461 pSRB->pSegmentList = (struct scatterlist *) pcmd->request_buffer; 461 int nseg;
462 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, pcmd->use_sg, 462
463 pcmd->sc_data_direction); 463 nseg = scsi_dma_map(pcmd);
464
465 pSRB->pSegmentList = scsi_sglist(pcmd);
466 pSRB->SGcount = nseg;
467
464 /* TODO: error handling */ 468 /* TODO: error handling */
465 if (!pSRB->SGcount) 469 if (nseg < 0)
466 error = 1; 470 error = 1;
467 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\ 471 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
468 __FUNCTION__, pcmd->request_buffer, pSRB->SGcount, pcmd->use_sg)); 472 __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
469 /* Map single segment */ 473 /* Map single segment */
470 } else if (pcmd->request_buffer && pcmd->request_bufflen) { 474 } else
471 pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->request_buffer, pcmd->request_bufflen);
472 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
473 pcmd->sc_data_direction);
474 cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
475
476 /* TODO: error handling */
477 if (pSRB->SGcount != 1)
478 error = 1;
479 DEBUG1(printk("%s(): Mapped request buffer %p at %x\n", __FUNCTION__, pcmd->request_buffer, cmdp->saved_dma_handle));
480 /* No mapping !? */
481 } else
482 pSRB->SGcount = 0; 475 pSRB->SGcount = 0;
483 476
484 return error; 477 return error;
@@ -494,12 +487,10 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
494 if (pSRB->SRBFlag) { 487 if (pSRB->SRBFlag) {
495 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE); 488 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
496 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle)); 489 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
497 } else if (pcmd->use_sg) { 490 } else {
498 pci_unmap_sg(pdev, pcmd->request_buffer, pcmd->use_sg, pcmd->sc_data_direction); 491 scsi_dma_unmap(pcmd);
499 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n", __FUNCTION__, pcmd->request_buffer, pcmd->use_sg)); 492 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
500 } else if (pcmd->request_buffer && pcmd->request_bufflen) { 493 __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
501 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, pcmd->sc_data_direction);
502 DEBUG1(printk("%s(): Unmapped request buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
503 } 494 }
504} 495}
505 496
@@ -1153,9 +1144,9 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1153 struct scatterlist *psgl; 1144 struct scatterlist *psgl;
1154 pSRB->TotalXferredLen = 0; 1145 pSRB->TotalXferredLen = 0;
1155 pSRB->SGIndex = 0; 1146 pSRB->SGIndex = 0;
1156 if (pcmd->use_sg) { 1147 if (scsi_sg_count(pcmd)) {
1157 size_t saved; 1148 size_t saved;
1158 pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer; 1149 pSRB->pSegmentList = scsi_sglist(pcmd);
1159 psgl = pSRB->pSegmentList; 1150 psgl = pSRB->pSegmentList;
1160 //dc390_pci_sync(pSRB); 1151 //dc390_pci_sync(pSRB);
1161 1152
@@ -1179,12 +1170,6 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1179 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n", 1170 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
1180 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr); 1171 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
1181 1172
1182 } else if(pcmd->request_buffer) {
1183 //dc390_pci_sync(pSRB);
1184
1185 sg_dma_len(&pSRB->Segmentx) = pcmd->request_bufflen - pSRB->Saved_Ptr;
1186 pSRB->SGcount = 1;
1187 pSRB->pSegmentList = (struct scatterlist *) &pSRB->Segmentx;
1188 } else { 1173 } else {
1189 pSRB->SGcount = 0; 1174 pSRB->SGcount = 0;
1190 printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n"); 1175 printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
@@ -1579,7 +1564,8 @@ dc390_Disconnect( struct dc390_acb* pACB )
1579 if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) || 1564 if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) ||
1580 !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) ) 1565 !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) )
1581 { /* Selection time out */ 1566 { /* Selection time out */
1582 pSRB->TargetStatus = SCSI_STAT_SEL_TIMEOUT; 1567 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1568 pSRB->TargetStatus = 0;
1583 goto disc1; 1569 goto disc1;
1584 } 1570 }
1585 else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED)) 1571 else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED))
@@ -1612,7 +1598,7 @@ dc390_Reselect( struct dc390_acb* pACB )
1612 if( !( pACB->scan_devices ) ) 1598 if( !( pACB->scan_devices ) )
1613 { 1599 {
1614 struct scsi_cmnd *pcmd = pSRB->pcmd; 1600 struct scsi_cmnd *pcmd = pSRB->pcmd;
1615 pcmd->resid = pcmd->request_bufflen; 1601 scsi_set_resid(pcmd, scsi_bufflen(pcmd));
1616 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1602 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1617 dc390_Going_remove(pDCB, pSRB); 1603 dc390_Going_remove(pDCB, pSRB);
1618 dc390_Free_insert(pACB, pSRB); 1604 dc390_Free_insert(pACB, pSRB);
@@ -1695,7 +1681,6 @@ dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_
1695 pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN)); 1681 pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));
1696 1682
1697 pSRB->SRBFlag |= AUTO_REQSENSE; 1683 pSRB->SRBFlag |= AUTO_REQSENSE;
1698 pSRB->SavedSGCount = pcmd->use_sg;
1699 pSRB->SavedTotXLen = pSRB->TotalXferredLen; 1684 pSRB->SavedTotXLen = pSRB->TotalXferredLen;
1700 pSRB->AdaptStatus = 0; 1685 pSRB->AdaptStatus = 0;
1701 pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */ 1686 pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */
@@ -1728,22 +1713,21 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1728 { /* Last command was a Request Sense */ 1713 { /* Last command was a Request Sense */
1729 pSRB->SRBFlag &= ~AUTO_REQSENSE; 1714 pSRB->SRBFlag &= ~AUTO_REQSENSE;
1730 pSRB->AdaptStatus = 0; 1715 pSRB->AdaptStatus = 0;
1731 pSRB->TargetStatus = CHECK_CONDITION << 1; 1716 pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION;
1732 1717
1733 //pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status); 1718 //pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status);
1734 if (status == (CHECK_CONDITION << 1)) 1719 if (status == SAM_STAT_CHECK_CONDITION)
1735 pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0); 1720 pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0);
1736 else /* Retry */ 1721 else /* Retry */
1737 { 1722 {
1738 if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */) 1723 if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */)
1739 { 1724 {
1740 /* Don't retry on TEST_UNIT_READY */ 1725 /* Don't retry on TEST_UNIT_READY */
1741 pcmd->result = MK_RES_LNX(DRIVER_SENSE,DID_OK,0,CHECK_CONDITION); 1726 pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION);
1742 REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\ 1727 REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\
1743 (u32) pcmd->result, (u32) pSRB->TotalXferredLen)); 1728 (u32) pcmd->result, (u32) pSRB->TotalXferredLen));
1744 } else { 1729 } else {
1745 SET_RES_DRV(pcmd->result, DRIVER_SENSE); 1730 SET_RES_DRV(pcmd->result, DRIVER_SENSE);
1746 pcmd->use_sg = pSRB->SavedSGCount;
1747 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); 1731 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
1748 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1732 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1749 pSRB->TotalXferredLen = 0; 1733 pSRB->TotalXferredLen = 0;
@@ -1754,7 +1738,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1754 } 1738 }
1755 if( status ) 1739 if( status )
1756 { 1740 {
1757 if( status_byte(status) == CHECK_CONDITION ) 1741 if (status == SAM_STAT_CHECK_CONDITION)
1758 { 1742 {
1759 if (dc390_RequestSense(pACB, pDCB, pSRB)) { 1743 if (dc390_RequestSense(pACB, pDCB, pSRB)) {
1760 SET_RES_DID(pcmd->result, DID_ERROR); 1744 SET_RES_DID(pcmd->result, DID_ERROR);
@@ -1762,22 +1746,14 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1762 } 1746 }
1763 return; 1747 return;
1764 } 1748 }
1765 else if( status_byte(status) == QUEUE_FULL ) 1749 else if (status == SAM_STAT_TASK_SET_FULL)
1766 { 1750 {
1767 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); 1751 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
1768 pcmd->use_sg = pSRB->SavedSGCount;
1769 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1752 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1770 pSRB->TotalXferredLen = 0; 1753 pSRB->TotalXferredLen = 0;
1771 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1754 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1772 } 1755 }
1773 else if(status == SCSI_STAT_SEL_TIMEOUT) 1756 else if (status == SAM_STAT_BUSY &&
1774 {
1775 pSRB->AdaptStatus = H_SEL_TIMEOUT;
1776 pSRB->TargetStatus = 0;
1777 pcmd->result = MK_RES(0,DID_NO_CONNECT,0,0);
1778 /* Devices are removed below ... */
1779 }
1780 else if (status_byte(status) == BUSY &&
1781 (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) && 1757 (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) &&
1782 pACB->scan_devices) 1758 pACB->scan_devices)
1783 { 1759 {
@@ -1795,12 +1771,17 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1795 else 1771 else
1796 { /* Target status == 0 */ 1772 { /* Target status == 0 */
1797 status = pSRB->AdaptStatus; 1773 status = pSRB->AdaptStatus;
1798 if(status & H_OVER_UNDER_RUN) 1774 if (status == H_OVER_UNDER_RUN)
1799 { 1775 {
1800 pSRB->TargetStatus = 0; 1776 pSRB->TargetStatus = 0;
1801 SET_RES_DID(pcmd->result,DID_OK); 1777 SET_RES_DID(pcmd->result,DID_OK);
1802 SET_RES_MSG(pcmd->result,pSRB->EndMessage); 1778 SET_RES_MSG(pcmd->result,pSRB->EndMessage);
1803 } 1779 }
1780 else if (status == H_SEL_TIMEOUT)
1781 {
1782 pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0);
1783 /* Devices are removed below ... */
1784 }
1804 else if( pSRB->SRBStatus & PARITY_ERROR) 1785 else if( pSRB->SRBStatus & PARITY_ERROR)
1805 { 1786 {
1806 //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0); 1787 //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0);
@@ -1816,7 +1797,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1816 } 1797 }
1817 1798
1818cmd_done: 1799cmd_done:
1819 pcmd->resid = pcmd->request_bufflen - pSRB->TotalXferredLen; 1800 scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);
1820 1801
1821 dc390_Going_remove (pDCB, pSRB); 1802 dc390_Going_remove (pDCB, pSRB);
1822 /* Add to free list */ 1803 /* Add to free list */
diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h
index c3d8c80cfb38..77adc54dbd16 100644
--- a/drivers/scsi/tmscsim.h
+++ b/drivers/scsi/tmscsim.h
@@ -57,7 +57,6 @@ u8 SGcount;
57 57
58u8 MsgCnt; 58u8 MsgCnt;
59u8 EndMessage; 59u8 EndMessage;
60u8 SavedSGCount;
61 60
62u8 MsgInBuf[6]; 61u8 MsgInBuf[6];
63u8 MsgOutBuf[6]; 62u8 MsgOutBuf[6];
@@ -258,13 +257,6 @@ struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
258#define H_BAD_CCB_OR_SG 0x1A 257#define H_BAD_CCB_OR_SG 0x1A
259#define H_ABORT 0x0FF 258#define H_ABORT 0x0FF
260 259
261/*; SCSI Status byte codes*/
262/* The values defined in include/scsi/scsi.h, to be shifted << 1 */
263
264#define SCSI_STAT_UNEXP_BUS_F 0xFD /*; Unexpect Bus Free */
265#define SCSI_STAT_BUS_RST_DETECT 0xFE /*; Scsi Bus Reset detected */
266#define SCSI_STAT_SEL_TIMEOUT 0xFF /*; Selection Time out */
267
268/* cmd->result */ 260/* cmd->result */
269#define RES_TARGET 0x000000FF /* Target State */ 261#define RES_TARGET 0x000000FF /* Target State */
270#define RES_TARGET_LNX STATUS_MASK /* Only official ... */ 262#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
@@ -273,7 +265,7 @@ struct dc390_srb SRB_array[MAX_SRB_CNT]; /* 50 SRBs */
273#define RES_DRV 0xFF000000 /* DRIVER_ codes */ 265#define RES_DRV 0xFF000000 /* DRIVER_ codes */
274 266
275#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)) 267#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
276#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1) 268#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
277 269
278#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0) 270#define SET_RES_TARGET(who, tgt) do { who &= ~RES_TARGET; who |= (int)(tgt); } while (0)
279#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0) 271#define SET_RES_TARGET_LNX(who, tgt) do { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; } while (0)
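
/*
 * The tmscsim changes above drop the driver's private, left-shifted status
 * codes in favour of the SAM_STAT_* values from <scsi/scsi.h>, which are
 * the raw SCSI status bytes: SAM_STAT_CHECK_CONDITION (0x02) is the old
 * CHECK_CONDITION (0x01) shifted left once, and SAM_STAT_TASK_SET_FULL
 * (0x28) likewise corresponds to QUEUE_FULL (0x14).  The raw byte can
 * therefore be compared directly, without status_byte().  A sketch:
 */
#include <scsi/scsi.h>

static int example_is_check_condition(u8 status)
{
	/* old style: if (status_byte(status) == CHECK_CONDITION) ...  */
	/* new style: compare the unshifted SCSI status byte directly  */
	return status == SAM_STAT_CHECK_CONDITION;
}
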
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 3de08a15de40..9e8232a1f169 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,7 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
1111static void map_dma(unsigned int i, unsigned int j) { 1111static void map_dma(unsigned int i, unsigned int j) {
1112 unsigned int data_len = 0; 1112 unsigned int data_len = 0;
1113 unsigned int k, count, pci_dir; 1113 unsigned int k, count, pci_dir;
1114 struct scatterlist *sgpnt; 1114 struct scatterlist *sg;
1115 struct mscp *cpp; 1115 struct mscp *cpp;
1116 struct scsi_cmnd *SCpnt; 1116 struct scsi_cmnd *SCpnt;
1117 1117
@@ -1124,33 +1124,28 @@ static void map_dma(unsigned int i, unsigned int j) {
1124 1124
1125 cpp->sense_len = sizeof SCpnt->sense_buffer; 1125 cpp->sense_len = sizeof SCpnt->sense_buffer;
1126 1126
1127 if (!SCpnt->use_sg) { 1127 if (scsi_bufflen(SCpnt)) {
1128 1128 count = scsi_dma_map(SCpnt);
1129 /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */ 1129 BUG_ON(count < 0);
1130 if (!SCpnt->request_bufflen) pci_dir = PCI_DMA_BIDIRECTIONAL; 1130
1131 1131 scsi_for_each_sg(SCpnt, sg, count, k) {
1132 if (SCpnt->request_buffer) 1132 cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
1133 cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, 1133 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
1134 SCpnt->request_buffer, SCpnt->request_bufflen, pci_dir)); 1134 data_len += sg->length;
1135 1135 }
1136 cpp->data_len = H2DEV(SCpnt->request_bufflen); 1136
1137 return; 1137 cpp->sg = TRUE;
1138 } 1138 cpp->use_sg = scsi_sg_count(SCpnt);
1139 1139 cpp->data_address =
1140 sgpnt = (struct scatterlist *) SCpnt->request_buffer; 1140 H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
1141 count = pci_map_sg(HD(j)->pdev, sgpnt, SCpnt->use_sg, pci_dir); 1141 cpp->use_sg * sizeof(struct sg_list),
1142 1142 pci_dir));
1143 for (k = 0; k < count; k++) { 1143 cpp->data_len = H2DEV(data_len);
1144 cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k])); 1144
1145 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k])); 1145 } else {
1146 data_len += sgpnt[k].length; 1146 pci_dir = PCI_DMA_BIDIRECTIONAL;
1147 } 1147 cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
1148 1148 }
1149 cpp->sg = TRUE;
1150 cpp->use_sg = SCpnt->use_sg;
1151 cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
1152 SCpnt->use_sg * sizeof(struct sg_list), pci_dir));
1153 cpp->data_len = H2DEV(data_len);
1154} 1149}
1155 1150
1156static void unmap_dma(unsigned int i, unsigned int j) { 1151static void unmap_dma(unsigned int i, unsigned int j) {
@@ -1165,8 +1160,7 @@ static void unmap_dma(unsigned int i, unsigned int j) {
1165 pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr), 1160 pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
1166 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1161 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1167 1162
1168 if (SCpnt->use_sg) 1163 scsi_dma_unmap(SCpnt);
1169 pci_unmap_sg(HD(j)->pdev, SCpnt->request_buffer, SCpnt->use_sg, pci_dir);
1170 1164
1171 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL; 1165 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
1172 1166
@@ -1187,9 +1181,9 @@ static void sync_dma(unsigned int i, unsigned int j) {
1187 pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr), 1181 pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
1188 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1182 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1189 1183
1190 if (SCpnt->use_sg) 1184 if (scsi_sg_count(SCpnt))
1191 pci_dma_sync_sg_for_cpu(HD(j)->pdev, SCpnt->request_buffer, 1185 pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
1192 SCpnt->use_sg, pci_dir); 1186 scsi_sg_count(SCpnt), pci_dir);
1193 1187
1194 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL; 1188 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
1195 1189
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 56906aba5ee3..c08235d5afc9 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -675,16 +675,15 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
675 675
676static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt) 676static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
677{ 677{
678 struct scatterlist *sl; 678 struct scatterlist *sg;
679 long transfer_length = 0; 679 long transfer_length = 0;
680 int i, max; 680 int i, max;
681 681
682 sl = (struct scatterlist *) SCpnt->request_buffer; 682 max = scsi_sg_count(SCpnt);
683 max = SCpnt->use_sg; 683 scsi_for_each_sg(SCpnt, sg, max, i) {
684 for (i = 0; i < max; i++) { 684 mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset;
685 mscp->sglist[i].address = isa_page_to_bus(sl[i].page) + sl[i].offset; 685 mscp->sglist[i].num_bytes = sg->length;
686 mscp->sglist[i].num_bytes = sl[i].length; 686 transfer_length += sg->length;
687 transfer_length += sl[i].length;
688 } 687 }
689 mscp->number_of_sg_list = max; 688 mscp->number_of_sg_list = max;
690 mscp->transfer_data = isa_virt_to_bus(mscp->sglist); 689 mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
@@ -730,15 +729,15 @@ static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
730 my_mscp->target_id = SCpnt->device->id; 729 my_mscp->target_id = SCpnt->device->id;
731 my_mscp->ch_no = 0; 730 my_mscp->ch_no = 0;
732 my_mscp->lun = SCpnt->device->lun; 731 my_mscp->lun = SCpnt->device->lun;
733 if (SCpnt->use_sg) { 732 if (scsi_sg_count(SCpnt)) {
734 /* Set scatter/gather flag in SCSI command packet */ 733 /* Set scatter/gather flag in SCSI command packet */
735 my_mscp->sg = TRUE; 734 my_mscp->sg = TRUE;
736 build_sg_list(my_mscp, SCpnt); 735 build_sg_list(my_mscp, SCpnt);
737 } else { 736 } else {
738 /* Unset scatter/gather flag in SCSI command packet */ 737 /* Unset scatter/gather flag in SCSI command packet */
739 my_mscp->sg = FALSE; 738 my_mscp->sg = FALSE;
740 my_mscp->transfer_data = isa_virt_to_bus(SCpnt->request_buffer); 739 my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
741 my_mscp->transfer_data_length = SCpnt->request_bufflen; 740 my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
742 } 741 }
743 my_mscp->command_link = 0; /*???*/ 742 my_mscp->command_link = 0; /*???*/
744 my_mscp->scsi_command_link_id = 0; /*???*/ 743 my_mscp->scsi_command_link_id = 0; /*???*/
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 30be76514c43..d6fd4259c56b 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1091,6 +1091,7 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1091 unchar *cdb = (unchar *) SCpnt->cmnd; 1091 unchar *cdb = (unchar *) SCpnt->cmnd;
1092 unchar idlun; 1092 unchar idlun;
1093 short cdblen; 1093 short cdblen;
1094 int nseg;
1094 Adapter *host = (Adapter *) SCpnt->device->host->hostdata; 1095 Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
1095 1096
1096 cdblen = SCpnt->cmd_len; 1097 cdblen = SCpnt->cmd_len;
@@ -1106,28 +1107,29 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1106 SCpnt->host_scribble = (unchar *) scb; 1107 SCpnt->host_scribble = (unchar *) scb;
1107 scb->host = host; 1108 scb->host = host;
1108 1109
1109 if (SCpnt->use_sg) { 1110 nseg = scsi_sg_count(SCpnt);
1110 struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer; 1111 if (nseg) {
1112 struct scatterlist *sg;
1111 unsigned i; 1113 unsigned i;
1112 1114
1113 if (SCpnt->device->host->sg_tablesize == SG_NONE) { 1115 if (SCpnt->device->host->sg_tablesize == SG_NONE) {
1114 panic("wd7000_queuecommand: scatter/gather not supported.\n"); 1116 panic("wd7000_queuecommand: scatter/gather not supported.\n");
1115 } 1117 }
1116 dprintk("Using scatter/gather with %d elements.\n", SCpnt->use_sg); 1118 dprintk("Using scatter/gather with %d elements.\n", nseg);
1117 1119
1118 sgb = scb->sgb; 1120 sgb = scb->sgb;
1119 scb->op = 1; 1121 scb->op = 1;
1120 any2scsi(scb->dataptr, (int) sgb); 1122 any2scsi(scb->dataptr, (int) sgb);
1121 any2scsi(scb->maxlen, SCpnt->use_sg * sizeof(Sgb)); 1123 any2scsi(scb->maxlen, nseg * sizeof(Sgb));
1122 1124
1123 for (i = 0; i < SCpnt->use_sg; i++) { 1125 scsi_for_each_sg(SCpnt, sg, nseg, i) {
1124 any2scsi(sgb[i].ptr, isa_page_to_bus(sg[i].page) + sg[i].offset); 1126 any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset);
1125 any2scsi(sgb[i].len, sg[i].length); 1127 any2scsi(sgb[i].len, sg->length);
1126 } 1128 }
1127 } else { 1129 } else {
1128 scb->op = 0; 1130 scb->op = 0;
1129 any2scsi(scb->dataptr, isa_virt_to_bus(SCpnt->request_buffer)); 1131 any2scsi(scb->dataptr, isa_virt_to_bus(scsi_sglist(SCpnt)));
1130 any2scsi(scb->maxlen, SCpnt->request_bufflen); 1132 any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
1131 } 1133 }
1132 1134
1133 /* FIXME: drop lock and yield here ? */ 1135 /* FIXME: drop lock and yield here ? */
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
new file mode 100644
index 000000000000..50703877a585
--- /dev/null
+++ b/drivers/scsi/zorro7xx.c
@@ -0,0 +1,180 @@
1/*
2 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
3 * Amiga MacroSystemUS WarpEngine SCSI controller.
4 * Amiga Technologies/DKB A4091 SCSI controller.
5 *
6 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
7 * plus modifications of the 53c7xx.c driver to support the Amiga.
8 *
9 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/zorro.h>
16#include <asm/amigaints.h>
17#include <scsi/scsi_host.h>
18#include <scsi/scsi_transport_spi.h>
19
20#include "53c700.h"
21
22MODULE_AUTHOR("Alan Hourihane <alanh@fairlite.demon.co.uk> / Kars de Jong <jongk@linux-m68k.org>");
23MODULE_DESCRIPTION("Amiga Zorro NCR53C710 driver");
24MODULE_LICENSE("GPL");
25
26
27static struct scsi_host_template zorro7xx_scsi_driver_template = {
28 .proc_name = "zorro7xx",
29 .this_id = 7,
30 .module = THIS_MODULE,
31};
32
33static struct zorro_driver_data {
34 const char *name;
35 unsigned long offset;
36 int absolute; /* offset is absolute address */
37} zorro7xx_driver_data[] __devinitdata = {
38 { .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 },
39 { .name = "WarpEngine 40xx", .offset = 0x40000 },
40 { .name = "A4091", .offset = 0x800000 },
41 { .name = "GForce 040/060", .offset = 0x40000 },
42 { 0 }
43};
44
45static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
46 {
47 .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS,
48 .driver_data = (unsigned long)&zorro7xx_driver_data[0],
49 },
50 {
51 .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx,
52 .driver_data = (unsigned long)&zorro7xx_driver_data[1],
53 },
54 {
55 .id = ZORRO_PROD_CBM_A4091_1,
56 .driver_data = (unsigned long)&zorro7xx_driver_data[2],
57 },
58 {
59 .id = ZORRO_PROD_CBM_A4091_2,
60 .driver_data = (unsigned long)&zorro7xx_driver_data[2],
61 },
62 {
63 .id = ZORRO_PROD_GVP_GFORCE_040_060,
64 .driver_data = (unsigned long)&zorro7xx_driver_data[3],
65 },
66 { 0 }
67};
68
69static int __devinit zorro7xx_init_one(struct zorro_dev *z,
70 const struct zorro_device_id *ent)
71{
72 struct Scsi_Host * host = NULL;
73 struct NCR_700_Host_Parameters *hostdata;
74 struct zorro_driver_data *zdd;
75 unsigned long board, ioaddr;
76
77 board = zorro_resource_start(z);
78 zdd = (struct zorro_driver_data *)ent->driver_data;
79
80 if (zdd->absolute) {
81 ioaddr = zdd->offset;
82 } else {
83 ioaddr = board + zdd->offset;
84 }
85
86 if (!zorro_request_device(z, zdd->name)) {
87 printk(KERN_ERR "zorro7xx: cannot reserve region 0x%lx, abort\n",
88 board);
89 return -EBUSY;
90 }
91
92 hostdata = kmalloc(sizeof(struct NCR_700_Host_Parameters), GFP_KERNEL);
93 if (hostdata == NULL) {
94 printk(KERN_ERR "zorro7xx: Failed to allocate host data\n");
95 goto out_release;
96 }
97
98 memset(hostdata, 0, sizeof(struct NCR_700_Host_Parameters));
99
100 /* Fill in the required pieces of hostdata */
101 if (ioaddr > 0x01000000)
102 hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
103 else
104 hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
105
106 hostdata->clock = 50;
107 hostdata->chip710 = 1;
108
109 /* Settings for at least WarpEngine 40xx */
110 hostdata->ctest7_extra = CTEST7_TT1;
111
112 zorro7xx_scsi_driver_template.name = zdd->name;
113
114 /* and register the chip */
115 host = NCR_700_detect(&zorro7xx_scsi_driver_template, hostdata,
116 &z->dev);
117 if (!host) {
118 printk(KERN_ERR "zorro7xx: No host detected; "
119 "board configuration problem?\n");
120 goto out_free;
121 }
122
123 host->this_id = 7;
124 host->base = ioaddr;
125 host->irq = IRQ_AMIGA_PORTS;
126
127 if (request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "zorro7xx-scsi",
128 host)) {
129 printk(KERN_ERR "zorro7xx: request_irq failed\n");
130 goto out_put_host;
131 }
132
133 scsi_scan_host(host);
134
135 return 0;
136
137 out_put_host:
138 scsi_host_put(host);
139 out_free:
140 if (ioaddr > 0x01000000)
141 iounmap(hostdata->base);
142 kfree(hostdata);
143 out_release:
144 zorro_release_device(z);
145
146 return -ENODEV;
147}
148
149static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
150{
151 struct Scsi_Host *host = dev_to_shost(&z->dev);
152 struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
153
154 scsi_remove_host(host);
155
156 NCR_700_release(host);
157 kfree(hostdata);
158 free_irq(host->irq, host);
159 zorro_release_device(z);
160}
161
162static struct zorro_driver zorro7xx_driver = {
163 .name = "zorro7xx-scsi",
164 .id_table = zorro7xx_zorro_tbl,
165 .probe = zorro7xx_init_one,
166 .remove = __devexit_p(zorro7xx_remove_one),
167};
168
169static int __init zorro7xx_scsi_init(void)
170{
171 return zorro_register_driver(&zorro7xx_driver);
172}
173
174static void __exit zorro7xx_scsi_exit(void)
175{
176 zorro_unregister_driver(&zorro7xx_driver);
177}
178
179module_init(zorro7xx_scsi_init);
180module_exit(zorro7xx_scsi_exit);